diff --git a/.gitattributes b/.gitattributes index 9788a8d4e431a008d980bc3c9bd15d98a7f8b005..01ffe3dbb795ee0a0fa48d247c128b5ce86bdb7b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -383,3 +383,6 @@ llava_next/lib/python3.10/site-packages/matplotlib/mpl-data/fonts/ttf/DejaVuSeri llava_next/lib/python3.10/pydoc_data/__pycache__/topics.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text llava_next/lib/python3.10/html/__pycache__/entities.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/torch/__pycache__/_tensor_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +llava/lib/libtinfo.a filter=lfs diff=lfs merge=lfs -text +llava/lib/libz.a filter=lfs diff=lfs merge=lfs -text diff --git a/llava/lib/libtinfo.a b/llava/lib/libtinfo.a new file mode 100644 index 0000000000000000000000000000000000000000..cd399be0f72c02fef62609671de4708cf4bde378 --- /dev/null +++ b/llava/lib/libtinfo.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68510564cd1df8040ea826fc013ad16e413d278270d5d01281c334d75d7c427b +size 489850 diff --git a/llava/lib/libz.a b/llava/lib/libz.a new file mode 100644 index 0000000000000000000000000000000000000000..ecdb859ed04ce72c6ba93cdad52cd1f078396c4b --- /dev/null +++ b/llava/lib/libz.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dd684d6a6a4060c8a4193d95db812ca07047890ae0fe8d62cc34bb60370ffc4 +size 165662 diff --git a/llava/lib/tcl8.6/auto.tcl b/llava/lib/tcl8.6/auto.tcl new file mode 100644 index 0000000000000000000000000000000000000000..f293a38f0433f426e30a168392075f52140dc678 --- /dev/null +++ b/llava/lib/tcl8.6/auto.tcl @@ -0,0 +1,648 @@ +# auto.tcl -- +# +# utility procs formerly in init.tcl dealing with auto execution of commands +# and can be auto loaded themselves. +# +# Copyright (c) 1991-1993 The Regents of the University of California. +# Copyright (c) 1994-1998 Sun Microsystems, Inc. +# +# See the file "license.terms" for information on usage and redistribution of +# this file, and for a DISCLAIMER OF ALL WARRANTIES. +# + +# auto_reset -- +# +# Destroy all cached information for auto-loading and auto-execution, so that +# the information gets recomputed the next time it's needed. Also delete any +# commands that are listed in the auto-load index. +# +# Arguments: +# None. + +proc auto_reset {} { + global auto_execs auto_index auto_path + if {[array exists auto_index]} { + foreach cmdName [array names auto_index] { + set fqcn [namespace which $cmdName] + if {$fqcn eq ""} { + continue + } + rename $fqcn {} + } + } + unset -nocomplain auto_execs auto_index ::tcl::auto_oldpath + if {[catch {llength $auto_path}]} { + set auto_path [list [info library]] + } elseif {[info library] ni $auto_path} { + lappend auto_path [info library] + } +} + +# tcl_findLibrary -- +# +# This is a utility for extensions that searches for a library directory +# using a canonical searching algorithm. A side effect is to source the +# initialization script and set a global library variable. 
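+#
+# A typical call, borrowing the Tk values quoted in the argument list below
+# (an illustrative sketch, not normative):
+#
+#	tcl_findLibrary tk 8.0 8.0.3 tk.tcl TK_LIBRARY tk_library
+#
+# On success this sources tk.tcl from the first matching candidate directory
+# and leaves that directory's path in the global variable tk_library.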
+# +# Arguments: +# basename Prefix of the directory name, (e.g., "tk") +# version Version number of the package, (e.g., "8.0") +# patch Patchlevel of the package, (e.g., "8.0.3") +# initScript Initialization script to source (e.g., tk.tcl) +# enVarName environment variable to honor (e.g., TK_LIBRARY) +# varName Global variable to set when done (e.g., tk_library) + +proc tcl_findLibrary {basename version patch initScript enVarName varName} { + upvar #0 $varName the_library + global auto_path env tcl_platform + + set dirs {} + set errors {} + + # The C application may have hardwired a path, which we honor + + if {[info exists the_library] && $the_library ne ""} { + lappend dirs $the_library + } else { + # Do the canonical search + + # 1. From an environment variable, if it exists. Placing this first + # gives the end-user ultimate control to work-around any bugs, or + # to customize. + + if {[info exists env($enVarName)]} { + lappend dirs $env($enVarName) + } + + # 2. In the package script directory registered within the + # configuration of the package itself. + + catch { + lappend dirs [::${basename}::pkgconfig get scriptdir,runtime] + } + + # 3. Relative to auto_path directories. This checks relative to the + # Tcl library as well as allowing loading of libraries added to the + # auto_path that is not relative to the core library or binary paths. + foreach d $auto_path { + lappend dirs [file join $d $basename$version] + if {$tcl_platform(platform) eq "unix" + && $tcl_platform(os) eq "Darwin"} { + # 4. On MacOSX, check the Resources/Scripts subdir too + lappend dirs [file join $d $basename$version Resources Scripts] + } + } + + # 3. Various locations relative to the executable + # ../lib/foo1.0 (From bin directory in install hierarchy) + # ../../lib/foo1.0 (From bin/arch directory in install hierarchy) + # ../library (From unix directory in build hierarchy) + # + # Remaining locations are out of date (when relevant, they ought to be + # covered by the $::auto_path seach above) and disabled. + # + # ../../library (From unix/arch directory in build hierarchy) + # ../../foo1.0.1/library + # (From unix directory in parallel build hierarchy) + # ../../../foo1.0.1/library + # (From unix/arch directory in parallel build hierarchy) + + set parentDir [file dirname [file dirname [info nameofexecutable]]] + set grandParentDir [file dirname $parentDir] + lappend dirs [file join $parentDir lib $basename$version] + lappend dirs [file join $grandParentDir lib $basename$version] + lappend dirs [file join $parentDir library] + if {0} { + lappend dirs [file join $grandParentDir library] + lappend dirs [file join $grandParentDir $basename$patch library] + lappend dirs [file join [file dirname $grandParentDir] \ + $basename$patch library] + } + } + # make $dirs unique, preserving order + array set seen {} + foreach i $dirs { + # Make sure $i is unique under normalization. Avoid repeated [source]. + if {[interp issafe]} { + # Safe interps have no [file normalize]. 
+ set norm $i + } else { + set norm [file normalize $i] + } + if {[info exists seen($norm)]} { + continue + } + set seen($norm) {} + + set the_library $i + set file [file join $i $initScript] + + # source everything when in a safe interpreter because we have a + # source command, but no file exists command + + if {[interp issafe] || [file exists $file]} { + if {![catch {uplevel #0 [list source $file]} msg opts]} { + return + } + append errors "$file: $msg\n" + append errors [dict get $opts -errorinfo]\n + } + } + unset -nocomplain the_library + set msg "Can't find a usable $initScript in the following directories: \n" + append msg " $dirs\n\n" + append msg "$errors\n\n" + append msg "This probably means that $basename wasn't installed properly.\n" + error $msg +} + + +# ---------------------------------------------------------------------- +# auto_mkindex +# ---------------------------------------------------------------------- +# The following procedures are used to generate the tclIndex file from Tcl +# source files. They use a special safe interpreter to parse Tcl source +# files, writing out index entries as "proc" commands are encountered. This +# implementation won't work in a safe interpreter, since a safe interpreter +# can't create the special parser and mess with its commands. + +if {[interp issafe]} { + return ;# Stop sourcing the file here +} + +# auto_mkindex -- +# Regenerate a tclIndex file from Tcl source files. Takes as argument the +# name of the directory in which the tclIndex file is to be placed, followed +# by any number of glob patterns to use in that directory to locate all of the +# relevant files. +# +# Arguments: +# dir - Name of the directory in which to create an index. + +# args - Any number of additional arguments giving the names of files +# within dir. If no additional are given auto_mkindex will look +# for *.tcl. + +proc auto_mkindex {dir args} { + if {[interp issafe]} { + error "can't generate index within safe interpreter" + } + + set oldDir [pwd] + cd $dir + + append index "# Tcl autoload index file, version 2.0\n" + append index "# This file is generated by the \"auto_mkindex\" command\n" + append index "# and sourced to set up indexing information for one or\n" + append index "# more commands. Typically each line is a command that\n" + append index "# sets an element in the auto_index array, where the\n" + append index "# element name is the name of a command and the value is\n" + append index "# a script that loads the command.\n\n" + if {![llength $args]} { + set args *.tcl + } + + auto_mkindex_parser::init + foreach file [lsort [glob -- {*}$args]] { + try { + append index [auto_mkindex_parser::mkindex $file] + } on error {msg opts} { + cd $oldDir + return -options $opts $msg + } + } + auto_mkindex_parser::cleanup + + set fid [open "tclIndex" w] + puts -nonewline $fid $index + close $fid + cd $oldDir +} + +# Original version of auto_mkindex that just searches the source code for +# "proc" at the beginning of the line. + +proc auto_mkindex_old {dir args} { + set oldDir [pwd] + cd $dir + set dir [pwd] + append index "# Tcl autoload index file, version 2.0\n" + append index "# This file is generated by the \"auto_mkindex\" command\n" + append index "# and sourced to set up indexing information for one or\n" + append index "# more commands. 
Typically each line is a command that\n"
+    append index "# sets an element in the auto_index array, where the\n"
+    append index "# element name is the name of a command and the value is\n"
+    append index "# a script that loads the command.\n\n"
+    if {![llength $args]} {
+        set args *.tcl
+    }
+    foreach file [lsort [glob -- {*}$args]] {
+        set f ""
+        set error [catch {
+            set f [open $file]
+            fconfigure $f -eofchar "\x1A {}"
+            while {[gets $f line] >= 0} {
+                if {[regexp {^proc[ \t]+([^ \t]*)} $line match procName]} {
+                    set procName [lindex [auto_qualify $procName "::"] 0]
+                    append index "set [list auto_index($procName)]"
+                    append index " \[list source \[file join \$dir [list $file]\]\]\n"
+                }
+            }
+            close $f
+        } msg opts]
+        if {$error} {
+            catch {close $f}
+            cd $oldDir
+            return -options $opts $msg
+        }
+    }
+    set f ""
+    set error [catch {
+        set f [open tclIndex w]
+        puts -nonewline $f $index
+        close $f
+        cd $oldDir
+    } msg opts]
+    if {$error} {
+        catch {close $f}
+        cd $oldDir
+        return -options $opts $msg
+    }
+}
+
+# Create a safe interpreter that can be used to parse Tcl source files and
+# generate a tclIndex file for autoloading. This interp contains commands for
+# things that need index entries. Each time a command is executed, it writes
+# an entry out to the index file.
+
+namespace eval auto_mkindex_parser {
+    variable parser ""          ;# parser used to build index
+    variable index ""           ;# maintains index as it is built
+    variable scriptFile ""      ;# name of file being processed
+    variable contextStack ""    ;# stack of namespace scopes
+    variable imports ""         ;# keeps track of all imported cmds
+    variable initCommands       ;# list of commands that create aliases
+    if {![info exists initCommands]} {
+        set initCommands [list]
+    }
+
+    proc init {} {
+        variable parser
+        variable initCommands
+
+        if {![interp issafe]} {
+            set parser [interp create -safe]
+            $parser hide info
+            $parser hide rename
+            $parser hide proc
+            $parser hide namespace
+            $parser hide eval
+            $parser hide puts
+            foreach ns [$parser invokehidden namespace children ::] {
+                # MUST NOT DELETE "::tcl" OR BAD THINGS HAPPEN!
+                if {$ns eq "::tcl"} continue
+                $parser invokehidden namespace delete $ns
+            }
+            foreach cmd [$parser invokehidden info commands ::*] {
+                $parser invokehidden rename $cmd {}
+            }
+            $parser invokehidden proc unknown {args} {}
+
+            # We'll need access to the "namespace" command within the
+            # interp. Put it back, but move it out of the way.
+
+            $parser expose namespace
+            $parser invokehidden rename namespace _%@namespace
+            $parser expose eval
+            $parser invokehidden rename eval _%@eval
+
+            # Install all the registered pseudo-command implementations
+
+            foreach cmd $initCommands {
+                eval $cmd
+            }
+        }
+    }
+    proc cleanup {} {
+        variable parser
+        interp delete $parser
+        unset parser
+    }
+}
+
+# auto_mkindex_parser::mkindex --
+#
+# Used by the "auto_mkindex" command to create a "tclIndex" file for the given
+# Tcl source file. Executes the commands in the file, and handles things like
+# the "proc" command by adding an entry for the index file. Returns a string
+# that represents the index file.
+#
+# Arguments:
+# file          Name of Tcl source file to be indexed.
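+#
+# As an illustrative sketch: for a hypothetical input file "foo.tcl" that
+# defines "proc foo {} {...}", the returned string would contain an entry
+# of the form
+#
+#	set auto_index(foo) [list source [file join $dir foo.tcl]]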
+ +proc auto_mkindex_parser::mkindex {file} { + variable parser + variable index + variable scriptFile + variable contextStack + variable imports + + set scriptFile $file + + set fid [open $file] + fconfigure $fid -eofchar "\x1A {}" + set contents [read $fid] + close $fid + + # There is one problem with sourcing files into the safe interpreter: + # references like "$x" will fail since code is not really being executed + # and variables do not really exist. To avoid this, we replace all $ with + # \0 (literally, the null char) later, when getting proc names we will + # have to reverse this replacement, in case there were any $ in the proc + # name. This will cause a problem if somebody actually tries to have a \0 + # in their proc name. Too bad for them. + set contents [string map [list \$ \0] $contents] + + set index "" + set contextStack "" + set imports "" + + $parser eval $contents + + foreach name $imports { + catch {$parser eval [list _%@namespace forget $name]} + } + return $index +} + +# auto_mkindex_parser::hook command +# +# Registers a Tcl command to evaluate when initializing the child interpreter +# used by the mkindex parser. The command is evaluated in the parent +# interpreter, and can use the variable auto_mkindex_parser::parser to get to +# the child + +proc auto_mkindex_parser::hook {cmd} { + variable initCommands + + lappend initCommands $cmd +} + +# auto_mkindex_parser::slavehook command +# +# Registers a Tcl command to evaluate when initializing the child interpreter +# used by the mkindex parser. The command is evaluated in the child +# interpreter. + +proc auto_mkindex_parser::slavehook {cmd} { + variable initCommands + + # The $parser variable is defined to be the name of the child interpreter + # when this command is used later. + + lappend initCommands "\$parser eval [list $cmd]" +} + +# auto_mkindex_parser::command -- +# +# Registers a new command with the "auto_mkindex_parser" interpreter that +# parses Tcl files. These commands are fake versions of things like the +# "proc" command. When you execute them, they simply write out an entry to a +# "tclIndex" file for auto-loading. +# +# This procedure allows extensions to register their own commands with the +# auto_mkindex facility. For example, a package like [incr Tcl] might +# register a "class" command so that class definitions could be added to a +# "tclIndex" file for auto-loading. +# +# Arguments: +# name Name of command recognized in Tcl files. +# arglist Argument list for command. +# body Implementation of command to handle indexing. + +proc auto_mkindex_parser::command {name arglist body} { + hook [list auto_mkindex_parser::commandInit $name $arglist $body] +} + +# auto_mkindex_parser::commandInit -- +# +# This does the actual work set up by auto_mkindex_parser::command. This is +# called when the interpreter used by the parser is created. +# +# Arguments: +# name Name of command recognized in Tcl files. +# arglist Argument list for command. +# body Implementation of command to handle indexing. + +proc auto_mkindex_parser::commandInit {name arglist body} { + variable parser + + set ns [namespace qualifiers $name] + set tail [namespace tail $name] + if {$ns eq ""} { + set fakeName [namespace current]::_%@fake_$tail + } else { + set fakeName [namespace current]::[string map {:: _} _%@fake_$name] + } + proc $fakeName $arglist $body + + # YUK! Tcl won't let us alias fully qualified command names, so we can't + # handle names like "::itcl::class". 
Instead, we have to build procs with + # the fully qualified names, and have the procs point to the aliases. + + if {[string match *::* $name]} { + set exportCmd [list _%@namespace export [namespace tail $name]] + $parser eval [list _%@namespace eval $ns $exportCmd] + + # The following proc definition does not work if you want to tolerate + # space or something else diabolical in the procedure name, (i.e., + # space in $alias). The following does not work: + # "_%@eval {$alias} \$args" + # because $alias gets concat'ed to $args. The following does not work + # because $cmd is somehow undefined + # "set cmd {$alias} \; _%@eval {\$cmd} \$args" + # A gold star to someone that can make test autoMkindex-3.3 work + # properly + + set alias [namespace tail $fakeName] + $parser invokehidden proc $name {args} "_%@eval {$alias} \$args" + $parser alias $alias $fakeName + } else { + $parser alias $name $fakeName + } + return +} + +# auto_mkindex_parser::fullname -- +# +# Used by commands like "proc" within the auto_mkindex parser. Returns the +# qualified namespace name for the "name" argument. If the "name" does not +# start with "::", elements are added from the current namespace stack to +# produce a qualified name. Then, the name is examined to see whether or not +# it should really be qualified. If the name has more than the leading "::", +# it is returned as a fully qualified name. Otherwise, it is returned as a +# simple name. That way, the Tcl autoloader will recognize it properly. +# +# Arguments: +# name - Name that is being added to index. + +proc auto_mkindex_parser::fullname {name} { + variable contextStack + + if {![string match ::* $name]} { + foreach ns $contextStack { + set name "${ns}::$name" + if {[string match ::* $name]} { + break + } + } + } + + if {[namespace qualifiers $name] eq ""} { + set name [namespace tail $name] + } elseif {![string match ::* $name]} { + set name "::$name" + } + + # Earlier, mkindex replaced all $'s with \0. Now, we have to reverse that + # replacement. + return [string map [list \0 \$] $name] +} + +# auto_mkindex_parser::indexEntry -- +# +# Used by commands like "proc" within the auto_mkindex parser to add a +# correctly-quoted entry to the index. This is shared code so it is done +# *right*, in one place. +# +# Arguments: +# name - Name that is being added to index. + +proc auto_mkindex_parser::indexEntry {name} { + variable index + variable scriptFile + + # We convert all metacharacters to their backslashed form, and pre-split + # the file name that we know about (which will be a proper list, and so + # correctly quoted). + + set name [string range [list \}[fullname $name]] 2 end] + set filenameParts [file split $scriptFile] + + append index [format \ + {set auto_index(%s) [list source [file join $dir %s]]%s} \ + $name $filenameParts \n] + return +} + +if {[llength $::auto_mkindex_parser::initCommands]} { + return +} + +# Register all of the procedures for the auto_mkindex parser that will build +# the "tclIndex" file. + +# AUTO MKINDEX: proc name arglist body +# Adds an entry to the auto index list for the given procedure name. + +auto_mkindex_parser::command proc {name args} { + indexEntry $name +} + +# Conditionally add support for Tcl byte code files. There are some tricky +# details here. First, we need to get the tbcload library initialized in the +# current interpreter. We cannot load tbcload into the child until we have +# done so because it needs access to the tcl_patchLevel variable. 
Second, +# because the package index file may defer loading the library until we invoke +# a command, we need to explicitly invoke auto_load to force it to be loaded. +# This should be a noop if the package has already been loaded + +auto_mkindex_parser::hook { + try { + package require tbcload + } on error {} { + # OK, don't have it so do nothing + } on ok {} { + if {[namespace which -command tbcload::bcproc] eq ""} { + auto_load tbcload::bcproc + } + load {} tbcload $auto_mkindex_parser::parser + + # AUTO MKINDEX: tbcload::bcproc name arglist body + # Adds an entry to the auto index list for the given precompiled + # procedure name. + + auto_mkindex_parser::commandInit tbcload::bcproc {name args} { + indexEntry $name + } + } +} + +# AUTO MKINDEX: namespace eval name command ?arg arg...? +# Adds the namespace name onto the context stack and evaluates the associated +# body of commands. +# +# AUTO MKINDEX: namespace import ?-force? pattern ?pattern...? +# Performs the "import" action in the parser interpreter. This is important +# for any commands contained in a namespace that affect the index. For +# example, a script may say "itcl::class ...", or it may import "itcl::*" and +# then say "class ...". This procedure does the import operation, but keeps +# track of imported patterns so we can remove the imports later. + +auto_mkindex_parser::command namespace {op args} { + switch -- $op { + eval { + variable parser + variable contextStack + + set name [lindex $args 0] + set args [lrange $args 1 end] + + set contextStack [linsert $contextStack 0 $name] + $parser eval [list _%@namespace eval $name] $args + set contextStack [lrange $contextStack 1 end] + } + import { + variable parser + variable imports + foreach pattern $args { + if {$pattern ne "-force"} { + lappend imports $pattern + } + } + catch {$parser eval "_%@namespace import $args"} + } + ensemble { + variable parser + variable contextStack + if {[lindex $args 0] eq "create"} { + set name ::[join [lreverse $contextStack] ::] + catch { + set name [dict get [lrange $args 1 end] -command] + if {![string match ::* $name]} { + set name ::[join [lreverse $contextStack] ::]$name + } + regsub -all ::+ $name :: name + } + # create artificial proc to force an entry in the tclIndex + $parser eval [list ::proc $name {} {}] + } + } + } +} + +# AUTO MKINDEX: oo::class create name ?definition? +# Adds an entry to the auto index list for the given class name. +auto_mkindex_parser::command oo::class {op name {body ""}} { + if {$op eq "create"} { + indexEntry $name + } +} +auto_mkindex_parser::command class {op name {body ""}} { + if {$op eq "create"} { + indexEntry $name + } +} + +return diff --git a/llava/lib/tcl8.6/clock.tcl b/llava/lib/tcl8.6/clock.tcl new file mode 100644 index 0000000000000000000000000000000000000000..b9bbc2c47b9119126313da2eadf0735db2941c4c --- /dev/null +++ b/llava/lib/tcl8.6/clock.tcl @@ -0,0 +1,4551 @@ +#---------------------------------------------------------------------- +# +# clock.tcl -- +# +# This file implements the portions of the [clock] ensemble that are +# coded in Tcl. Refer to the users' manual to see the description of +# the [clock] command and its subcommands. +# +# +#---------------------------------------------------------------------- +# +# Copyright (c) 2004-2007 Kevin B. Kenny +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. 
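+#
+# As a quick orientation, two illustrative calls (evaluated with -gmt 1 so
+# the results are independent of locale and time zone):
+#
+#	clock format 0 -gmt 1 -format {%Y-%m-%d %H:%M:%S}   ;# 1970-01-01 00:00:00
+#	clock scan {1970-01-01} -format %Y-%m-%d -gmt 1     ;# 0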
+# +#---------------------------------------------------------------------- + +# We must have message catalogs that support the root locale, and we need +# access to the Registry on Windows systems. + +uplevel \#0 { + package require msgcat 1.6 + if { $::tcl_platform(platform) eq {windows} } { + if { [catch { package require registry 1.1 }] } { + namespace eval ::tcl::clock [list variable NoRegistry {}] + } + } +} + +# Put the library directory into the namespace for the ensemble so that the +# library code can find message catalogs and time zone definition files. + +namespace eval ::tcl::clock \ + [list variable LibDir [file dirname [info script]]] + +#---------------------------------------------------------------------- +# +# clock -- +# +# Manipulate times. +# +# The 'clock' command manipulates time. Refer to the user documentation for +# the available subcommands and what they do. +# +#---------------------------------------------------------------------- + +namespace eval ::tcl::clock { + + # Export the subcommands + + namespace export format + namespace export clicks + namespace export microseconds + namespace export milliseconds + namespace export scan + namespace export seconds + namespace export add + + # Import the message catalog commands that we use. + + namespace import ::msgcat::mcload + namespace import ::msgcat::mclocale + namespace import ::msgcat::mc + namespace import ::msgcat::mcpackagelocale + +} + +#---------------------------------------------------------------------- +# +# ::tcl::clock::Initialize -- +# +# Finish initializing the 'clock' subsystem +# +# Results: +# None. +# +# Side effects: +# Namespace variable in the 'clock' subsystem are initialized. +# +# The '::tcl::clock::Initialize' procedure initializes the namespace variables +# and root locale message catalog for the 'clock' subsystem. It is broken +# into a procedure rather than simply evaluated as a script so that it will be +# able to use local variables, avoiding the dangers of 'creative writing' as +# in Bug 1185933. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::Initialize {} { + + rename ::tcl::clock::Initialize {} + + variable LibDir + + # Define the Greenwich time zone + + proc InitTZData {} { + variable TZData + array unset TZData + set TZData(:Etc/GMT) { + {-9223372036854775808 0 0 GMT} + } + set TZData(:GMT) $TZData(:Etc/GMT) + set TZData(:Etc/UTC) { + {-9223372036854775808 0 0 UTC} + } + set TZData(:UTC) $TZData(:Etc/UTC) + set TZData(:localtime) {} + } + InitTZData + + mcpackagelocale set {} + ::msgcat::mcpackageconfig set mcfolder [file join $LibDir msgs] + ::msgcat::mcpackageconfig set unknowncmd "" + ::msgcat::mcpackageconfig set changecmd ChangeCurrentLocale + + # Define the message catalog for the root locale. 
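+    # (For example, once this catalog is in place, [mc DATE_FORMAT] resolves
+    # to {%m/%d/%Y} and [mc PM] to {pm} unless a more specific locale below
+    # overrides those keys.)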
+ + ::msgcat::mcmset {} { + AM {am} + BCE {B.C.E.} + CE {C.E.} + DATE_FORMAT {%m/%d/%Y} + DATE_TIME_FORMAT {%a %b %e %H:%M:%S %Y} + DAYS_OF_WEEK_ABBREV { + Sun Mon Tue Wed Thu Fri Sat + } + DAYS_OF_WEEK_FULL { + Sunday Monday Tuesday Wednesday Thursday Friday Saturday + } + GREGORIAN_CHANGE_DATE 2299161 + LOCALE_DATE_FORMAT {%m/%d/%Y} + LOCALE_DATE_TIME_FORMAT {%a %b %e %H:%M:%S %Y} + LOCALE_ERAS {} + LOCALE_NUMERALS { + 00 01 02 03 04 05 06 07 08 09 + 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 + 30 31 32 33 34 35 36 37 38 39 + 40 41 42 43 44 45 46 47 48 49 + 50 51 52 53 54 55 56 57 58 59 + 60 61 62 63 64 65 66 67 68 69 + 70 71 72 73 74 75 76 77 78 79 + 80 81 82 83 84 85 86 87 88 89 + 90 91 92 93 94 95 96 97 98 99 + } + LOCALE_TIME_FORMAT {%H:%M:%S} + LOCALE_YEAR_FORMAT {%EC%Ey} + MONTHS_ABBREV { + Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec + } + MONTHS_FULL { + January February March + April May June + July August September + October November December + } + PM {pm} + TIME_FORMAT {%H:%M:%S} + TIME_FORMAT_12 {%I:%M:%S %P} + TIME_FORMAT_24 {%H:%M} + TIME_FORMAT_24_SECS {%H:%M:%S} + } + + # Define a few Gregorian change dates for other locales. In most cases + # the change date follows a language, because a nation's colonies changed + # at the same time as the nation itself. In many cases, different + # national boundaries existed; the dominating rule is to follow the + # nation's capital. + + # Italy, Spain, Portugal, Poland + + ::msgcat::mcset it GREGORIAN_CHANGE_DATE 2299161 + ::msgcat::mcset es GREGORIAN_CHANGE_DATE 2299161 + ::msgcat::mcset pt GREGORIAN_CHANGE_DATE 2299161 + ::msgcat::mcset pl GREGORIAN_CHANGE_DATE 2299161 + + # France, Austria + + ::msgcat::mcset fr GREGORIAN_CHANGE_DATE 2299227 + + # For Belgium, we follow Southern Netherlands; Liege Diocese changed + # several weeks later. + + ::msgcat::mcset fr_BE GREGORIAN_CHANGE_DATE 2299238 + ::msgcat::mcset nl_BE GREGORIAN_CHANGE_DATE 2299238 + + # Austria + + ::msgcat::mcset de_AT GREGORIAN_CHANGE_DATE 2299527 + + # Hungary + + ::msgcat::mcset hu GREGORIAN_CHANGE_DATE 2301004 + + # Germany, Norway, Denmark (Catholic Germany changed earlier) + + ::msgcat::mcset de_DE GREGORIAN_CHANGE_DATE 2342032 + ::msgcat::mcset nb GREGORIAN_CHANGE_DATE 2342032 + ::msgcat::mcset nn GREGORIAN_CHANGE_DATE 2342032 + ::msgcat::mcset no GREGORIAN_CHANGE_DATE 2342032 + ::msgcat::mcset da GREGORIAN_CHANGE_DATE 2342032 + + # Holland (Brabant, Gelderland, Flanders, Friesland, etc. changed at + # various times) + + ::msgcat::mcset nl GREGORIAN_CHANGE_DATE 2342165 + + # Protestant Switzerland (Catholic cantons changed earlier) + + ::msgcat::mcset fr_CH GREGORIAN_CHANGE_DATE 2361342 + ::msgcat::mcset it_CH GREGORIAN_CHANGE_DATE 2361342 + ::msgcat::mcset de_CH GREGORIAN_CHANGE_DATE 2361342 + + # English speaking countries + + ::msgcat::mcset en GREGORIAN_CHANGE_DATE 2361222 + + # Sweden (had several changes onto and off of the Gregorian calendar) + + ::msgcat::mcset sv GREGORIAN_CHANGE_DATE 2361390 + + # Russia + + ::msgcat::mcset ru GREGORIAN_CHANGE_DATE 2421639 + + # Romania (Transylvania changed earlier - perhaps de_RO should show the + # earlier date?) 
+ + ::msgcat::mcset ro GREGORIAN_CHANGE_DATE 2422063 + + # Greece + + ::msgcat::mcset el GREGORIAN_CHANGE_DATE 2423480 + + #------------------------------------------------------------------ + # + # CONSTANTS + # + #------------------------------------------------------------------ + + # Paths at which binary time zone data for the Olson libraries are known + # to reside on various operating systems + + variable ZoneinfoPaths {} + foreach path { + /usr/share/zoneinfo + /usr/share/lib/zoneinfo + /usr/lib/zoneinfo + /usr/local/etc/zoneinfo + } { + if { [file isdirectory $path] } { + lappend ZoneinfoPaths $path + } + } + + # Define the directories for time zone data and message catalogs. + + variable DataDir [file join $LibDir tzdata] + + # Number of days in the months, in common years and leap years. + + variable DaysInRomanMonthInCommonYear \ + { 31 28 31 30 31 30 31 31 30 31 30 31 } + variable DaysInRomanMonthInLeapYear \ + { 31 29 31 30 31 30 31 31 30 31 30 31 } + variable DaysInPriorMonthsInCommonYear [list 0] + variable DaysInPriorMonthsInLeapYear [list 0] + set i 0 + foreach j $DaysInRomanMonthInCommonYear { + lappend DaysInPriorMonthsInCommonYear [incr i $j] + } + set i 0 + foreach j $DaysInRomanMonthInLeapYear { + lappend DaysInPriorMonthsInLeapYear [incr i $j] + } + + # Another epoch (Hi, Jeff!) + + variable Roddenberry 1946 + + # Integer ranges + + variable MINWIDE -9223372036854775808 + variable MAXWIDE 9223372036854775807 + + # Day before Leap Day + + variable FEB_28 58 + + # Translation table to map Windows TZI onto cities, so that the Olson + # rules can apply. In some cases the mapping is ambiguous, so it's wise + # to specify $::env(TCL_TZ) rather than simply depending on the system + # time zone. + + # The keys are long lists of values obtained from the time zone + # information in the Registry. In order, the list elements are: + # Bias StandardBias DaylightBias + # StandardDate.wYear StandardDate.wMonth StandardDate.wDayOfWeek + # StandardDate.wDay StandardDate.wHour StandardDate.wMinute + # StandardDate.wSecond StandardDate.wMilliseconds + # DaylightDate.wYear DaylightDate.wMonth DaylightDate.wDayOfWeek + # DaylightDate.wDay DaylightDate.wHour DaylightDate.wMinute + # DaylightDate.wSecond DaylightDate.wMilliseconds + # The values are the names of time zones where those rules apply. There + # is considerable ambiguity in certain zones; an attempt has been made to + # make a reasonable guess, but this table needs to be taken with a grain + # of salt. 
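+    # As a worked reading of one entry below (offered as illustration): in
+    # the key mapped to :America/New_York,
+    # {-18000 0 3600 0 11 0 1 2 0 0 0 0 3 0 2 2 0 0 0},
+    # the bias of -18000 seconds places the zone at UTC-05:00 and the
+    # daylight field adds 3600 seconds; standard time resumes on the first
+    # Sunday of November at 02:00 (month 11, weekday 0, day-in-month 1,
+    # hour 2) and DST begins on the second Sunday of March at 02:00
+    # (month 3, weekday 0, day-in-month 2, hour 2).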
+ + variable WinZoneInfo [dict create {*}{ + {-43200 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Pacific/Kwajalein + {-39600 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Pacific/Midway + {-36000 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Pacific/Honolulu + {-32400 0 3600 0 11 0 1 2 0 0 0 0 3 0 2 2 0 0 0} :America/Anchorage + {-28800 0 3600 0 11 0 1 2 0 0 0 0 3 0 2 2 0 0 0} :America/Los_Angeles + {-28800 0 3600 0 10 0 5 2 0 0 0 0 4 0 1 2 0 0 0} :America/Tijuana + {-25200 0 3600 0 11 0 1 2 0 0 0 0 3 0 2 2 0 0 0} :America/Denver + {-25200 0 3600 0 10 0 5 2 0 0 0 0 4 0 1 2 0 0 0} :America/Chihuahua + {-25200 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :America/Phoenix + {-21600 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :America/Regina + {-21600 0 3600 0 11 0 1 2 0 0 0 0 3 0 2 2 0 0 0} :America/Chicago + {-21600 0 3600 0 10 0 5 2 0 0 0 0 4 0 1 2 0 0 0} :America/Mexico_City + {-18000 0 3600 0 11 0 1 2 0 0 0 0 3 0 2 2 0 0 0} :America/New_York + {-18000 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :America/Indianapolis + {-14400 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :America/Caracas + {-14400 0 3600 0 3 6 2 23 59 59 999 0 10 6 2 23 59 59 999} + :America/Santiago + {-14400 0 3600 0 2 0 5 2 0 0 0 0 11 0 1 2 0 0 0} :America/Manaus + {-14400 0 3600 0 11 0 1 2 0 0 0 0 3 0 2 2 0 0 0} :America/Halifax + {-12600 0 3600 0 10 0 5 2 0 0 0 0 4 0 1 2 0 0 0} :America/St_Johns + {-10800 0 3600 0 2 0 2 2 0 0 0 0 10 0 3 2 0 0 0} :America/Sao_Paulo + {-10800 0 3600 0 10 0 5 2 0 0 0 0 4 0 1 2 0 0 0} :America/Godthab + {-10800 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :America/Buenos_Aires + {-10800 0 3600 0 2 0 5 2 0 0 0 0 11 0 1 2 0 0 0} :America/Bahia + {-10800 0 3600 0 3 0 2 2 0 0 0 0 10 0 1 2 0 0 0} :America/Montevideo + {-7200 0 3600 0 9 0 5 2 0 0 0 0 3 0 5 2 0 0 0} :America/Noronha + {-3600 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Atlantic/Azores + {-3600 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Atlantic/Cape_Verde + {0 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :UTC + {0 0 3600 0 10 0 5 2 0 0 0 0 3 0 5 1 0 0 0} :Europe/London + {3600 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Africa/Kinshasa + {3600 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :CET + {7200 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Africa/Harare + {7200 0 3600 0 9 4 5 23 59 59 0 0 4 4 5 23 59 59 0} + :Africa/Cairo + {7200 0 3600 0 10 0 5 4 0 0 0 0 3 0 5 3 0 0 0} :Europe/Helsinki + {7200 0 3600 0 9 0 3 2 0 0 0 0 3 5 5 2 0 0 0} :Asia/Jerusalem + {7200 0 3600 0 9 0 5 1 0 0 0 0 3 0 5 0 0 0 0} :Europe/Bucharest + {7200 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Europe/Athens + {7200 0 3600 0 9 5 5 1 0 0 0 0 3 4 5 0 0 0 0} :Asia/Amman + {7200 0 3600 0 10 6 5 23 59 59 999 0 3 0 5 0 0 0 0} + :Asia/Beirut + {7200 0 -3600 0 4 0 1 2 0 0 0 0 9 0 1 2 0 0 0} :Africa/Windhoek + {10800 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Riyadh + {10800 0 3600 0 10 0 1 4 0 0 0 0 4 0 1 3 0 0 0} :Asia/Baghdad + {10800 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Europe/Moscow + {12600 0 3600 0 9 2 4 2 0 0 0 0 3 0 1 2 0 0 0} :Asia/Tehran + {14400 0 3600 0 10 0 5 5 0 0 0 0 3 0 5 4 0 0 0} :Asia/Baku + {14400 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Muscat + {14400 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Asia/Tbilisi + {16200 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Kabul + {18000 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Karachi + {18000 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Asia/Yekaterinburg + {19800 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Calcutta + {20700 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Katmandu + {21600 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Dhaka + 
{21600 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Asia/Novosibirsk + {23400 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Rangoon + {25200 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Bangkok + {25200 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Asia/Krasnoyarsk + {28800 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Chongqing + {28800 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Asia/Irkutsk + {32400 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Asia/Tokyo + {32400 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Asia/Yakutsk + {34200 0 3600 0 3 0 5 3 0 0 0 0 10 0 5 2 0 0 0} :Australia/Adelaide + {34200 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Australia/Darwin + {36000 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Australia/Brisbane + {36000 0 3600 0 10 0 5 3 0 0 0 0 3 0 5 2 0 0 0} :Asia/Vladivostok + {36000 0 3600 0 3 0 5 3 0 0 0 0 10 0 1 2 0 0 0} :Australia/Hobart + {36000 0 3600 0 3 0 5 3 0 0 0 0 10 0 5 2 0 0 0} :Australia/Sydney + {39600 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Pacific/Noumea + {43200 0 3600 0 3 0 3 3 0 0 0 0 10 0 1 2 0 0 0} :Pacific/Auckland + {43200 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Pacific/Fiji + {46800 0 3600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0} :Pacific/Tongatapu + }] + + # Groups of fields that specify the date, priorities, and code bursts that + # determine Julian Day Number given those groups. The code in [clock + # scan] will choose the highest priority (lowest numbered) set of fields + # that determines the date. + + variable DateParseActions { + + { seconds } 0 {} + + { julianDay } 1 {} + + { era century yearOfCentury month dayOfMonth } 2 { + dict set date year [expr { 100 * [dict get $date century] + + [dict get $date yearOfCentury] }] + set date [GetJulianDayFromEraYearMonthDay $date[set date {}] \ + $changeover] + } + { era century yearOfCentury dayOfYear } 2 { + dict set date year [expr { 100 * [dict get $date century] + + [dict get $date yearOfCentury] }] + set date [GetJulianDayFromEraYearDay $date[set date {}] \ + $changeover] + } + + { century yearOfCentury month dayOfMonth } 3 { + dict set date era CE + dict set date year [expr { 100 * [dict get $date century] + + [dict get $date yearOfCentury] }] + set date [GetJulianDayFromEraYearMonthDay $date[set date {}] \ + $changeover] + } + { century yearOfCentury dayOfYear } 3 { + dict set date era CE + dict set date year [expr { 100 * [dict get $date century] + + [dict get $date yearOfCentury] }] + set date [GetJulianDayFromEraYearDay $date[set date {}] \ + $changeover] + } + { iso8601Century iso8601YearOfCentury iso8601Week dayOfWeek } 3 { + dict set date era CE + dict set date iso8601Year \ + [expr { 100 * [dict get $date iso8601Century] + + [dict get $date iso8601YearOfCentury] }] + set date [GetJulianDayFromEraYearWeekDay $date[set date {}] \ + $changeover] + } + + { yearOfCentury month dayOfMonth } 4 { + set date [InterpretTwoDigitYear $date[set date {}] $baseTime] + dict set date era CE + set date [GetJulianDayFromEraYearMonthDay $date[set date {}] \ + $changeover] + } + { yearOfCentury dayOfYear } 4 { + set date [InterpretTwoDigitYear $date[set date {}] $baseTime] + dict set date era CE + set date [GetJulianDayFromEraYearDay $date[set date {}] \ + $changeover] + } + { iso8601YearOfCentury iso8601Week dayOfWeek } 4 { + set date [InterpretTwoDigitYear \ + $date[set date {}] $baseTime \ + iso8601YearOfCentury iso8601Year] + dict set date era CE + set date [GetJulianDayFromEraYearWeekDay $date[set date {}] \ + $changeover] + } + + { month dayOfMonth } 5 { + set date [AssignBaseYear $date[set date {}] \ + $baseTime 
$timeZone $changeover] + set date [GetJulianDayFromEraYearMonthDay $date[set date {}] \ + $changeover] + } + { dayOfYear } 5 { + set date [AssignBaseYear $date[set date {}] \ + $baseTime $timeZone $changeover] + set date [GetJulianDayFromEraYearDay $date[set date {}] \ + $changeover] + } + { iso8601Week dayOfWeek } 5 { + set date [AssignBaseIso8601Year $date[set date {}] \ + $baseTime $timeZone $changeover] + set date [GetJulianDayFromEraYearWeekDay $date[set date {}] \ + $changeover] + } + + { dayOfMonth } 6 { + set date [AssignBaseMonth $date[set date {}] \ + $baseTime $timeZone $changeover] + set date [GetJulianDayFromEraYearMonthDay $date[set date {}] \ + $changeover] + } + + { dayOfWeek } 7 { + set date [AssignBaseWeek $date[set date {}] \ + $baseTime $timeZone $changeover] + set date [GetJulianDayFromEraYearWeekDay $date[set date {}] \ + $changeover] + } + + {} 8 { + set date [AssignBaseJulianDay $date[set date {}] \ + $baseTime $timeZone $changeover] + } + } + + # Groups of fields that specify time of day, priorities, and code that + # processes them + + variable TimeParseActions { + + seconds 1 {} + + { hourAMPM minute second amPmIndicator } 2 { + dict set date secondOfDay [InterpretHMSP $date] + } + { hour minute second } 2 { + dict set date secondOfDay [InterpretHMS $date] + } + + { hourAMPM minute amPmIndicator } 3 { + dict set date second 0 + dict set date secondOfDay [InterpretHMSP $date] + } + { hour minute } 3 { + dict set date second 0 + dict set date secondOfDay [InterpretHMS $date] + } + + { hourAMPM amPmIndicator } 4 { + dict set date minute 0 + dict set date second 0 + dict set date secondOfDay [InterpretHMSP $date] + } + { hour } 4 { + dict set date minute 0 + dict set date second 0 + dict set date secondOfDay [InterpretHMS $date] + } + + { } 5 { + dict set date secondOfDay 0 + } + } + + # Legacy time zones, used primarily for parsing RFC822 dates. + + variable LegacyTimeZone [dict create \ + gmt +0000 \ + ut +0000 \ + utc +0000 \ + bst +0100 \ + wet +0000 \ + wat -0100 \ + at -0200 \ + nft -0330 \ + nst -0330 \ + ndt -0230 \ + ast -0400 \ + adt -0300 \ + est -0500 \ + edt -0400 \ + cst -0600 \ + cdt -0500 \ + mst -0700 \ + mdt -0600 \ + pst -0800 \ + pdt -0700 \ + yst -0900 \ + ydt -0800 \ + hst -1000 \ + hdt -0900 \ + cat -1000 \ + ahst -1000 \ + nt -1100 \ + idlw -1200 \ + cet +0100 \ + cest +0200 \ + met +0100 \ + mewt +0100 \ + mest +0200 \ + swt +0100 \ + sst +0200 \ + fwt +0100 \ + fst +0200 \ + eet +0200 \ + eest +0300 \ + bt +0300 \ + it +0330 \ + zp4 +0400 \ + zp5 +0500 \ + ist +0530 \ + zp6 +0600 \ + wast +0700 \ + wadt +0800 \ + jt +0730 \ + cct +0800 \ + jst +0900 \ + kst +0900 \ + cast +0930 \ + jdt +1000 \ + kdt +1000 \ + cadt +1030 \ + east +1000 \ + eadt +1030 \ + gst +1000 \ + nzt +1200 \ + nzst +1200 \ + nzdt +1300 \ + idle +1200 \ + a +0100 \ + b +0200 \ + c +0300 \ + d +0400 \ + e +0500 \ + f +0600 \ + g +0700 \ + h +0800 \ + i +0900 \ + k +1000 \ + l +1100 \ + m +1200 \ + n -0100 \ + o -0200 \ + p -0300 \ + q -0400 \ + r -0500 \ + s -0600 \ + t -0700 \ + u -0800 \ + v -0900 \ + w -1000 \ + x -1100 \ + y -1200 \ + z +0000 \ + ] + + # Caches + + variable LocaleNumeralCache {}; # Dictionary whose keys are locale + # names and whose values are pairs + # comprising regexes matching numerals + # in the given locales and dictionaries + # mapping the numerals to their numeric + # values. + # variable CachedSystemTimeZone; # If 'CachedSystemTimeZone' exists, + # it contains the value of the + # system time zone, as determined from + # the environment. 
+ variable TimeZoneBad {}; # Dictionary whose keys are time zone + # names and whose values are 1 if + # the time zone is unknown and 0 + # if it is known. + variable TZData; # Array whose keys are time zone names + # and whose values are lists of quads + # comprising start time, UTC offset, + # Daylight Saving Time indicator, and + # time zone abbreviation. + variable FormatProc; # Array mapping format group + # and locale to the name of a procedure + # that renders the given format +} +::tcl::clock::Initialize + +#---------------------------------------------------------------------- +# +# clock format -- +# +# Formats a count of seconds since the Posix Epoch as a time of day. +# +# The 'clock format' command formats times of day for output. Refer to the +# user documentation to see what it does. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::format { args } { + + variable FormatProc + variable TZData + + lassign [ParseFormatArgs {*}$args] format locale timezone + set locale [string tolower $locale] + set clockval [lindex $args 0] + + # Get the data for time changes in the given zone + + if {$timezone eq ""} { + set timezone [GetSystemTimeZone] + } + if {![info exists TZData($timezone)]} { + if {[catch {SetupTimeZone $timezone} retval opts]} { + dict unset opts -errorinfo + return -options $opts $retval + } + } + + # Build a procedure to format the result. Cache the built procedure's name + # in the 'FormatProc' array to avoid losing its internal representation, + # which contains the name resolution. + + set procName formatproc'$format'$locale + set procName [namespace current]::[string map {: {\:} \\ {\\}} $procName] + if {[info exists FormatProc($procName)]} { + set procName $FormatProc($procName) + } else { + set FormatProc($procName) \ + [ParseClockFormatFormat $procName $format $locale] + } + + return [$procName $clockval $timezone] + +} + +#---------------------------------------------------------------------- +# +# ParseClockFormatFormat -- +# +# Builds and caches a procedure that formats a time value. +# +# Parameters: +# format -- Format string to use +# locale -- Locale in which the format string is to be interpreted +# +# Results: +# Returns the name of the newly-built procedure. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ParseClockFormatFormat {procName format locale} { + + if {[namespace which $procName] ne {}} { + return $procName + } + + # Map away the locale-dependent composite format groups + + EnterLocale $locale + + # Change locale if a fresh locale has been given on the command line. 
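+    # (A sketch of the compiled result, assuming the simple format {%H:%M}:
+    # ParseClockFormatFormat2 generates a proc whose body reduces to roughly
+    #     ::format {%02d:%02d} <hour expression> <minute expression>
+    # where both expressions are computed from the localSeconds field of the
+    # date dictionary.)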
+ + try { + return [ParseClockFormatFormat2 $format $locale $procName] + } trap CLOCK {result opts} { + dict unset opts -errorinfo + return -options $opts $result + } +} + +proc ::tcl::clock::ParseClockFormatFormat2 {format locale procName} { + set didLocaleEra 0 + set didLocaleNumerals 0 + set preFormatCode \ + [string map [list @GREGORIAN_CHANGE_DATE@ \ + [mc GREGORIAN_CHANGE_DATE]] \ + { + variable TZData + set date [GetDateFields $clockval \ + $TZData($timezone) \ + @GREGORIAN_CHANGE_DATE@] + }] + set formatString {} + set substituents {} + set state {} + + set format [LocalizeFormat $locale $format] + + foreach char [split $format {}] { + switch -exact -- $state { + {} { + if { [string equal % $char] } { + set state percent + } else { + append formatString $char + } + } + percent { # Character following a '%' character + set state {} + switch -exact -- $char { + % { # A literal character, '%' + append formatString %% + } + a { # Day of week, abbreviated + append formatString %s + append substituents \ + [string map \ + [list @DAYS_OF_WEEK_ABBREV@ \ + [list [mc DAYS_OF_WEEK_ABBREV]]] \ + { [lindex @DAYS_OF_WEEK_ABBREV@ \ + [expr {[dict get $date dayOfWeek] \ + % 7}]]}] + } + A { # Day of week, spelt out. + append formatString %s + append substituents \ + [string map \ + [list @DAYS_OF_WEEK_FULL@ \ + [list [mc DAYS_OF_WEEK_FULL]]] \ + { [lindex @DAYS_OF_WEEK_FULL@ \ + [expr {[dict get $date dayOfWeek] \ + % 7}]]}] + } + b - h { # Name of month, abbreviated. + append formatString %s + append substituents \ + [string map \ + [list @MONTHS_ABBREV@ \ + [list [mc MONTHS_ABBREV]]] \ + { [lindex @MONTHS_ABBREV@ \ + [expr {[dict get $date month]-1}]]}] + } + B { # Name of month, spelt out + append formatString %s + append substituents \ + [string map \ + [list @MONTHS_FULL@ \ + [list [mc MONTHS_FULL]]] \ + { [lindex @MONTHS_FULL@ \ + [expr {[dict get $date month]-1}]]}] + } + C { # Century number + append formatString %02d + append substituents \ + { [expr {[dict get $date year] / 100}]} + } + d { # Day of month, with leading zero + append formatString %02d + append substituents { [dict get $date dayOfMonth]} + } + e { # Day of month, without leading zero + append formatString %2d + append substituents { [dict get $date dayOfMonth]} + } + E { # Format group in a locale-dependent + # alternative era + set state percentE + if {!$didLocaleEra} { + append preFormatCode \ + [string map \ + [list @LOCALE_ERAS@ \ + [list [mc LOCALE_ERAS]]] \ + { + set date [GetLocaleEra \ + $date[set date {}] \ + @LOCALE_ERAS@]}] \n + set didLocaleEra 1 + } + if {!$didLocaleNumerals} { + append preFormatCode \ + [list set localeNumerals \ + [mc LOCALE_NUMERALS]] \n + set didLocaleNumerals 1 + } + } + g { # Two-digit year relative to ISO8601 + # week number + append formatString %02d + append substituents \ + { [expr { [dict get $date iso8601Year] % 100 }]} + } + G { # Four-digit year relative to ISO8601 + # week number + append formatString %02d + append substituents { [dict get $date iso8601Year]} + } + H { # Hour in the 24-hour day, leading zero + append formatString %02d + append substituents \ + { [expr { [dict get $date localSeconds] \ + / 3600 % 24}]} + } + I { # Hour AM/PM, with leading zero + append formatString %02d + append substituents \ + { [expr { ( ( ( [dict get $date localSeconds] \ + % 86400 ) \ + + 86400 \ + - 3600 ) \ + / 3600 ) \ + % 12 + 1 }] } + } + j { # Day of year (001-366) + append formatString %03d + append substituents { [dict get $date dayOfYear]} + } + J { # Julian Day Number + append 
formatString %07ld + append substituents { [dict get $date julianDay]} + } + k { # Hour (0-23), no leading zero + append formatString %2d + append substituents \ + { [expr { [dict get $date localSeconds] + / 3600 + % 24 }]} + } + l { # Hour (12-11), no leading zero + append formatString %2d + append substituents \ + { [expr { ( ( ( [dict get $date localSeconds] + % 86400 ) + + 86400 + - 3600 ) + / 3600 ) + % 12 + 1 }]} + } + m { # Month number, leading zero + append formatString %02d + append substituents { [dict get $date month]} + } + M { # Minute of the hour, leading zero + append formatString %02d + append substituents \ + { [expr { [dict get $date localSeconds] + / 60 + % 60 }]} + } + n { # A literal newline + append formatString \n + } + N { # Month number, no leading zero + append formatString %2d + append substituents { [dict get $date month]} + } + O { # A format group in the locale's + # alternative numerals + set state percentO + if {!$didLocaleNumerals} { + append preFormatCode \ + [list set localeNumerals \ + [mc LOCALE_NUMERALS]] \n + set didLocaleNumerals 1 + } + } + p { # Localized 'AM' or 'PM' indicator + # converted to uppercase + append formatString %s + append preFormatCode \ + [list set AM [string toupper [mc AM]]] \n \ + [list set PM [string toupper [mc PM]]] \n + append substituents \ + { [expr {(([dict get $date localSeconds] + % 86400) < 43200) ? + $AM : $PM}]} + } + P { # Localized 'AM' or 'PM' indicator + append formatString %s + append preFormatCode \ + [list set am [mc AM]] \n \ + [list set pm [mc PM]] \n + append substituents \ + { [expr {(([dict get $date localSeconds] + % 86400) < 43200) ? + $am : $pm}]} + + } + Q { # Hi, Jeff! + append formatString %s + append substituents { [FormatStarDate $date]} + } + s { # Seconds from the Posix Epoch + append formatString %s + append substituents { [dict get $date seconds]} + } + S { # Second of the minute, with + # leading zero + append formatString %02d + append substituents \ + { [expr { [dict get $date localSeconds] + % 60 }]} + } + t { # A literal tab character + append formatString \t + } + u { # Day of the week (1-Monday, 7-Sunday) + append formatString %1d + append substituents { [dict get $date dayOfWeek]} + } + U { # Week of the year (00-53). The + # first Sunday of the year is the + # first day of week 01 + append formatString %02d + append preFormatCode { + set dow [dict get $date dayOfWeek] + if { $dow == 7 } { + set dow 0 + } + incr dow + set UweekNumber \ + [expr { ( [dict get $date dayOfYear] + - $dow + 7 ) + / 7 }] + } + append substituents { $UweekNumber} + } + V { # The ISO8601 week number + append formatString %02d + append substituents { [dict get $date iso8601Week]} + } + w { # Day of the week (0-Sunday, + # 6-Saturday) + append formatString %1d + append substituents \ + { [expr { [dict get $date dayOfWeek] % 7 }]} + } + W { # Week of the year (00-53). The first + # Monday of the year is the first day + # of week 01. 
+ append preFormatCode { + set WweekNumber \ + [expr { ( [dict get $date dayOfYear] + - [dict get $date dayOfWeek] + + 7 ) + / 7 }] + } + append formatString %02d + append substituents { $WweekNumber} + } + y { # The two-digit year of the century + append formatString %02d + append substituents \ + { [expr { [dict get $date year] % 100 }]} + } + Y { # The four-digit year + append formatString %04d + append substituents { [dict get $date year]} + } + z { # The time zone as hours and minutes + # east (+) or west (-) of Greenwich + append formatString %s + append substituents { [FormatNumericTimeZone \ + [dict get $date tzOffset]]} + } + Z { # The name of the time zone + append formatString %s + append substituents { [dict get $date tzName]} + } + % { # A literal percent character + append formatString %% + } + default { # An unknown escape sequence + append formatString %% $char + } + } + } + percentE { # Character following %E + set state {} + switch -exact -- $char { + E { + append formatString %s + append substituents { } \ + [string map \ + [list @BCE@ [list [mc BCE]] \ + @CE@ [list [mc CE]]] \ + {[dict get {BCE @BCE@ CE @CE@} \ + [dict get $date era]]}] + } + C { # Locale-dependent era + append formatString %s + append substituents { [dict get $date localeEra]} + } + y { # Locale-dependent year of the era + append preFormatCode { + set y [dict get $date localeYear] + if { $y >= 0 && $y < 100 } { + set Eyear [lindex $localeNumerals $y] + } else { + set Eyear $y + } + } + append formatString %s + append substituents { $Eyear} + } + default { # Unknown %E format group + append formatString %%E $char + } + } + } + percentO { # Character following %O + set state {} + switch -exact -- $char { + d - e { # Day of the month in alternative + # numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [dict get $date dayOfMonth]]} + } + H - k { # Hour of the day in alternative + # numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [expr { [dict get $date localSeconds] + / 3600 + % 24 }]]} + } + I - l { # Hour (12-11) AM/PM in alternative + # numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [expr { ( ( ( [dict get $date localSeconds] + % 86400 ) + + 86400 + - 3600 ) + / 3600 ) + % 12 + 1 }]]} + } + m { # Month number in alternative numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals [dict get $date month]]} + } + M { # Minute of the hour in alternative + # numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [expr { [dict get $date localSeconds] + / 60 + % 60 }]]} + } + S { # Second of the minute in alternative + # numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [expr { [dict get $date localSeconds] + % 60 }]]} + } + u { # Day of the week (Monday=1,Sunday=7) + # in alternative numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [dict get $date dayOfWeek]]} + } + w { # Day of the week (Sunday=0,Saturday=6) + # in alternative numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [expr { [dict get $date dayOfWeek] % 7 }]]} + } + y { # Year of the century in alternative + # numerals + append formatString %s + append substituents \ + { [lindex $localeNumerals \ + [expr { [dict get $date year] % 100 }]]} + } + default { # Unknown format group + append formatString %%O $char + } + } + } + } + } + + # Clean up 
any improperly terminated groups
+
+    switch -exact -- $state {
+        percent {
+            append formatString %%
+        }
+        percentE {
+            append formatString %%E
+        }
+        percentO {
+            append formatString %%O
+        }
+    }
+
+    proc $procName {clockval timezone} "
+        $preFormatCode
+        return \[::format [list $formatString] $substituents\]
+    "
+
+    # puts [list $procName [info args $procName] [info body $procName]]
+
+    return $procName
+}
+
+#----------------------------------------------------------------------
+#
+# clock scan --
+#
+# Scans a time of day on input and returns a count of seconds since the
+# Posix Epoch.
+#
+# The 'clock scan' command scans times of day on input. Refer to the user
+# documentation to see what it does.
+#
+#----------------------------------------------------------------------
+
+proc ::tcl::clock::scan { args } {
+
+    set format {}
+
+    # Check the count of args
+
+    if { [llength $args] < 1 || [llength $args] % 2 != 1 } {
+        set cmdName "clock scan"
+        return -code error \
+            -errorcode [list CLOCK wrongNumArgs] \
+            "wrong \# args: should be\
+             \"$cmdName string\
+             ?-base seconds?\
+             ?-format string? ?-gmt boolean?\
+             ?-locale LOCALE? ?-timezone ZONE?\""
+    }
+
+    # Set defaults
+
+    set base [clock seconds]
+    set string [lindex $args 0]
+    set format {}
+    set gmt 0
+    set locale c
+    set timezone [GetSystemTimeZone]
+
+    # Pick up command line options.
+
+    foreach { flag value } [lreplace $args 0 0] {
+        switch -exact -- $flag {
+            -b - -ba - -bas - -base {
+                set base $value
+            }
+            -f - -fo - -for - -form - -forma - -format {
+                set saw(-format) {}
+                set format $value
+            }
+            -g - -gm - -gmt {
+                set saw(-gmt) {}
+                set gmt $value
+            }
+            -l - -lo - -loc - -loca - -local - -locale {
+                set saw(-locale) {}
+                set locale [string tolower $value]
+            }
+            -t - -ti - -tim - -time - -timez - -timezo - -timezon - -timezone {
+                set saw(-timezone) {}
+                set timezone $value
+            }
+            default {
+                return -code error \
+                    -errorcode [list CLOCK badOption $flag] \
+                    "bad option \"$flag\",\
+                     must be -base, -format, -gmt, -locale or -timezone"
+            }
+        }
+    }
+
+    # Check options for validity
+
+    if { [info exists saw(-gmt)] && [info exists saw(-timezone)] } {
+        return -code error \
+            -errorcode [list CLOCK gmtWithTimezone] \
+            "cannot use -gmt and -timezone in same call"
+    }
+    if { [catch { expr { wide($base) } } result] } {
+        return -code error "expected integer but got \"$base\""
+    }
+    if { ![string is boolean -strict $gmt] } {
+        return -code error "expected boolean value but got \"$gmt\""
+    } elseif { $gmt } {
+        set timezone :GMT
+    }
+
+    if { ![info exists saw(-format)] } {
+        # Perhaps someday we'll localize the legacy code. Right now, it's not
+        # localized.
+        if { [info exists saw(-locale)] } {
+            return -code error \
+                -errorcode [list CLOCK flagWithLegacyFormat] \
+                "legacy \[clock scan\] does not support -locale"
+        }
+        return [FreeScan $string $base $timezone $locale]
+    }
+
+    # Change locale if a fresh locale has been given on the command line.
+ + EnterLocale $locale + + try { + # Map away the locale-dependent composite format groups + + set scanner [ParseClockScanFormat $format $locale] + return [$scanner $string $base $timezone] + } trap CLOCK {result opts} { + # Conceal location of generation of expected errors + dict unset opts -errorinfo + return -options $opts $result + } +} + +#---------------------------------------------------------------------- +# +# FreeScan -- +# +# Scans a time in free format +# +# Parameters: +# string - String containing the time to scan +# base - Base time, expressed in seconds from the Epoch +# timezone - Default time zone in which the time will be expressed +# locale - (Unused) Name of the locale where the time will be scanned. +# +# Results: +# Returns the date and time extracted from the string in seconds from +# the epoch +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::FreeScan { string base timezone locale } { + + variable TZData + + # Get the data for time changes in the given zone + + try { + SetupTimeZone $timezone + } on error {retval opts} { + dict unset opts -errorinfo + return -options $opts $retval + } + + # Extract year, month and day from the base time for the parser to use as + # defaults + + set date [GetDateFields $base $TZData($timezone) 2361222] + dict set date secondOfDay [expr { + [dict get $date localSeconds] % 86400 + }] + + # Parse the date. The parser will return a list comprising date, time, + # time zone, relative month/day/seconds, relative weekday, ordinal month. + + try { + set scanned [Oldscan $string \ + [dict get $date year] \ + [dict get $date month] \ + [dict get $date dayOfMonth]] + lassign $scanned \ + parseDate parseTime parseZone parseRel \ + parseWeekday parseOrdinalMonth + } on error message { + return -code error \ + "unable to convert date-time string \"$string\": $message" + } + + # If the caller supplied a date in the string, update the 'date' dict with + # the value. If the caller didn't specify a time with the date, default to + # midnight. + + if { [llength $parseDate] > 0 } { + lassign $parseDate y m d + if { $y < 100 } { + if { $y >= 39 } { + incr y 1900 + } else { + incr y 2000 + } + } + dict set date era CE + dict set date year $y + dict set date month $m + dict set date dayOfMonth $d + if { $parseTime eq {} } { + set parseTime 0 + } + } + + # If the caller supplied a time zone in the string, it comes back as a + # two-element list; the first element is the number of minutes east of + # Greenwich, and the second is a Daylight Saving Time indicator (1 == yes, + # 0 == no, -1 == unknown). We make it into a time zone indicator of + # +-hhmm. 
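+    #
+    # Worked example (the scanned values are assumptions): a parseZone of
+    # {-300 1}, i.e. 300 minutes west of Greenwich with DST in effect,
+    # becomes 60*(-300) + 3600*1 = -14400 seconds, which
+    # FormatNumericTimeZone renders as -0400.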
+ + if { [llength $parseZone] > 0 } { + lassign $parseZone minEast dstFlag + set timezone [FormatNumericTimeZone \ + [expr { 60 * $minEast + 3600 * $dstFlag }]] + SetupTimeZone $timezone + } + dict set date tzName $timezone + + # Assemble date, time, zone into seconds-from-epoch + + set date [GetJulianDayFromEraYearMonthDay $date[set date {}] 2361222] + if { $parseTime ne {} } { + dict set date secondOfDay $parseTime + } elseif { [llength $parseWeekday] != 0 + || [llength $parseOrdinalMonth] != 0 + || ( [llength $parseRel] != 0 + && ( [lindex $parseRel 0] != 0 + || [lindex $parseRel 1] != 0 ) ) } { + dict set date secondOfDay 0 + } + + dict set date localSeconds [expr { + -210866803200 + + ( 86400 * wide([dict get $date julianDay]) ) + + [dict get $date secondOfDay] + }] + dict set date tzName $timezone + set date [ConvertLocalToUTC $date[set date {}] $TZData($timezone) 2361222] + set seconds [dict get $date seconds] + + # Do relative times + + if { [llength $parseRel] > 0 } { + lassign $parseRel relMonth relDay relSecond + set seconds [add $seconds \ + $relMonth months $relDay days $relSecond seconds \ + -timezone $timezone -locale $locale] + } + + # Do relative weekday + + if { [llength $parseWeekday] > 0 } { + lassign $parseWeekday dayOrdinal dayOfWeek + set date2 [GetDateFields $seconds $TZData($timezone) 2361222] + dict set date2 era CE + set jdwkday [WeekdayOnOrBefore $dayOfWeek [expr { + [dict get $date2 julianDay] + 6 + }]] + incr jdwkday [expr { 7 * $dayOrdinal }] + if { $dayOrdinal > 0 } { + incr jdwkday -7 + } + dict set date2 secondOfDay \ + [expr { [dict get $date2 localSeconds] % 86400 }] + dict set date2 julianDay $jdwkday + dict set date2 localSeconds [expr { + -210866803200 + + ( 86400 * wide([dict get $date2 julianDay]) ) + + [dict get $date secondOfDay] + }] + dict set date2 tzName $timezone + set date2 [ConvertLocalToUTC $date2[set date2 {}] $TZData($timezone) \ + 2361222] + set seconds [dict get $date2 seconds] + + } + + # Do relative month + + if { [llength $parseOrdinalMonth] > 0 } { + lassign $parseOrdinalMonth monthOrdinal monthNumber + if { $monthOrdinal > 0 } { + set monthDiff [expr { $monthNumber - [dict get $date month] }] + if { $monthDiff <= 0 } { + incr monthDiff 12 + } + incr monthOrdinal -1 + } else { + set monthDiff [expr { [dict get $date month] - $monthNumber }] + if { $monthDiff >= 0 } { + incr monthDiff -12 + } + incr monthOrdinal + } + set seconds [add $seconds $monthOrdinal years $monthDiff months \ + -timezone $timezone -locale $locale] + } + + return $seconds +} + + +#---------------------------------------------------------------------- +# +# ParseClockScanFormat -- +# +# Parses a format string given to [clock scan -format] +# +# Parameters: +# formatString - The format being parsed +# locale - The current locale +# +# Results: +# Constructs and returns a procedure that accepts the string being +# scanned, the base time, and the time zone. The procedure will either +# return the scanned time or else throw an error that should be rethrown +# to the caller of [clock scan] +# +# Side effects: +# The given procedure is defined in the ::tcl::clock namespace. Scan +# procedures are not deleted once installed. +# +# Why do we parse dates by defining a procedure to parse them? The reason is +# that by doing so, we have one convenient place to cache all the information: +# the regular expressions that match the patterns (which will be compiled), +# the code that assembles the date information, everything lands in one place. 
+# In this way, when a given format is reused at run time, all the information +# of how to apply it is available in a single place. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ParseClockScanFormat {formatString locale} { + # Check whether the format has been parsed previously, and return the + # existing recognizer if it has. + + set procName scanproc'$formatString'$locale + set procName [namespace current]::[string map {: {\:} \\ {\\}} $procName] + if { [namespace which $procName] != {} } { + return $procName + } + + variable DateParseActions + variable TimeParseActions + + # Localize the %x, %X, etc. groups + + set formatString [LocalizeFormat $locale $formatString] + + # Condense whitespace + + regsub -all {[[:space:]]+} $formatString { } formatString + + # Walk through the groups of the format string. In this loop, we + # accumulate: + # - a regular expression that matches the string, + # - the count of capturing brackets in the regexp + # - a set of code that post-processes the fields captured by the regexp, + # - a dictionary whose keys are the names of fields that are present + # in the format string. + + set re {^[[:space:]]*} + set captureCount 0 + set postcode {} + set fieldSet [dict create] + set fieldCount 0 + set postSep {} + set state {} + + foreach c [split $formatString {}] { + switch -exact -- $state { + {} { + if { $c eq "%" } { + set state % + } elseif { $c eq " " } { + append re {[[:space:]]+} + } else { + if { ! [string is alnum $c] } { + append re "\\" + } + append re $c + } + } + % { + set state {} + switch -exact -- $c { + % { + append re % + } + { } { + append re "\[\[:space:\]\]*" + } + a - A { # Day of week, in words + set l {} + foreach \ + i {7 1 2 3 4 5 6} \ + abr [mc DAYS_OF_WEEK_ABBREV] \ + full [mc DAYS_OF_WEEK_FULL] { + dict set l [string tolower $abr] $i + dict set l [string tolower $full] $i + incr i + } + lassign [UniquePrefixRegexp $l] regex lookup + append re ( $regex ) + dict set fieldSet dayOfWeek [incr fieldCount] + append postcode "dict set date dayOfWeek \[" \ + "dict get " [list $lookup] " " \ + \[ {string tolower $field} [incr captureCount] \] \ + "\]\n" + } + b - B - h { # Name of month + set i 0 + set l {} + foreach \ + abr [mc MONTHS_ABBREV] \ + full [mc MONTHS_FULL] { + incr i + dict set l [string tolower $abr] $i + dict set l [string tolower $full] $i + } + lassign [UniquePrefixRegexp $l] regex lookup + append re ( $regex ) + dict set fieldSet month [incr fieldCount] + append postcode "dict set date month \[" \ + "dict get " [list $lookup] \ + " " \[ {string tolower $field} \ + [incr captureCount] \] \ + "\]\n" + } + C { # Gregorian century + append re \\s*(\\d\\d?) + dict set fieldSet century [incr fieldCount] + append postcode "dict set date century \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + d - e { # Day of month + append re \\s*(\\d\\d?) 
+ dict set fieldSet dayOfMonth [incr fieldCount] + append postcode "dict set date dayOfMonth \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + E { # Prefix for locale-specific codes + set state %E + } + g { # ISO8601 2-digit year + append re \\s*(\\d\\d) + dict set fieldSet iso8601YearOfCentury \ + [incr fieldCount] + append postcode \ + "dict set date iso8601YearOfCentury \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + G { # ISO8601 4-digit year + append re \\s*(\\d\\d)(\\d\\d) + dict set fieldSet iso8601Century [incr fieldCount] + dict set fieldSet iso8601YearOfCentury \ + [incr fieldCount] + append postcode \ + "dict set date iso8601Century \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" \ + "dict set date iso8601YearOfCentury \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + H - k { # Hour of day + append re \\s*(\\d\\d?) + dict set fieldSet hour [incr fieldCount] + append postcode "dict set date hour \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + I - l { # Hour, AM/PM + append re \\s*(\\d\\d?) + dict set fieldSet hourAMPM [incr fieldCount] + append postcode "dict set date hourAMPM \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + j { # Day of year + append re \\s*(\\d\\d?\\d?) + dict set fieldSet dayOfYear [incr fieldCount] + append postcode "dict set date dayOfYear \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + J { # Julian Day Number + append re \\s*(\\d+) + dict set fieldSet julianDay [incr fieldCount] + append postcode "dict set date julianDay \[" \ + "::scan \$field" [incr captureCount] " %ld" \ + "\]\n" + } + m - N { # Month number + append re \\s*(\\d\\d?) + dict set fieldSet month [incr fieldCount] + append postcode "dict set date month \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + M { # Minute + append re \\s*(\\d\\d?) + dict set fieldSet minute [incr fieldCount] + append postcode "dict set date minute \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + n { # Literal newline + append re \\n + } + O { # Prefix for locale numerics + set state %O + } + p - P { # AM/PM indicator + set l [list [string tolower [mc AM]] 0 \ + [string tolower [mc PM]] 1] + lassign [UniquePrefixRegexp $l] regex lookup + append re ( $regex ) + dict set fieldSet amPmIndicator [incr fieldCount] + append postcode "dict set date amPmIndicator \[" \ + "dict get " [list $lookup] " \[string tolower " \ + "\$field" \ + [incr captureCount] \ + "\]\]\n" + } + Q { # Hi, Jeff! + append re {Stardate\s+([-+]?\d+)(\d\d\d)[.](\d)} + incr captureCount + dict set fieldSet seconds [incr fieldCount] + append postcode {dict set date seconds } \[ \ + {ParseStarDate $field} [incr captureCount] \ + { $field} [incr captureCount] \ + { $field} [incr captureCount] \ + \] \n + } + s { # Seconds from Posix Epoch + # This next case is insanely difficult, because it's + # problematic to determine whether the field is + # actually within the range of a wide integer. + append re {\s*([-+]?\d+)} + dict set fieldSet seconds [incr fieldCount] + append postcode {dict set date seconds } \[ \ + {ScanWide $field} [incr captureCount] \] \n + } + S { # Second + append re \\s*(\\d\\d?) 
+ dict set fieldSet second [incr fieldCount] + append postcode "dict set date second \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + t { # Literal tab character + append re \\t + } + u - w { # Day number within week, 0 or 7 == Sun + # 1=Mon, 6=Sat + append re \\s*(\\d) + dict set fieldSet dayOfWeek [incr fieldCount] + append postcode {::scan $field} [incr captureCount] \ + { %d dow} \n \ + { + if { $dow == 0 } { + set dow 7 + } elseif { $dow > 7 } { + return -code error \ + -errorcode [list CLOCK badDayOfWeek] \ + "day of week is greater than 7" + } + dict set date dayOfWeek $dow + } + } + U { # Week of year. The first Sunday of + # the year is the first day of week + # 01. No scan rule uses this group. + append re \\s*\\d\\d? + } + V { # Week of ISO8601 year + + append re \\s*(\\d\\d?) + dict set fieldSet iso8601Week [incr fieldCount] + append postcode "dict set date iso8601Week \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + W { # Week of the year (00-53). The first + # Monday of the year is the first day + # of week 01. No scan rule uses this + # group. + append re \\s*\\d\\d? + } + y { # Two-digit Gregorian year + append re \\s*(\\d\\d?) + dict set fieldSet yearOfCentury [incr fieldCount] + append postcode "dict set date yearOfCentury \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + Y { # 4-digit Gregorian year + append re \\s*(\\d\\d)(\\d\\d) + dict set fieldSet century [incr fieldCount] + dict set fieldSet yearOfCentury [incr fieldCount] + append postcode \ + "dict set date century \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" \ + "dict set date yearOfCentury \[" \ + "::scan \$field" [incr captureCount] " %d" \ + "\]\n" + } + z - Z { # Time zone name + append re {(?:([-+]\d\d(?::?\d\d(?::?\d\d)?)?)|([[:alnum:]]{1,4}))} + dict set fieldSet tzName [incr fieldCount] + append postcode \ + {if } \{ { $field} [incr captureCount] \ + { ne "" } \} { } \{ \n \ + {dict set date tzName $field} \ + $captureCount \n \ + \} { else } \{ \n \ + {dict set date tzName } \[ \ + {ConvertLegacyTimeZone $field} \ + [incr captureCount] \] \n \ + \} \n \ + } + % { # Literal percent character + append re % + } + default { + append re % + if { ! [string is alnum $c] } { + append re \\ + } + append re $c + } + } + } + %E { + switch -exact -- $c { + C { # Locale-dependent era + set d {} + foreach triple [mc LOCALE_ERAS] { + lassign $triple t symbol year + dict set d [string tolower $symbol] $year + } + lassign [UniquePrefixRegexp $d] regex lookup + append re (?: $regex ) + } + E { + set l {} + dict set l [string tolower [mc BCE]] BCE + dict set l [string tolower [mc CE]] CE + dict set l b.c.e. BCE + dict set l c.e. CE + dict set l b.c. BCE + dict set l a.d. CE + lassign [UniquePrefixRegexp $l] regex lookup + append re ( $regex ) + dict set fieldSet era [incr fieldCount] + append postcode "dict set date era \["\ + "dict get " [list $lookup] \ + { } \[ {string tolower $field} \ + [incr captureCount] \] \ + "\]\n" + } + y { # Locale-dependent year of the era + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + incr captureCount + } + default { + append re %E + if { ! 
[string is alnum $c] } { + append re \\ + } + append re $c + } + } + set state {} + } + %O { + switch -exact -- $c { + d - e { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet dayOfMonth [incr fieldCount] + append postcode "dict set date dayOfMonth \[" \ + "dict get " [list $lookup] " \$field" \ + [incr captureCount] \ + "\]\n" + } + H - k { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet hour [incr fieldCount] + append postcode "dict set date hour \[" \ + "dict get " [list $lookup] " \$field" \ + [incr captureCount] \ + "\]\n" + } + I - l { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet hourAMPM [incr fieldCount] + append postcode "dict set date hourAMPM \[" \ + "dict get " [list $lookup] " \$field" \ + [incr captureCount] \ + "\]\n" + } + m { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet month [incr fieldCount] + append postcode "dict set date month \[" \ + "dict get " [list $lookup] " \$field" \ + [incr captureCount] \ + "\]\n" + } + M { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet minute [incr fieldCount] + append postcode "dict set date minute \[" \ + "dict get " [list $lookup] " \$field" \ + [incr captureCount] \ + "\]\n" + } + S { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet second [incr fieldCount] + append postcode "dict set date second \[" \ + "dict get " [list $lookup] " \$field" \ + [incr captureCount] \ + "\]\n" + } + u - w { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet dayOfWeek [incr fieldCount] + append postcode "set dow \[dict get " [list $lookup] \ + { $field} [incr captureCount] \] \n \ + { + if { $dow == 0 } { + set dow 7 + } elseif { $dow > 7 } { + return -code error \ + -errorcode [list CLOCK badDayOfWeek] \ + "day of week is greater than 7" + } + dict set date dayOfWeek $dow + } + } + y { + lassign [LocaleNumeralMatcher $locale] regex lookup + append re $regex + dict set fieldSet yearOfCentury [incr fieldCount] + append postcode {dict set date yearOfCentury } \[ \ + {dict get } [list $lookup] { $field} \ + [incr captureCount] \] \n + } + default { + append re %O + if { ! [string is alnum $c] } { + append re \\ + } + append re $c + } + } + set state {} + } + } + } + + # Clean up any unfinished format groups + + append re $state \\s*\$ + + # Build the procedure + + set procBody {} + append procBody "variable ::tcl::clock::TZData" \n + append procBody "if \{ !\[ regexp -nocase [list $re] \$string ->" + for { set i 1 } { $i <= $captureCount } { incr i } { + append procBody " " field $i + } + append procBody "\] \} \{" \n + append procBody { + return -code error -errorcode [list CLOCK badInputString] \ + {input string does not match supplied format} + } + append procBody \}\n + append procBody "set date \[dict create\]" \n + append procBody {dict set date tzName $timeZone} \n + append procBody $postcode + append procBody [list set changeover [mc GREGORIAN_CHANGE_DATE]] \n + + # Set up the time zone before doing anything with a default base date + # that might need a timezone to interpret it. 
+ + if { ![dict exists $fieldSet seconds] + && ![dict exists $fieldSet starDate] } { + if { [dict exists $fieldSet tzName] } { + append procBody { + set timeZone [dict get $date tzName] + } + } + append procBody { + ::tcl::clock::SetupTimeZone $timeZone + } + } + + # Add code that gets Julian Day Number from the fields. + + append procBody [MakeParseCodeFromFields $fieldSet $DateParseActions] + + # Get time of day + + append procBody [MakeParseCodeFromFields $fieldSet $TimeParseActions] + + # Assemble seconds from the Julian day and second of the day. + # Convert to local time unless epoch seconds or stardate are + # being processed - they're always absolute + + if { ![dict exists $fieldSet seconds] + && ![dict exists $fieldSet starDate] } { + append procBody { + if { [dict get $date julianDay] > 5373484 } { + return -code error -errorcode [list CLOCK dateTooLarge] \ + "requested date too large to represent" + } + dict set date localSeconds [expr { + -210866803200 + + ( 86400 * wide([dict get $date julianDay]) ) + + [dict get $date secondOfDay] + }] + } + + # Finally, convert the date to local time + + append procBody { + set date [::tcl::clock::ConvertLocalToUTC $date[set date {}] \ + $TZData($timeZone) $changeover] + } + } + + # Return result + + append procBody {return [dict get $date seconds]} \n + + proc $procName { string baseTime timeZone } $procBody + + # puts [list proc $procName [list string baseTime timeZone] $procBody] + + return $procName +} + +#---------------------------------------------------------------------- +# +# LocaleNumeralMatcher -- +# +# Composes a regexp that captures the numerals in the given locale, and +# a dictionary to map them to conventional numerals. +# +# Parameters: +# locale - Name of the current locale +# +# Results: +# Returns a two-element list comprising the regexp and the dictionary. +# +# Side effects: +# Caches the result. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::LocaleNumeralMatcher {l} { + variable LocaleNumeralCache + + if { ![dict exists $LocaleNumeralCache $l] } { + set d {} + set i 0 + set sep \( + foreach n [mc LOCALE_NUMERALS] { + dict set d $n $i + regsub -all {[^[:alnum:]]} $n \\\\& subex + append re $sep $subex + set sep | + incr i + } + append re \) + dict set LocaleNumeralCache $l [list $re $d] + } + return [dict get $LocaleNumeralCache $l] +} + + + +#---------------------------------------------------------------------- +# +# UniquePrefixRegexp -- +# +# Composes a regexp that performs unique-prefix matching. The RE +# matches one of a supplied set of strings, or any unique prefix +# thereof. +# +# Parameters: +# data - List of alternating match-strings and values. +# Match-strings with distinct values are considered +# distinct. +# +# Results: +# Returns a two-element list. The first is a regexp that matches any +# unique prefix of any of the strings. The second is a dictionary whose +# keys are match values from the regexp and whose values are the +# corresponding values from 'data'. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::UniquePrefixRegexp { data } { + # The 'successors' dictionary will contain, for each string that is a + # prefix of any key, all characters that may follow that prefix. The + # 'prefixMapping' dictionary will have keys that are prefixes of keys and + # values that correspond to the keys. 
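+    #
+    # Illustrative example (the input is an assumption): for data
+    # {mon 1 may 5}, the shared prefix "m" is ambiguous and excluded, while
+    # mo and mon map to 1 and ma and may map to 5 in the returned
+    # dictionary; the returned regexp accepts exactly those unambiguous
+    # prefixes.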
+ + set prefixMapping [dict create] + set successors [dict create {} {}] + + # Walk the key-value pairs + + foreach { key value } $data { + # Construct all prefixes of the key; + + set prefix {} + foreach char [split $key {}] { + set oldPrefix $prefix + dict set successors $oldPrefix $char {} + append prefix $char + + # Put the prefixes in the 'prefixMapping' and 'successors' + # dictionaries + + dict lappend prefixMapping $prefix $value + if { ![dict exists $successors $prefix] } { + dict set successors $prefix {} + } + } + } + + # Identify those prefixes that designate unique values, and those that are + # the full keys + + set uniquePrefixMapping {} + dict for { key valueList } $prefixMapping { + if { [llength $valueList] == 1 } { + dict set uniquePrefixMapping $key [lindex $valueList 0] + } + } + foreach { key value } $data { + dict set uniquePrefixMapping $key $value + } + + # Construct the re. + + return [list \ + [MakeUniquePrefixRegexp $successors $uniquePrefixMapping {}] \ + $uniquePrefixMapping] +} + +#---------------------------------------------------------------------- +# +# MakeUniquePrefixRegexp -- +# +# Service procedure for 'UniquePrefixRegexp' that constructs a regular +# expresison that matches the unique prefixes. +# +# Parameters: +# successors - Dictionary whose keys are all prefixes +# of keys passed to 'UniquePrefixRegexp' and whose +# values are dictionaries whose keys are the characters +# that may follow those prefixes. +# uniquePrefixMapping - Dictionary whose keys are the unique +# prefixes and whose values are not examined. +# prefixString - Current prefix being processed. +# +# Results: +# Returns a constructed regular expression that matches the set of +# unique prefixes beginning with the 'prefixString'. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::MakeUniquePrefixRegexp { successors + uniquePrefixMapping + prefixString } { + + # Get the characters that may follow the current prefix string + + set schars [lsort -ascii [dict keys [dict get $successors $prefixString]]] + if { [llength $schars] == 0 } { + return {} + } + + # If there is more than one successor character, or if the current prefix + # is a unique prefix, surround the generated re with non-capturing + # parentheses. + + set re {} + if { + [dict exists $uniquePrefixMapping $prefixString] + || [llength $schars] > 1 + } then { + append re "(?:" + } + + # Generate a regexp that matches the successors. + + set sep "" + foreach { c } $schars { + set nextPrefix $prefixString$c + regsub -all {[^[:alnum:]]} $c \\\\& rechar + append re $sep $rechar \ + [MakeUniquePrefixRegexp \ + $successors $uniquePrefixMapping $nextPrefix] + set sep | + } + + # If the current prefix is a unique prefix, make all following text + # optional. Otherwise, if there is more than one successor character, + # close the non-capturing parentheses. + + if { [dict exists $uniquePrefixMapping $prefixString] } { + append re ")?" + } elseif { [llength $schars] > 1 } { + append re ")" + } + + return $re +} + +#---------------------------------------------------------------------- +# +# MakeParseCodeFromFields -- +# +# Composes Tcl code to extract the Julian Day Number from a dictionary +# containing date fields. +# +# Parameters: +# dateFields -- Dictionary whose keys are fields of the date, +# and whose values are the rightmost positions +# at which those fields appear. +# parseActions -- List of triples: field set, priority, and +# code to emit. 
Smaller priorities are better, and +# the list must be in ascending order by priority +# +# Results: +# Returns a burst of code that extracts the day number from the given +# date. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::MakeParseCodeFromFields { dateFields parseActions } { + + set currPrio 999 + set currFieldPos [list] + set currCodeBurst { + error "in ::tcl::clock::MakeParseCodeFromFields: can't happen" + } + + foreach { fieldSet prio parseAction } $parseActions { + # If we've found an answer that's better than any that follow, quit + # now. + + if { $prio > $currPrio } { + break + } + + # Accumulate the field positions that are used in the current field + # grouping. + + set fieldPos [list] + set ok true + foreach field $fieldSet { + if { ! [dict exists $dateFields $field] } { + set ok 0 + break + } + lappend fieldPos [dict get $dateFields $field] + } + + # Quit if we don't have a complete set of fields + if { !$ok } { + continue + } + + # Determine whether the current answer is better than the last. + + set fPos [lsort -integer -decreasing $fieldPos] + + if { $prio == $currPrio } { + foreach currPos $currFieldPos newPos $fPos { + if { + ![string is integer $newPos] + || ![string is integer $currPos] + || $newPos > $currPos + } then { + break + } + if { $newPos < $currPos } { + set ok 0 + break + } + } + } + if { !$ok } { + continue + } + + # Remember the best possibility for extracting date information + + set currPrio $prio + set currFieldPos $fPos + set currCodeBurst $parseAction + } + + return $currCodeBurst +} + +#---------------------------------------------------------------------- +# +# EnterLocale -- +# +# Switch [mclocale] to a given locale if necessary +# +# Parameters: +# locale -- Desired locale +# +# Results: +# Returns the locale that was previously current. +# +# Side effects: +# Does [mclocale]. If necessary, loads the designated locale's files. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::EnterLocale { locale } { + if { $locale eq {system} } { + if { $::tcl_platform(platform) ne {windows} } { + # On a non-windows platform, the 'system' locale is the same as + # the 'current' locale + + set locale current + } else { + # On a windows platform, the 'system' locale is adapted from the + # 'current' locale by applying the date and time formats from the + # Control Panel. First, load the 'current' locale if it's not yet + # loaded + + mcpackagelocale set [mclocale] + + # Make a new locale string for the system locale, and get the + # Control Panel information + + set locale [mclocale]_windows + if { ! [mcpackagelocale present $locale] } { + LoadWindowsDateTimeFormats $locale + } + } + } + if { $locale eq {current}} { + set locale [mclocale] + } + # Eventually load the locale + mcpackagelocale set $locale +} + +#---------------------------------------------------------------------- +# +# LoadWindowsDateTimeFormats -- +# +# Load the date/time formats from the Control Panel in Windows and +# convert them so that they're usable by Tcl. +# +# Parameters: +# locale - Name of the locale in whose message catalog +# the converted formats are to be stored. +# +# Results: +# None. +# +# Side effects: +# Updates the given message catalog with the locale strings. +# +# Presumes that on entry, [mclocale] is set to the current locale, so that +# default strings can be obtained if the Registry query fails. 
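+#
+# Illustrative mapping (the Registry value is an assumption): an sShortDate
+# of M/d/yyyy is rewritten by the table below to %N/%e/%Y, i.e. month
+# number, day of month and four-digit year in [clock format] notation.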
+# +#---------------------------------------------------------------------- + +proc ::tcl::clock::LoadWindowsDateTimeFormats { locale } { + # Bail out if we can't find the Registry + + variable NoRegistry + if { [info exists NoRegistry] } return + + if { ![catch { + registry get "HKEY_CURRENT_USER\\Control Panel\\International" \ + sShortDate + } string] } { + set quote {} + set datefmt {} + foreach { unquoted quoted } [split $string '] { + append datefmt $quote [string map { + dddd %A + ddd %a + dd %d + d %e + MMMM %B + MMM %b + MM %m + M %N + yyyy %Y + yy %y + y %y + gg {} + } $unquoted] + if { $quoted eq {} } { + set quote ' + } else { + set quote $quoted + } + } + ::msgcat::mcset $locale DATE_FORMAT $datefmt + } + + if { ![catch { + registry get "HKEY_CURRENT_USER\\Control Panel\\International" \ + sLongDate + } string] } { + set quote {} + set ldatefmt {} + foreach { unquoted quoted } [split $string '] { + append ldatefmt $quote [string map { + dddd %A + ddd %a + dd %d + d %e + MMMM %B + MMM %b + MM %m + M %N + yyyy %Y + yy %y + y %y + gg {} + } $unquoted] + if { $quoted eq {} } { + set quote ' + } else { + set quote $quoted + } + } + ::msgcat::mcset $locale LOCALE_DATE_FORMAT $ldatefmt + } + + if { ![catch { + registry get "HKEY_CURRENT_USER\\Control Panel\\International" \ + sTimeFormat + } string] } { + set quote {} + set timefmt {} + foreach { unquoted quoted } [split $string '] { + append timefmt $quote [string map { + HH %H + H %k + hh %I + h %l + mm %M + m %M + ss %S + s %S + tt %p + t %p + } $unquoted] + if { $quoted eq {} } { + set quote ' + } else { + set quote $quoted + } + } + ::msgcat::mcset $locale TIME_FORMAT $timefmt + } + + catch { + ::msgcat::mcset $locale DATE_TIME_FORMAT "$datefmt $timefmt" + } + catch { + ::msgcat::mcset $locale LOCALE_DATE_TIME_FORMAT "$ldatefmt $timefmt" + } + + return + +} + +#---------------------------------------------------------------------- +# +# LocalizeFormat -- +# +# Map away locale-dependent format groups in a clock format. +# +# Parameters: +# locale -- Current [mclocale] locale, supplied to avoid +# an extra call +# format -- Format supplied to [clock scan] or [clock format] +# +# Results: +# Returns the string with locale-dependent composite format groups +# substituted out. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::LocalizeFormat { locale format } { + + # message catalog key to cache this format + set key FORMAT_$format + + if { [::msgcat::mcexists -exactlocale -exactnamespace $key] } { + return [mc $key] + } + # Handle locale-dependent format groups by mapping them out of the format + # string. Note that the order of the [string map] operations is + # significant because later formats can refer to later ones; for example + # %c can refer to %X, which in turn can refer to %T. 
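+    #
+    # Illustrative expansion (the catalog values are assumptions): if the
+    # catalog defines TIME_FORMAT as {%T} and DATE_TIME_FORMAT as {%x %X},
+    # then the %T entry is added first and already carries a concrete
+    # pattern such as {%H:%M:%S}; the %X entry is computed against that
+    # list, and the %c entry against the list including %x and %X, so each
+    # later entry is fully expanded before it is used.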
+ + set list { + %% %% + %D %m/%d/%Y + %+ {%a %b %e %H:%M:%S %Z %Y} + } + lappend list %EY [string map $list [mc LOCALE_YEAR_FORMAT]] + lappend list %T [string map $list [mc TIME_FORMAT_24_SECS]] + lappend list %R [string map $list [mc TIME_FORMAT_24]] + lappend list %r [string map $list [mc TIME_FORMAT_12]] + lappend list %X [string map $list [mc TIME_FORMAT]] + lappend list %EX [string map $list [mc LOCALE_TIME_FORMAT]] + lappend list %x [string map $list [mc DATE_FORMAT]] + lappend list %Ex [string map $list [mc LOCALE_DATE_FORMAT]] + lappend list %c [string map $list [mc DATE_TIME_FORMAT]] + lappend list %Ec [string map $list [mc LOCALE_DATE_TIME_FORMAT]] + set format [string map $list $format] + + ::msgcat::mcset $locale $key $format + return $format +} + +#---------------------------------------------------------------------- +# +# FormatNumericTimeZone -- +# +# Formats a time zone as +hhmmss +# +# Parameters: +# z - Time zone in seconds east of Greenwich +# +# Results: +# Returns the time zone formatted in a numeric form +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::FormatNumericTimeZone { z } { + if { $z < 0 } { + set z [expr { - $z }] + set retval - + } else { + set retval + + } + append retval [::format %02d [expr { $z / 3600 }]] + set z [expr { $z % 3600 }] + append retval [::format %02d [expr { $z / 60 }]] + set z [expr { $z % 60 }] + if { $z != 0 } { + append retval [::format %02d $z] + } + return $retval +} + +#---------------------------------------------------------------------- +# +# FormatStarDate -- +# +# Formats a date as a StarDate. +# +# Parameters: +# date - Dictionary containing 'year', 'dayOfYear', and +# 'localSeconds' fields. +# +# Results: +# Returns the given date formatted as a StarDate. +# +# Side effects: +# None. +# +# Jeff Hobbs put this in to support an atrocious pun about Tcl being +# "Enterprise ready." Now we're stuck with it. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::FormatStarDate { date } { + variable Roddenberry + + # Get day of year, zero based + + set doy [expr { [dict get $date dayOfYear] - 1 }] + + # Determine whether the year is a leap year + + set lp [IsGregorianLeapYear $date] + + # Convert day of year to a fractional year + + if { $lp } { + set fractYear [expr { 1000 * $doy / 366 }] + } else { + set fractYear [expr { 1000 * $doy / 365 }] + } + + # Put together the StarDate + + return [::format "Stardate %02d%03d.%1d" \ + [expr { [dict get $date year] - $Roddenberry }] \ + $fractYear \ + [expr { [dict get $date localSeconds] % 86400 + / ( 86400 / 10 ) }]] +} + +#---------------------------------------------------------------------- +# +# ParseStarDate -- +# +# Parses a StarDate +# +# Parameters: +# year - Year from the Roddenberry epoch +# fractYear - Fraction of a year specifying the day of year. +# fractDay - Fraction of a day +# +# Results: +# Returns a count of seconds from the Posix epoch. +# +# Side effects: +# None. +# +# Jeff Hobbs put this in to support an atrocious pun about Tcl being +# "Enterprise ready." Now we're stuck with it. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ParseStarDate { year fractYear fractDay } { + variable Roddenberry + + # Build a tentative date from year and fraction. 
+ + set date [dict create \ + gregorian 1 \ + era CE \ + year [expr { $year + $Roddenberry }] \ + dayOfYear [expr { $fractYear * 365 / 1000 + 1 }]] + set date [GetJulianDayFromGregorianEraYearDay $date[set date {}]] + + # Determine whether the given year is a leap year + + set lp [IsGregorianLeapYear $date] + + # Reconvert the fractional year according to whether the given year is a + # leap year + + if { $lp } { + dict set date dayOfYear \ + [expr { $fractYear * 366 / 1000 + 1 }] + } else { + dict set date dayOfYear \ + [expr { $fractYear * 365 / 1000 + 1 }] + } + dict unset date julianDay + dict unset date gregorian + set date [GetJulianDayFromGregorianEraYearDay $date[set date {}]] + + return [expr { + 86400 * [dict get $date julianDay] + - 210866803200 + + ( 86400 / 10 ) * $fractDay + }] +} + +#---------------------------------------------------------------------- +# +# ScanWide -- +# +# Scans a wide integer from an input +# +# Parameters: +# str - String containing a decimal wide integer +# +# Results: +# Returns the string as a pure wide integer. Throws an error if the +# string is misformatted or out of range. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ScanWide { str } { + set count [::scan $str {%ld %c} result junk] + if { $count != 1 } { + return -code error -errorcode [list CLOCK notAnInteger $str] \ + "\"$str\" is not an integer" + } + if { [incr result 0] != $str } { + return -code error -errorcode [list CLOCK integervalueTooLarge] \ + "integer value too large to represent" + } + return $result +} + +#---------------------------------------------------------------------- +# +# InterpretTwoDigitYear -- +# +# Given a date that contains only the year of the century, determines +# the target value of a two-digit year. +# +# Parameters: +# date - Dictionary containing fields of the date. +# baseTime - Base time relative to which the date is expressed. +# twoDigitField - Name of the field that stores the two-digit year. +# Default is 'yearOfCentury' +# fourDigitField - Name of the field that will receive the four-digit +# year. Default is 'year' +# +# Results: +# Returns the dictionary augmented with the four-digit year, stored in +# the given key. +# +# Side effects: +# None. +# +# The current rule for interpreting a two-digit year is that the year shall be +# between 1937 and 2037, thus staying within the range of a 32-bit signed +# value for time. This rule may change to a sliding window in future +# versions, so the 'baseTime' parameter (which is currently ignored) is +# provided in the procedure signature. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::InterpretTwoDigitYear { date baseTime + { twoDigitField yearOfCentury } + { fourDigitField year } } { + set yr [dict get $date $twoDigitField] + if { $yr <= 37 } { + dict set date $fourDigitField [expr { $yr + 2000 }] + } else { + dict set date $fourDigitField [expr { $yr + 1900 }] + } + return $date +} + +#---------------------------------------------------------------------- +# +# AssignBaseYear -- +# +# Places the number of the current year into a dictionary. +# +# Parameters: +# date - Dictionary value to update +# baseTime - Base time from which to extract the year, expressed +# in seconds from the Posix epoch +# timezone - the time zone in which the date is being scanned +# changeover - the Julian Day on which the Gregorian calendar +# was adopted in the target locale. 
+# +# Results: +# Returns the dictionary with the current year assigned. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::AssignBaseYear { date baseTime timezone changeover } { + variable TZData + + # Find the Julian Day Number corresponding to the base time, and + # find the Gregorian year corresponding to that Julian Day. + + set date2 [GetDateFields $baseTime $TZData($timezone) $changeover] + + # Store the converted year + + dict set date era [dict get $date2 era] + dict set date year [dict get $date2 year] + + return $date +} + +#---------------------------------------------------------------------- +# +# AssignBaseIso8601Year -- +# +# Determines the base year in the ISO8601 fiscal calendar. +# +# Parameters: +# date - Dictionary containing the fields of the date that +# is to be augmented with the base year. +# baseTime - Base time expressed in seconds from the Posix epoch. +# timeZone - Target time zone +# changeover - Julian Day of adoption of the Gregorian calendar in +# the target locale. +# +# Results: +# Returns the given date with "iso8601Year" set to the +# base year. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::AssignBaseIso8601Year {date baseTime timeZone changeover} { + variable TZData + + # Find the Julian Day Number corresponding to the base time + + set date2 [GetDateFields $baseTime $TZData($timeZone) $changeover] + + # Calculate the ISO8601 date and transfer the year + + dict set date era CE + dict set date iso8601Year [dict get $date2 iso8601Year] + return $date +} + +#---------------------------------------------------------------------- +# +# AssignBaseMonth -- +# +# Places the number of the current year and month into a +# dictionary. +# +# Parameters: +# date - Dictionary value to update +# baseTime - Time from which the year and month are to be +# obtained, expressed in seconds from the Posix epoch. +# timezone - Name of the desired time zone +# changeover - Julian Day on which the Gregorian calendar was adopted. +# +# Results: +# Returns the dictionary with the base year and month assigned. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::AssignBaseMonth {date baseTime timezone changeover} { + variable TZData + + # Find the year and month corresponding to the base time + + set date2 [GetDateFields $baseTime $TZData($timezone) $changeover] + dict set date era [dict get $date2 era] + dict set date year [dict get $date2 year] + dict set date month [dict get $date2 month] + return $date +} + +#---------------------------------------------------------------------- +# +# AssignBaseWeek -- +# +# Determines the base year and week in the ISO8601 fiscal calendar. +# +# Parameters: +# date - Dictionary containing the fields of the date that +# is to be augmented with the base year and week. +# baseTime - Base time expressed in seconds from the Posix epoch. +# changeover - Julian Day on which the Gregorian calendar was adopted +# in the target locale. +# +# Results: +# Returns the given date with "iso8601Year" set to the +# base year and "iso8601Week" to the week number. +# +# Side effects: +# None. 
+# +#---------------------------------------------------------------------- + +proc ::tcl::clock::AssignBaseWeek {date baseTime timeZone changeover} { + variable TZData + + # Find the Julian Day Number corresponding to the base time + + set date2 [GetDateFields $baseTime $TZData($timeZone) $changeover] + + # Calculate the ISO8601 date and transfer the year + + dict set date era CE + dict set date iso8601Year [dict get $date2 iso8601Year] + dict set date iso8601Week [dict get $date2 iso8601Week] + return $date +} + +#---------------------------------------------------------------------- +# +# AssignBaseJulianDay -- +# +# Determines the base day for a time-of-day conversion. +# +# Parameters: +# date - Dictionary that is to get the base day +# baseTime - Base time expressed in seconds from the Posix epoch +# changeover - Julian day on which the Gregorian calendar was +# adpoted in the target locale. +# +# Results: +# Returns the given dictionary augmented with a 'julianDay' field +# that contains the base day. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::AssignBaseJulianDay { date baseTime timeZone changeover } { + variable TZData + + # Find the Julian Day Number corresponding to the base time + + set date2 [GetDateFields $baseTime $TZData($timeZone) $changeover] + dict set date julianDay [dict get $date2 julianDay] + + return $date +} + +#---------------------------------------------------------------------- +# +# InterpretHMSP -- +# +# Interprets a time in the form "hh:mm:ss am". +# +# Parameters: +# date -- Dictionary containing "hourAMPM", "minute", "second" +# and "amPmIndicator" fields. +# +# Results: +# Returns the number of seconds from local midnight. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::InterpretHMSP { date } { + set hr [dict get $date hourAMPM] + if { $hr == 12 } { + set hr 0 + } + if { [dict get $date amPmIndicator] } { + incr hr 12 + } + dict set date hour $hr + return [InterpretHMS $date[set date {}]] +} + +#---------------------------------------------------------------------- +# +# InterpretHMS -- +# +# Interprets a 24-hour time "hh:mm:ss" +# +# Parameters: +# date -- Dictionary containing the "hour", "minute" and "second" +# fields. +# +# Results: +# Returns the given dictionary augmented with a "secondOfDay" +# field containing the number of seconds from local midnight. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::InterpretHMS { date } { + return [expr { + ( [dict get $date hour] * 60 + + [dict get $date minute] ) * 60 + + [dict get $date second] + }] +} + +#---------------------------------------------------------------------- +# +# GetSystemTimeZone -- +# +# Determines the system time zone, which is the default for the +# 'clock' command if no other zone is supplied. +# +# Parameters: +# None. +# +# Results: +# Returns the system time zone. +# +# Side effects: +# Stores the system time zone in the 'CachedSystemTimeZone' +# variable, since determining it may be an expensive process. 
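+#
+# Illustrative precedence (the environment values are assumptions): with
+# TCL_TZ unset and TZ set to a usable zone such as America/New_York, that
+# value is used as the system zone; the cached zone, the Windows Registry
+# probe and /etc/localtime are consulted only when neither environment
+# variable is set.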
+# +#---------------------------------------------------------------------- + +proc ::tcl::clock::GetSystemTimeZone {} { + variable CachedSystemTimeZone + variable TimeZoneBad + + if {[set result [getenv TCL_TZ]] ne {}} { + set timezone $result + } elseif {[set result [getenv TZ]] ne {}} { + set timezone $result + } else { + # Cache the time zone only if it was detected by one of the + # expensive methods. + if { [info exists CachedSystemTimeZone] } { + set timezone $CachedSystemTimeZone + } elseif { $::tcl_platform(platform) eq {windows} } { + set timezone [GuessWindowsTimeZone] + } elseif { [file exists /etc/localtime] + && ![catch {ReadZoneinfoFile \ + Tcl/Localtime /etc/localtime}] } { + set timezone :Tcl/Localtime + } else { + set timezone :localtime + } + set CachedSystemTimeZone $timezone + } + if { ![dict exists $TimeZoneBad $timezone] } { + dict set TimeZoneBad $timezone [catch {SetupTimeZone $timezone}] + } + if { [dict get $TimeZoneBad $timezone] } { + return :localtime + } else { + return $timezone + } +} + +#---------------------------------------------------------------------- +# +# ConvertLegacyTimeZone -- +# +# Given an alphanumeric time zone identifier and the system time zone, +# convert the alphanumeric identifier to an unambiguous time zone. +# +# Parameters: +# tzname - Name of the time zone to convert +# +# Results: +# Returns a time zone name corresponding to tzname, but in an +# unambiguous form, generally +hhmm. +# +# This procedure is implemented primarily to allow the parsing of RFC822 +# date/time strings. Processing a time zone name on input is not recommended +# practice, because there is considerable room for ambiguity; for instance, is +# BST Brazilian Standard Time, or British Summer Time? +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ConvertLegacyTimeZone { tzname } { + variable LegacyTimeZone + + set tzname [string tolower $tzname] + if { ![dict exists $LegacyTimeZone $tzname] } { + return -code error -errorcode [list CLOCK badTZName $tzname] \ + "time zone \"$tzname\" not found" + } + return [dict get $LegacyTimeZone $tzname] +} + +#---------------------------------------------------------------------- +# +# SetupTimeZone -- +# +# Given the name or specification of a time zone, sets up its in-memory +# data. +# +# Parameters: +# tzname - Name of a time zone +# +# Results: +# Unless the time zone is ':localtime', sets the TZData array to contain +# the lookup table for local<->UTC conversion. Returns an error if the +# time zone cannot be parsed. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::SetupTimeZone { timezone } { + variable TZData + + if {! 
[info exists TZData($timezone)] } { + variable MINWIDE + if { $timezone eq {:localtime} } { + # Nothing to do, we'll convert using the localtime function + + } elseif { + [regexp {^([-+])(\d\d)(?::?(\d\d)(?::?(\d\d))?)?} $timezone \ + -> s hh mm ss] + } then { + # Make a fixed offset + + ::scan $hh %d hh + if { $mm eq {} } { + set mm 0 + } else { + ::scan $mm %d mm + } + if { $ss eq {} } { + set ss 0 + } else { + ::scan $ss %d ss + } + set offset [expr { ( $hh * 60 + $mm ) * 60 + $ss }] + if { $s eq {-} } { + set offset [expr { - $offset }] + } + set TZData($timezone) [list [list $MINWIDE $offset -1 $timezone]] + + } elseif { [string index $timezone 0] eq {:} } { + # Convert using a time zone file + + if { + [catch { + LoadTimeZoneFile [string range $timezone 1 end] + }] && [catch { + LoadZoneinfoFile [string range $timezone 1 end] + }] + } then { + return -code error \ + -errorcode [list CLOCK badTimeZone $timezone] \ + "time zone \"$timezone\" not found" + } + } elseif { ![catch {ParsePosixTimeZone $timezone} tzfields] } { + # This looks like a POSIX time zone - try to process it + + if { [catch {ProcessPosixTimeZone $tzfields} data opts] } { + if { [lindex [dict get $opts -errorcode] 0] eq {CLOCK} } { + dict unset opts -errorinfo + } + return -options $opts $data + } else { + set TZData($timezone) $data + } + + } else { + # We couldn't parse this as a POSIX time zone. Try again with a + # time zone file - this time without a colon + + if { [catch { LoadTimeZoneFile $timezone }] + && [catch { LoadZoneinfoFile $timezone } - opts] } { + dict unset opts -errorinfo + return -options $opts "time zone $timezone not found" + } + set TZData($timezone) $TZData(:$timezone) + } + } + + return +} + +#---------------------------------------------------------------------- +# +# GuessWindowsTimeZone -- +# +# Determines the system time zone on windows. +# +# Parameters: +# None. +# +# Results: +# Returns a time zone specifier that corresponds to the system time zone +# information found in the Registry. +# +# Bugs: +# Fixed dates for DST change are unimplemented at present, because no +# time zone information supplied with Windows actually uses them! +# +# On a Windows system where neither $env(TCL_TZ) nor $env(TZ) is specified, +# GuessWindowsTimeZone looks in the Registry for the system time zone +# information. It then attempts to find an entry in WinZoneInfo for a time +# zone that uses the same rules. If it finds one, it returns it; otherwise, +# it constructs a Posix-style time zone string and returns that. 
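+#
+# Illustrative fallback (the Registry values are assumptions): a zone with a
+# bias of 300 minutes and a daylight bias of -60 minutes yields a
+# Posix-style specifier beginning <-0500>+05:00:00<-0400>+04:00:00,
+# followed by ,M... rules giving the daylight start and end dates.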
+# +#---------------------------------------------------------------------- + +proc ::tcl::clock::GuessWindowsTimeZone {} { + variable WinZoneInfo + variable NoRegistry + variable TimeZoneBad + + if { [info exists NoRegistry] } { + return :localtime + } + + # Dredge time zone information out of the registry + + if { [catch { + set rpath HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Control\\TimeZoneInformation + set data [list \ + [expr { -60 + * [registry get $rpath Bias] }] \ + [expr { -60 + * [registry get $rpath StandardBias] }] \ + [expr { -60 \ + * [registry get $rpath DaylightBias] }]] + set stdtzi [registry get $rpath StandardStart] + foreach ind {0 2 14 4 6 8 10 12} { + binary scan $stdtzi @${ind}s val + lappend data $val + } + set daytzi [registry get $rpath DaylightStart] + foreach ind {0 2 14 4 6 8 10 12} { + binary scan $daytzi @${ind}s val + lappend data $val + } + }] } { + # Missing values in the Registry - bail out + + return :localtime + } + + # Make up a Posix time zone specifier if we can't find one. Check here + # that the tzdata file exists, in case we're running in an environment + # (e.g. starpack) where tzdata is incomplete. (Bug 1237907) + + if { [dict exists $WinZoneInfo $data] } { + set tzname [dict get $WinZoneInfo $data] + if { ! [dict exists $TimeZoneBad $tzname] } { + dict set TimeZoneBad $tzname [catch {SetupTimeZone $tzname}] + } + } else { + set tzname {} + } + if { $tzname eq {} || [dict get $TimeZoneBad $tzname] } { + lassign $data \ + bias stdBias dstBias \ + stdYear stdMonth stdDayOfWeek stdDayOfMonth \ + stdHour stdMinute stdSecond stdMillisec \ + dstYear dstMonth dstDayOfWeek dstDayOfMonth \ + dstHour dstMinute dstSecond dstMillisec + set stdDelta [expr { $bias + $stdBias }] + set dstDelta [expr { $bias + $dstBias }] + if { $stdDelta <= 0 } { + set stdSignum + + set stdDelta [expr { - $stdDelta }] + set dispStdSignum - + } else { + set stdSignum - + set dispStdSignum + + } + set hh [::format %02d [expr { $stdDelta / 3600 }]] + set mm [::format %02d [expr { ($stdDelta / 60 ) % 60 }]] + set ss [::format %02d [expr { $stdDelta % 60 }]] + set tzname {} + append tzname < $dispStdSignum $hh $mm > $stdSignum $hh : $mm : $ss + if { $stdMonth >= 0 } { + if { $dstDelta <= 0 } { + set dstSignum + + set dstDelta [expr { - $dstDelta }] + set dispDstSignum - + } else { + set dstSignum - + set dispDstSignum + + } + set hh [::format %02d [expr { $dstDelta / 3600 }]] + set mm [::format %02d [expr { ($dstDelta / 60 ) % 60 }]] + set ss [::format %02d [expr { $dstDelta % 60 }]] + append tzname < $dispDstSignum $hh $mm > $dstSignum $hh : $mm : $ss + if { $dstYear == 0 } { + append tzname ,M $dstMonth . $dstDayOfMonth . $dstDayOfWeek + } else { + # I have not been able to find any locale on which Windows + # converts time zone on a fixed day of the year, hence don't + # know how to interpret the fields. If someone can inform me, + # I'd be glad to code it up. For right now, we bail out in + # such a case. + return :localtime + } + append tzname / [::format %02d $dstHour] \ + : [::format %02d $dstMinute] \ + : [::format %02d $dstSecond] + if { $stdYear == 0 } { + append tzname ,M $stdMonth . $stdDayOfMonth . $stdDayOfWeek + } else { + # I have not been able to find any locale on which Windows + # converts time zone on a fixed day of the year, hence don't + # know how to interpret the fields. If someone can inform me, + # I'd be glad to code it up. For right now, we bail out in + # such a case. 
+ return :localtime + } + append tzname / [::format %02d $stdHour] \ + : [::format %02d $stdMinute] \ + : [::format %02d $stdSecond] + } + dict set WinZoneInfo $data $tzname + } + + return [dict get $WinZoneInfo $data] +} + +#---------------------------------------------------------------------- +# +# LoadTimeZoneFile -- +# +# Load the data file that specifies the conversion between a +# given time zone and Greenwich. +# +# Parameters: +# fileName -- Name of the file to load +# +# Results: +# None. +# +# Side effects: +# TZData(:fileName) contains the time zone data +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::LoadTimeZoneFile { fileName } { + variable DataDir + variable TZData + + if { [info exists TZData($fileName)] } { + return + } + + # Since an unsafe interp uses the [clock] command in the parent, this code + # is security sensitive. Make sure that the path name cannot escape the + # given directory. + + if { ![regexp {^[[.-.][:alpha:]_]+(?:/[[.-.][:alpha:]_]+)*$} $fileName] } { + return -code error \ + -errorcode [list CLOCK badTimeZone $:fileName] \ + "time zone \":$fileName\" not valid" + } + try { + source -encoding utf-8 [file join $DataDir $fileName] + } on error {} { + return -code error \ + -errorcode [list CLOCK badTimeZone :$fileName] \ + "time zone \":$fileName\" not found" + } + return +} + +#---------------------------------------------------------------------- +# +# LoadZoneinfoFile -- +# +# Loads a binary time zone information file in Olson format. +# +# Parameters: +# fileName - Relative path name of the file to load. +# +# Results: +# Returns an empty result normally; returns an error if no Olson file +# was found or the file was malformed in some way. +# +# Side effects: +# TZData(:fileName) contains the time zone data +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::LoadZoneinfoFile { fileName } { + variable ZoneinfoPaths + + # Since an unsafe interp uses the [clock] command in the parent, this code + # is security sensitive. Make sure that the path name cannot escape the + # given directory. + + if { ![regexp {^[[.-.][:alpha:]_]+(?:/[[.-.][:alpha:]_]+)*$} $fileName] } { + return -code error \ + -errorcode [list CLOCK badTimeZone $:fileName] \ + "time zone \":$fileName\" not valid" + } + foreach d $ZoneinfoPaths { + set fname [file join $d $fileName] + if { [file readable $fname] && [file isfile $fname] } { + break + } + unset fname + } + ReadZoneinfoFile $fileName $fname +} + +#---------------------------------------------------------------------- +# +# ReadZoneinfoFile -- +# +# Loads a binary time zone information file in Olson format. +# +# Parameters: +# fileName - Name of the time zone (relative path name of the +# file). +# fname - Absolute path name of the file. +# +# Results: +# Returns an empty result normally; returns an error if no Olson file +# was found or the file was malformed in some way. 
+# +# Side effects: +# TZData(:fileName) contains the time zone data +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ReadZoneinfoFile {fileName fname} { + variable MINWIDE + variable TZData + if { ![file exists $fname] } { + return -code error "$fileName not found" + } + + if { [file size $fname] > 262144 } { + return -code error "$fileName too big" + } + + # Suck in all the data from the file + + set f [open $fname r] + fconfigure $f -translation binary + set d [read $f] + close $f + + # The file begins with a magic number, sixteen reserved bytes, and then + # six 4-byte integers giving counts of fields in the file. + + binary scan $d a4a1x15IIIIII \ + magic version nIsGMT nIsStd nLeap nTime nType nChar + set seek 44 + set ilen 4 + set iformat I + if { $magic != {TZif} } { + return -code error "$fileName not a time zone information file" + } + if { $nType > 255 } { + return -code error "$fileName contains too many time types" + } + # Accept only Posix-style zoneinfo. Sorry, 'leaps' bigots. + if { $nLeap != 0 } { + return -code error "$fileName contains leap seconds" + } + + # In a version 2 file, we use the second part of the file, which contains + # 64-bit transition times. + + if {$version eq "2"} { + set seek [expr { + 44 + + 5 * $nTime + + 6 * $nType + + 4 * $nLeap + + $nIsStd + + $nIsGMT + + $nChar + }] + binary scan $d @${seek}a4a1x15IIIIII \ + magic version nIsGMT nIsStd nLeap nTime nType nChar + if {$magic ne {TZif}} { + return -code error "seek address $seek miscomputed, magic = $magic" + } + set iformat W + set ilen 8 + incr seek 44 + } + + # Next come ${nTime} transition times, followed by ${nTime} time type + # codes. The type codes are unsigned 1-byte quantities. We insert an + # arbitrary start time in front of the transitions. + + binary scan $d @${seek}${iformat}${nTime}c${nTime} times tempCodes + incr seek [expr { ($ilen + 1) * $nTime }] + set times [linsert $times 0 $MINWIDE] + set codes {} + foreach c $tempCodes { + lappend codes [expr { $c & 0xFF }] + } + set codes [linsert $codes 0 0] + + # Next come ${nType} time type descriptions, each of which has an offset + # (seconds east of GMT), a DST indicator, and an index into the + # abbreviation text. + + for { set i 0 } { $i < $nType } { incr i } { + binary scan $d @${seek}Icc gmtOff isDst abbrInd + lappend types [list $gmtOff $isDst $abbrInd] + incr seek 6 + } + + # Next come $nChar characters of time zone name abbreviations, which are + # null-terminated. + # We build them up into a dictionary indexed by character index, because + # that's what's in the indices above. + + binary scan $d @${seek}a${nChar} abbrs + incr seek ${nChar} + set abbrList [split $abbrs \0] + set i 0 + set abbrevs {} + foreach a $abbrList { + for {set j 0} {$j <= [string length $a]} {incr j} { + dict set abbrevs $i [string range $a $j end] + incr i + } + } + + # Package up a list of tuples, each of which contains transition time, + # seconds east of Greenwich, DST flag and time zone abbreviation. + + set r {} + set lastTime $MINWIDE + foreach t $times c $codes { + if { $t < $lastTime } { + return -code error "$fileName has times out of order" + } + set lastTime $t + lassign [lindex $types $c] gmtoff isDst abbrInd + set abbrev [dict get $abbrevs $abbrInd] + lappend r [list $t $gmtoff $isDst $abbrev] + } + + # In a version 2 file, there is also a POSIX-style time zone description + # at the very end of the file. 
To get to it, skip over nLeap leap second + # values (8 bytes each), + # nIsStd standard/DST indicators and nIsGMT UTC/local indicators. + + if {$version eq {2}} { + set seek [expr {$seek + 8 * $nLeap + $nIsStd + $nIsGMT + 1}] + set last [string first \n $d $seek] + set posix [string range $d $seek [expr {$last-1}]] + if {[llength $posix] > 0} { + set posixFields [ParsePosixTimeZone $posix] + foreach tuple [ProcessPosixTimeZone $posixFields] { + lassign $tuple t gmtoff isDst abbrev + if {$t > $lastTime} { + lappend r $tuple + } + } + } + } + + set TZData(:$fileName) $r + + return +} + +#---------------------------------------------------------------------- +# +# ParsePosixTimeZone -- +# +# Parses the TZ environment variable in Posix form +# +# Parameters: +# tz Time zone specifier to be interpreted +# +# Results: +# Returns a dictionary whose values contain the various pieces of the +# time zone specification. +# +# Side effects: +# None. +# +# Errors: +# Throws an error if the syntax of the time zone is incorrect. +# +# The following keys are present in the dictionary: +# stdName - Name of the time zone when Daylight Saving Time +# is not in effect. +# stdSignum - Sign (+, -, or empty) of the offset from Greenwich +# to the given (non-DST) time zone. + and the empty +# string denote zones west of Greenwich, - denotes east +# of Greenwich; this is contrary to the ISO convention +# but follows Posix. +# stdHours - Hours part of the offset from Greenwich to the given +# (non-DST) time zone. +# stdMinutes - Minutes part of the offset from Greenwich to the +# given (non-DST) time zone. Empty denotes zero. +# stdSeconds - Seconds part of the offset from Greenwich to the +# given (non-DST) time zone. Empty denotes zero. +# dstName - Name of the time zone when DST is in effect, or the +# empty string if the time zone does not observe Daylight +# Saving Time. +# dstSignum, dstHours, dstMinutes, dstSeconds - +# Fields corresponding to stdSignum, stdHours, stdMinutes, +# stdSeconds for the Daylight Saving Time version of the +# time zone. If dstHours is empty, it is presumed to be 1. +# startDayOfYear - The ordinal number of the day of the year on which +# Daylight Saving Time begins. If this field is +# empty, then DST begins on a given month-week-day, +# as below. +# startJ - The letter J, or an empty string. If a J is present in +# this field, then startDayOfYear does not count February 29 +# even in leap years. +# startMonth - The number of the month in which Daylight Saving Time +# begins, supplied if startDayOfYear is empty. If both +# startDayOfYear and startMonth are empty, then US rules +# are presumed. +# startWeekOfMonth - The number of the week in the month in which +# Daylight Saving Time begins, in the range 1-5. +# 5 denotes the last week of the month even in a +# 4-week month. +# startDayOfWeek - The number of the day of the week (Sunday=0, +# Saturday=6) on which Daylight Saving Time begins. +# startHours - The hours part of the time of day at which Daylight +# Saving Time begins. An empty string is presumed to be 2. +# startMinutes - The minutes part of the time of day at which DST begins. +# An empty string is presumed zero. +# startSeconds - The seconds part of the time of day at which DST begins. +# An empty string is presumed zero. +# endDayOfYear, endJ, endMonth, endWeekOfMonth, endDayOfWeek, +# endHours, endMinutes, endSeconds - +# Specify the end of DST in the same way that the start* fields +# specify the beginning of DST. 
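+#
+# Example (illustrative): the common specifier "EST5EDT,M3.2.0,M11.1.0"
+# yields stdName EST, stdSignum empty, stdHours 5, dstName EDT, startMonth 3,
+# startWeekOfMonth 2, startDayOfWeek 0, endMonth 11, endWeekOfMonth 1 and
+# endDayOfWeek 0; all other fields come back empty and are defaulted by the
+# consumers of this dictionary.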
+# +# This procedure serves only to break the time specifier into fields. No +# attempt is made to canonicalize the fields or supply default values. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ParsePosixTimeZone { tz } { + if {[regexp -expanded -nocase -- { + ^ + # 1 - Standard time zone name + ([[:alpha:]]+ | <[-+[:alnum:]]+>) + # 2 - Standard time zone offset, signum + ([-+]?) + # 3 - Standard time zone offset, hours + ([[:digit:]]{1,2}) + (?: + # 4 - Standard time zone offset, minutes + : ([[:digit:]]{1,2}) + (?: + # 5 - Standard time zone offset, seconds + : ([[:digit:]]{1,2} ) + )? + )? + (?: + # 6 - DST time zone name + ([[:alpha:]]+ | <[-+[:alnum:]]+>) + (?: + (?: + # 7 - DST time zone offset, signum + ([-+]?) + # 8 - DST time zone offset, hours + ([[:digit:]]{1,2}) + (?: + # 9 - DST time zone offset, minutes + : ([[:digit:]]{1,2}) + (?: + # 10 - DST time zone offset, seconds + : ([[:digit:]]{1,2}) + )? + )? + )? + (?: + , + (?: + # 11 - Optional J in n and Jn form 12 - Day of year + ( J ? ) ( [[:digit:]]+ ) + | M + # 13 - Month number 14 - Week of month 15 - Day of week + ( [[:digit:]] + ) + [.] ( [[:digit:]] + ) + [.] ( [[:digit:]] + ) + ) + (?: + # 16 - Start time of DST - hours + / ( [[:digit:]]{1,2} ) + (?: + # 17 - Start time of DST - minutes + : ( [[:digit:]]{1,2} ) + (?: + # 18 - Start time of DST - seconds + : ( [[:digit:]]{1,2} ) + )? + )? + )? + , + (?: + # 19 - Optional J in n and Jn form 20 - Day of year + ( J ? ) ( [[:digit:]]+ ) + | M + # 21 - Month number 22 - Week of month 23 - Day of week + ( [[:digit:]] + ) + [.] ( [[:digit:]] + ) + [.] ( [[:digit:]] + ) + ) + (?: + # 24 - End time of DST - hours + / ( [[:digit:]]{1,2} ) + (?: + # 25 - End time of DST - minutes + : ( [[:digit:]]{1,2} ) + (?: + # 26 - End time of DST - seconds + : ( [[:digit:]]{1,2} ) + )? + )? + )? + )? + )? + )? + $ + } $tz -> x(stdName) x(stdSignum) x(stdHours) x(stdMinutes) x(stdSeconds) \ + x(dstName) x(dstSignum) x(dstHours) x(dstMinutes) x(dstSeconds) \ + x(startJ) x(startDayOfYear) \ + x(startMonth) x(startWeekOfMonth) x(startDayOfWeek) \ + x(startHours) x(startMinutes) x(startSeconds) \ + x(endJ) x(endDayOfYear) \ + x(endMonth) x(endWeekOfMonth) x(endDayOfWeek) \ + x(endHours) x(endMinutes) x(endSeconds)] } { + # it's a good timezone + + return [array get x] + } + + return -code error\ + -errorcode [list CLOCK badTimeZone $tz] \ + "unable to parse time zone specification \"$tz\"" +} + +#---------------------------------------------------------------------- +# +# ProcessPosixTimeZone -- +# +# Handle a Posix time zone after it's been broken out into fields. +# +# Parameters: +# z - Dictionary returned from 'ParsePosixTimeZone' +# +# Results: +# Returns time zone information for the 'TZData' array. +# +# Side effects: +# None. 
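+#
+# Example (illustrative, continuing the specifier shown above for the
+# parser): for "EST5EDT,M3.2.0,M11.1.0", stdOffset comes out as -18000
+# seconds east of Greenwich; dstHours is empty, so dstOffset defaults to
+# stdOffset + 3600 = -14400, one hour ahead of standard time.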
+# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ProcessPosixTimeZone { z } { + variable MINWIDE + variable TZData + + # Determine the standard time zone name and seconds east of Greenwich + + set stdName [dict get $z stdName] + if { [string index $stdName 0] eq {<} } { + set stdName [string range $stdName 1 end-1] + } + if { [dict get $z stdSignum] eq {-} } { + set stdSignum +1 + } else { + set stdSignum -1 + } + set stdHours [lindex [::scan [dict get $z stdHours] %d] 0] + if { [dict get $z stdMinutes] ne {} } { + set stdMinutes [lindex [::scan [dict get $z stdMinutes] %d] 0] + } else { + set stdMinutes 0 + } + if { [dict get $z stdSeconds] ne {} } { + set stdSeconds [lindex [::scan [dict get $z stdSeconds] %d] 0] + } else { + set stdSeconds 0 + } + set stdOffset [expr { + (($stdHours * 60 + $stdMinutes) * 60 + $stdSeconds) * $stdSignum + }] + set data [list [list $MINWIDE $stdOffset 0 $stdName]] + + # If there's no daylight zone, we're done + + set dstName [dict get $z dstName] + if { $dstName eq {} } { + return $data + } + if { [string index $dstName 0] eq {<} } { + set dstName [string range $dstName 1 end-1] + } + + # Determine the daylight name + + if { [dict get $z dstSignum] eq {-} } { + set dstSignum +1 + } else { + set dstSignum -1 + } + if { [dict get $z dstHours] eq {} } { + set dstOffset [expr { 3600 + $stdOffset }] + } else { + set dstHours [lindex [::scan [dict get $z dstHours] %d] 0] + if { [dict get $z dstMinutes] ne {} } { + set dstMinutes [lindex [::scan [dict get $z dstMinutes] %d] 0] + } else { + set dstMinutes 0 + } + if { [dict get $z dstSeconds] ne {} } { + set dstSeconds [lindex [::scan [dict get $z dstSeconds] %d] 0] + } else { + set dstSeconds 0 + } + set dstOffset [expr { + (($dstHours*60 + $dstMinutes) * 60 + $dstSeconds) * $dstSignum + }] + } + + # Fill in defaults for European or US DST rules + # US start time is the second Sunday in March + # EU start time is the last Sunday in March + # US end time is the first Sunday in November. + # EU end time is the last Sunday in October + + if { + [dict get $z startDayOfYear] eq {} + && [dict get $z startMonth] eq {} + } then { + if {($stdSignum * $stdHours>=0) && ($stdSignum * $stdHours<=12)} { + # EU + dict set z startWeekOfMonth 5 + if {$stdHours>2} { + dict set z startHours 2 + } else { + dict set z startHours [expr {$stdHours+1}] + } + } else { + # US + dict set z startWeekOfMonth 2 + dict set z startHours 2 + } + dict set z startMonth 3 + dict set z startDayOfWeek 0 + dict set z startMinutes 0 + dict set z startSeconds 0 + } + if { + [dict get $z endDayOfYear] eq {} + && [dict get $z endMonth] eq {} + } then { + if {($stdSignum * $stdHours>=0) && ($stdSignum * $stdHours<=12)} { + # EU + dict set z endMonth 10 + dict set z endWeekOfMonth 5 + if {$stdHours>2} { + dict set z endHours 3 + } else { + dict set z endHours [expr {$stdHours+2}] + } + } else { + # US + dict set z endMonth 11 + dict set z endWeekOfMonth 1 + dict set z endHours 2 + } + dict set z endDayOfWeek 0 + dict set z endMinutes 0 + dict set z endSeconds 0 + } + + # Put DST in effect in all years from 1916 to 2099. 
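+    # The transition instants computed below are local wall-clock seconds;
+    # subtracting the offset in force as each transition is approached
+    # (standard time going into DST, DST coming back out) converts them to
+    # seconds from the epoch, which is what the two 'incr' calls do.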
+ + for { set y 1916 } { $y < 2100 } { incr y } { + set startTime [DeterminePosixDSTTime $z start $y] + incr startTime [expr { - wide($stdOffset) }] + set endTime [DeterminePosixDSTTime $z end $y] + incr endTime [expr { - wide($dstOffset) }] + if { $startTime < $endTime } { + lappend data \ + [list $startTime $dstOffset 1 $dstName] \ + [list $endTime $stdOffset 0 $stdName] + } else { + lappend data \ + [list $endTime $stdOffset 0 $stdName] \ + [list $startTime $dstOffset 1 $dstName] + } + } + + return $data +} + +#---------------------------------------------------------------------- +# +# DeterminePosixDSTTime -- +# +# Determines the time that Daylight Saving Time starts or ends from a +# Posix time zone specification. +# +# Parameters: +# z - Time zone data returned from ParsePosixTimeZone. +# Missing fields are expected to be filled in with +# default values. +# bound - The word 'start' or 'end' +# y - The year for which the transition time is to be determined. +# +# Results: +# Returns the transition time as a count of seconds from the epoch. The +# time is relative to the wall clock, not UTC. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::DeterminePosixDSTTime { z bound y } { + + variable FEB_28 + + # Determine the start or end day of DST + + set date [dict create era CE year $y] + set doy [dict get $z ${bound}DayOfYear] + if { $doy ne {} } { + + # Time was specified as a day of the year + + if { [dict get $z ${bound}J] ne {} + && [IsGregorianLeapYear $y] + && ( $doy > $FEB_28 ) } { + incr doy + } + dict set date dayOfYear $doy + set date [GetJulianDayFromEraYearDay $date[set date {}] 2361222] + } else { + # Time was specified as a day of the week within a month + + dict set date month [dict get $z ${bound}Month] + dict set date dayOfWeek [dict get $z ${bound}DayOfWeek] + set dowim [dict get $z ${bound}WeekOfMonth] + if { $dowim >= 5 } { + set dowim -1 + } + dict set date dayOfWeekInMonth $dowim + set date [GetJulianDayFromEraYearMonthWeekDay $date[set date {}] 2361222] + + } + + set jd [dict get $date julianDay] + set seconds [expr { + wide($jd) * wide(86400) - wide(210866803200) + }] + + set h [dict get $z ${bound}Hours] + if { $h eq {} } { + set h 2 + } else { + set h [lindex [::scan $h %d] 0] + } + set m [dict get $z ${bound}Minutes] + if { $m eq {} } { + set m 0 + } else { + set m [lindex [::scan $m %d] 0] + } + set s [dict get $z ${bound}Seconds] + if { $s eq {} } { + set s 0 + } else { + set s [lindex [::scan $s %d] 0] + } + set tod [expr { ( $h * 60 + $m ) * 60 + $s }] + return [expr { $seconds + $tod }] +} + +#---------------------------------------------------------------------- +# +# GetLocaleEra -- +# +# Given local time expressed in seconds from the Posix epoch, +# determine localized era and year within the era. +# +# Parameters: +# date - Dictionary that must contain the keys, 'localSeconds', +# whose value is expressed as the appropriate local time; +# and 'year', whose value is the Gregorian year. +# etable - Value of the LOCALE_ERAS key in the message catalogue +# for the target locale. +# +# Results: +# Returns the dictionary, augmented with the keys, 'localeEra' and +# 'localeYear'. 
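+#
+# Example (the table layout here is inferred from the code, not documented
+# elsewhere): if BSearch selects an etable entry whose era name is "Showa"
+# and whose base year is 1925, a Gregorian year of 1987 produces localeEra
+# "Showa" and localeYear 62 (1987 - 1925).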
+#
+#----------------------------------------------------------------------
+
+proc ::tcl::clock::GetLocaleEra { date etable } {
+    set index [BSearch $etable [dict get $date localSeconds]]
+    if { $index < 0 } {
+	dict set date localeEra \
+	    [::format %02d [expr { [dict get $date year] / 100 }]]
+	dict set date localeYear [expr {
+	    [dict get $date year] % 100
+	}]
+    } else {
+	dict set date localeEra [lindex $etable $index 1]
+	dict set date localeYear [expr {
+	    [dict get $date year] - [lindex $etable $index 2]
+	}]
+    }
+    return $date
+}
+
+#----------------------------------------------------------------------
+#
+# GetJulianDayFromEraYearDay --
+#
+#	Given a year and a day of the year on the Gregorian calendar,
+#	determines the Julian Day Number beginning at noon on that date.
+#
+# Parameters:
+#	date -- A dictionary in which the 'era', 'year', and
+#	        'dayOfYear' slots are populated. The calendar in use
+#		is determined by the date itself relative to:
+#	changeover -- Julian day on which the Gregorian calendar was
+#		adopted in the current locale.
+#
+# Results:
+#	Returns the given dictionary augmented with a 'julianDay' key whose
+#	value is the desired Julian Day Number, and a 'gregorian' key that
+#	specifies whether the calendar is Gregorian (1) or Julian (0).
+#
+# Side effects:
+#	None.
+#
+# Bugs:
+#	This code needs to be moved to the C layer.
+#
+#----------------------------------------------------------------------
+
+proc ::tcl::clock::GetJulianDayFromEraYearDay {date changeover} {
+    # Get absolute year number from the civil year
+
+    switch -exact -- [dict get $date era] {
+	BCE {
+	    set year [expr { 1 - [dict get $date year] }]
+	}
+	CE {
+	    set year [dict get $date year]
+	}
+    }
+    set ym1 [expr { $year - 1 }]
+
+    # Try the Gregorian calendar first.
+
+    dict set date gregorian 1
+    set jd [expr {
+	1721425
+	+ [dict get $date dayOfYear]
+	+ ( 365 * $ym1 )
+	+ ( $ym1 / 4 )
+	- ( $ym1 / 100 )
+	+ ( $ym1 / 400 )
+    }]
+
+    # If the date is before the Gregorian change, use the Julian calendar.
+
+    if { $jd < $changeover } {
+	dict set date gregorian 0
+	set jd [expr {
+	    1721423
+	    + [dict get $date dayOfYear]
+	    + ( 365 * $ym1 )
+	    + ( $ym1 / 4 )
+	}]
+    }
+
+    dict set date julianDay $jd
+    return $date
+}
+
+#----------------------------------------------------------------------
+#
+# GetJulianDayFromEraYearMonthWeekDay --
+#
+#	Determines the Julian Day number corresponding to the nth given
+#	day-of-the-week in a given month.
+#
+# Parameters:
+#	date - Dictionary containing the keys, 'era', 'year', 'month',
+#	       'weekOfMonth', 'dayOfWeek', and 'dayOfWeekInMonth'.
+#	changeover - Julian Day of adoption of the Gregorian calendar
+#
+# Results:
+#	Returns the given dictionary, augmented with a 'julianDay' key.
+#
+# Side effects:
+#	None.
+#
+# Bugs:
+#	This code needs to be moved to the C layer.
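+#
+# Example (illustrative): the second Sunday in March 2024 is requested with
+# era CE, year 2024, month 3, dayOfWeek 0 and dayOfWeekInMonth 2; the proc
+# anchors on the zeroeth day of March and steps forward two weeks.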
+# +#---------------------------------------------------------------------- + +proc ::tcl::clock::GetJulianDayFromEraYearMonthWeekDay {date changeover} { + # Come up with a reference day; either the zeroeth day of the given month + # (dayOfWeekInMonth >= 0) or the seventh day of the following month + # (dayOfWeekInMonth < 0) + + set date2 $date + set week [dict get $date dayOfWeekInMonth] + if { $week >= 0 } { + dict set date2 dayOfMonth 0 + } else { + dict incr date2 month + dict set date2 dayOfMonth 7 + } + set date2 [GetJulianDayFromEraYearMonthDay $date2[set date2 {}] \ + $changeover] + set wd0 [WeekdayOnOrBefore [dict get $date dayOfWeek] \ + [dict get $date2 julianDay]] + dict set date julianDay [expr { $wd0 + 7 * $week }] + return $date +} + +#---------------------------------------------------------------------- +# +# IsGregorianLeapYear -- +# +# Determines whether a given date represents a leap year in the +# Gregorian calendar. +# +# Parameters: +# date -- The date to test. The fields, 'era', 'year' and 'gregorian' +# must be set. +# +# Results: +# Returns 1 if the year is a leap year, 0 otherwise. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::IsGregorianLeapYear { date } { + switch -exact -- [dict get $date era] { + BCE { + set year [expr { 1 - [dict get $date year]}] + } + CE { + set year [dict get $date year] + } + } + if { $year % 4 != 0 } { + return 0 + } elseif { ![dict get $date gregorian] } { + return 1 + } elseif { $year % 400 == 0 } { + return 1 + } elseif { $year % 100 == 0 } { + return 0 + } else { + return 1 + } +} + +#---------------------------------------------------------------------- +# +# WeekdayOnOrBefore -- +# +# Determine the nearest day of week (given by the 'weekday' parameter, +# Sunday==0) on or before a given Julian Day. +# +# Parameters: +# weekday -- Day of the week +# j -- Julian Day number +# +# Results: +# Returns the Julian Day Number of the desired date. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::WeekdayOnOrBefore { weekday j } { + set k [expr { ( $weekday + 6 ) % 7 }] + return [expr { $j - ( $j - $k ) % 7 }] +} + +#---------------------------------------------------------------------- +# +# BSearch -- +# +# Service procedure that does binary search in several places inside the +# 'clock' command. +# +# Parameters: +# list - List of lists, sorted in ascending order by the +# first elements +# key - Value to search for +# +# Results: +# Returns the index of the greatest element in $list that is less than +# or equal to $key. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::BSearch { list key } { + if {[llength $list] == 0} { + return -1 + } + if { $key < [lindex $list 0 0] } { + return -1 + } + + set l 0 + set u [expr { [llength $list] - 1 }] + + while { $l < $u } { + # At this point, we know that + # $k >= [lindex $list $l 0] + # Either $u == [llength $list] or else $k < [lindex $list $u+1 0] + # We find the midpoint of the interval {l,u} rounded UP, compare + # against it, and set l or u to maintain the invariant. Note that the + # interval shrinks at each step, guaranteeing convergence. 
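+        # For example (a sketch): with $list {{10 a} {20 b} {30 c}} and
+        # $key 25, the bounds move 0,2 -> (m=1, 20<=25, so l=1) ->
+        # (m=2, 30>25, so u=1), and the result is index 1.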
+ + set m [expr { ( $l + $u + 1 ) / 2 }] + if { $key >= [lindex $list $m 0] } { + set l $m + } else { + set u [expr { $m - 1 }] + } + } + + return $l +} + +#---------------------------------------------------------------------- +# +# clock add -- +# +# Adds an offset to a given time. +# +# Syntax: +# clock add clockval ?count unit?... ?-option value? +# +# Parameters: +# clockval -- Starting time value +# count -- Amount of a unit of time to add +# unit -- Unit of time to add, must be one of: +# years year months month weeks week +# days day hours hour minutes minute +# seconds second +# +# Options: +# -gmt BOOLEAN +# (Deprecated) Flag synonymous with '-timezone :GMT' +# -timezone ZONE +# Name of the time zone in which calculations are to be done. +# -locale NAME +# Name of the locale in which calculations are to be done. +# Used to determine the Gregorian change date. +# +# Results: +# Returns the given time adjusted by the given offset(s) in +# order. +# +# Notes: +# It is possible that adding a number of months or years will adjust the +# day of the month as well. For instance, the time at one month after +# 31 January is either 28 or 29 February, because February has fewer +# than 31 days. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::add { clockval args } { + if { [llength $args] % 2 != 0 } { + set cmdName "clock add" + return -code error \ + -errorcode [list CLOCK wrongNumArgs] \ + "wrong \# args: should be\ + \"$cmdName clockval ?number units?...\ + ?-gmt boolean? ?-locale LOCALE? ?-timezone ZONE?\"" + } + if { [catch { expr {wide($clockval)} } result] } { + return -code error $result + } + + set offsets {} + set gmt 0 + set locale c + set timezone [GetSystemTimeZone] + + foreach { a b } $args { + if { [string is integer -strict $a] } { + lappend offsets $a $b + } else { + switch -exact -- $a { + -g - -gm - -gmt { + set saw(-gmt) {} + set gmt $b + } + -l - -lo - -loc - -loca - -local - -locale { + set locale [string tolower $b] + } + -t - -ti - -tim - -time - -timez - -timezo - -timezon - + -timezone { + set saw(-timezone) {} + set timezone $b + } + default { + throw [list CLOCK badOption $a] \ + "bad option \"$a\",\ + must be -gmt, -locale or -timezone" + } + } + } + } + + # Check options for validity + + if { [info exists saw(-gmt)] && [info exists saw(-timezone)] } { + return -code error \ + -errorcode [list CLOCK gmtWithTimezone] \ + "cannot use -gmt and -timezone in same call" + } + if { [catch { expr { wide($clockval) } } result] } { + return -code error "expected integer but got \"$clockval\"" + } + if { ![string is boolean -strict $gmt] } { + return -code error "expected boolean value but got \"$gmt\"" + } elseif { $gmt } { + set timezone :GMT + } + + EnterLocale $locale + + set changeover [mc GREGORIAN_CHANGE_DATE] + + if {[catch {SetupTimeZone $timezone} retval opts]} { + dict unset opts -errorinfo + return -options $opts $retval + } + + try { + foreach { quantity unit } $offsets { + switch -exact -- $unit { + years - year { + set clockval [AddMonths [expr { 12 * $quantity }] \ + $clockval $timezone $changeover] + } + months - month { + set clockval [AddMonths $quantity $clockval $timezone \ + $changeover] + } + + weeks - week { + set clockval [AddDays [expr { 7 * $quantity }] \ + $clockval $timezone $changeover] + } + days - day { + set clockval [AddDays $quantity $clockval $timezone \ + $changeover] + } + + hours - hour { + set clockval [expr { 3600 * $quantity + $clockval }] + } + minutes - minute { + set clockval 
[expr { 60 * $quantity + $clockval }] + } + seconds - second { + set clockval [expr { $quantity + $clockval }] + } + + default { + throw [list CLOCK badUnit $unit] \ + "unknown unit \"$unit\", must be \ + years, months, weeks, days, hours, minutes or seconds" + } + } + } + return $clockval + } trap CLOCK {result opts} { + # Conceal the innards of [clock] when it's an expected error + dict unset opts -errorinfo + return -options $opts $result + } +} + +#---------------------------------------------------------------------- +# +# AddMonths -- +# +# Add a given number of months to a given clock value in a given +# time zone. +# +# Parameters: +# months - Number of months to add (may be negative) +# clockval - Seconds since the epoch before the operation +# timezone - Time zone in which the operation is to be performed +# +# Results: +# Returns the new clock value as a number of seconds since +# the epoch. +# +# Side effects: +# None. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::AddMonths { months clockval timezone changeover } { + variable DaysInRomanMonthInCommonYear + variable DaysInRomanMonthInLeapYear + variable TZData + + # Convert the time to year, month, day, and fraction of day. + + set date [GetDateFields $clockval $TZData($timezone) $changeover] + dict set date secondOfDay [expr { + [dict get $date localSeconds] % 86400 + }] + dict set date tzName $timezone + + # Add the requisite number of months + + set m [dict get $date month] + incr m $months + incr m -1 + set delta [expr { $m / 12 }] + set mm [expr { $m % 12 }] + dict set date month [expr { $mm + 1 }] + dict incr date year $delta + + # If the date doesn't exist in the current month, repair it + + if { [IsGregorianLeapYear $date] } { + set hath [lindex $DaysInRomanMonthInLeapYear $mm] + } else { + set hath [lindex $DaysInRomanMonthInCommonYear $mm] + } + if { [dict get $date dayOfMonth] > $hath } { + dict set date dayOfMonth $hath + } + + # Reconvert to a number of seconds + + set date [GetJulianDayFromEraYearMonthDay \ + $date[set date {}]\ + $changeover] + dict set date localSeconds [expr { + -210866803200 + + ( 86400 * wide([dict get $date julianDay]) ) + + [dict get $date secondOfDay] + }] + set date [ConvertLocalToUTC $date[set date {}] $TZData($timezone) \ + $changeover] + + return [dict get $date seconds] + +} + +#---------------------------------------------------------------------- +# +# AddDays -- +# +# Add a given number of days to a given clock value in a given time +# zone. +# +# Parameters: +# days - Number of days to add (may be negative) +# clockval - Seconds since the epoch before the operation +# timezone - Time zone in which the operation is to be performed +# changeover - Julian Day on which the Gregorian calendar was adopted +# in the target locale. +# +# Results: +# Returns the new clock value as a number of seconds since the epoch. +# +# Side effects: +# None. 
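+#
+# Example (illustrative): adding one day across a spring-forward DST
+# transition preserves the local time of day, so the result may be only 23
+# real hours later; the proc advances the Julian Day and keeps the local
+# second-of-day rather than adding 86400 seconds.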
+# +#---------------------------------------------------------------------- + +proc ::tcl::clock::AddDays { days clockval timezone changeover } { + variable TZData + + # Convert the time to Julian Day + + set date [GetDateFields $clockval $TZData($timezone) $changeover] + dict set date secondOfDay [expr { + [dict get $date localSeconds] % 86400 + }] + dict set date tzName $timezone + + # Add the requisite number of days + + dict incr date julianDay $days + + # Reconvert to a number of seconds + + dict set date localSeconds [expr { + -210866803200 + + ( 86400 * wide([dict get $date julianDay]) ) + + [dict get $date secondOfDay] + }] + set date [ConvertLocalToUTC $date[set date {}] $TZData($timezone) \ + $changeover] + + return [dict get $date seconds] + +} + +#---------------------------------------------------------------------- +# +# ChangeCurrentLocale -- +# +# The global locale was changed within msgcat. +# Clears the buffered parse functions of the current locale. +# +# Parameters: +# loclist (ignored) +# +# Results: +# None. +# +# Side effects: +# Buffered parse functions are cleared. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ChangeCurrentLocale {args} { + variable FormatProc + variable LocaleNumeralCache + variable CachedSystemTimeZone + variable TimeZoneBad + + foreach p [info procs [namespace current]::scanproc'*'current] { + rename $p {} + } + foreach p [info procs [namespace current]::formatproc'*'current] { + rename $p {} + } + + catch {array unset FormatProc *'current} + set LocaleNumeralCache {} +} + +#---------------------------------------------------------------------- +# +# ClearCaches -- +# +# Clears all caches to reclaim the memory used in [clock] +# +# Parameters: +# None. +# +# Results: +# None. +# +# Side effects: +# Caches are cleared. +# +#---------------------------------------------------------------------- + +proc ::tcl::clock::ClearCaches {} { + variable FormatProc + variable LocaleNumeralCache + variable CachedSystemTimeZone + variable TimeZoneBad + + foreach p [info procs [namespace current]::scanproc'*] { + rename $p {} + } + foreach p [info procs [namespace current]::formatproc'*] { + rename $p {} + } + + catch {unset FormatProc} + set LocaleNumeralCache {} + catch {unset CachedSystemTimeZone} + set TimeZoneBad {} + InitTZData +} diff --git a/llava/lib/tcl8.6/history.tcl b/llava/lib/tcl8.6/history.tcl new file mode 100644 index 0000000000000000000000000000000000000000..f06ffc94138f33c536a1aefa0eecd2413c6e7ee6 --- /dev/null +++ b/llava/lib/tcl8.6/history.tcl @@ -0,0 +1,335 @@ +# history.tcl -- +# +# Implementation of the history command. +# +# Copyright (c) 1997 Sun Microsystems, Inc. +# +# See the file "license.terms" for information on usage and redistribution of +# this file, and for a DISCLAIMER OF ALL WARRANTIES. +# + +# The tcl::history array holds the history list and some additional +# bookkeeping variables. +# +# nextid the index used for the next history list item. +# keep the max size of the history list +# oldest the index of the oldest item in the history. 
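+#
+# For instance (illustrative values): after three commands under the default
+# keep of 20, the array holds nextid 3, oldest -17, and history(1) through
+# history(3) contain the saved command strings.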
+ +namespace eval ::tcl { + variable history + if {![info exists history]} { + array set history { + nextid 0 + keep 20 + oldest -20 + } + } + + namespace ensemble create -command ::tcl::history -map { + add ::tcl::HistAdd + change ::tcl::HistChange + clear ::tcl::HistClear + event ::tcl::HistEvent + info ::tcl::HistInfo + keep ::tcl::HistKeep + nextid ::tcl::HistNextID + redo ::tcl::HistRedo + } +} + +# history -- +# +# This is the main history command. See the man page for its interface. +# This does some argument checking and calls the helper ensemble in the +# tcl namespace. + +proc ::history {args} { + # If no command given, we're doing 'history info'. Can't be done with an + # ensemble unknown handler, as those don't fire when no subcommand is + # given at all. + + if {![llength $args]} { + set args info + } + + # Tricky stuff needed to make stack and errors come out right! + tailcall apply {arglist {tailcall ::tcl::history {*}$arglist} ::tcl} $args +} + +# (unnamed) -- +# +# Callback when [::history] is destroyed. Destroys the implementation. +# +# Parameters: +# oldName what the command was called. +# newName what the command is now called (an empty string). +# op the operation (= delete). +# +# Results: +# none +# +# Side Effects: +# The implementation of the [::history] command ceases to exist. + +trace add command ::history delete [list apply {{oldName newName op} { + variable history + unset -nocomplain history + foreach c [info procs ::tcl::Hist*] { + rename $c {} + } + rename ::tcl::history {} +} ::tcl}] + +# tcl::HistAdd -- +# +# Add an item to the history, and optionally eval it at the global scope +# +# Parameters: +# event the command to add +# exec (optional) a substring of "exec" causes the command to +# be evaled. +# Results: +# If executing, then the results of the command are returned +# +# Side Effects: +# Adds to the history list + +proc ::tcl::HistAdd {event {exec {}}} { + variable history + + if { + [prefix longest {exec {}} $exec] eq "" + && [llength [info level 0]] == 3 + } then { + return -code error "bad argument \"$exec\": should be \"exec\"" + } + + # Do not add empty commands to the history + if {[string trim $event] eq ""} { + return "" + } + + # Maintain the history + set history([incr history(nextid)]) $event + unset -nocomplain history([incr history(oldest)]) + + # Only execute if 'exec' (or non-empty prefix of it) given + if {$exec eq ""} { + return "" + } + tailcall eval $event +} + +# tcl::HistKeep -- +# +# Set or query the limit on the length of the history list +# +# Parameters: +# limit (optional) the length of the history list +# +# Results: +# If no limit is specified, the current limit is returned +# +# Side Effects: +# Updates history(keep) if a limit is specified + +proc ::tcl::HistKeep {{count {}}} { + variable history + if {[llength [info level 0]] == 1} { + return $history(keep) + } + if {![string is integer -strict $count] || ($count < 0)} { + return -code error "illegal keep count \"$count\"" + } + set oldold $history(oldest) + set history(oldest) [expr {$history(nextid) - $count}] + for {} {$oldold <= $history(oldest)} {incr oldold} { + unset -nocomplain history($oldold) + } + set history(keep) $count +} + +# tcl::HistClear -- +# +# Erase the history list +# +# Parameters: +# none +# +# Results: +# none +# +# Side Effects: +# Resets the history array, except for the keep limit + +proc ::tcl::HistClear {} { + variable history + set keep $history(keep) + unset history + array set history [list \ + nextid 0 \ + keep $keep \ + oldest 
-$keep \ + ] +} + +# tcl::HistInfo -- +# +# Return a pretty-printed version of the history list +# +# Parameters: +# num (optional) the length of the history list to return +# +# Results: +# A formatted history list + +proc ::tcl::HistInfo {{count {}}} { + variable history + if {[llength [info level 0]] == 1} { + set count [expr {$history(keep) + 1}] + } elseif {![string is integer -strict $count]} { + return -code error "bad integer \"$count\"" + } + set result {} + set newline "" + for {set i [expr {$history(nextid) - $count + 1}]} \ + {$i <= $history(nextid)} {incr i} { + if {![info exists history($i)]} { + continue + } + set cmd [string map [list \n \n\t] [string trimright $history($i) \ \n]] + append result $newline[format "%6d %s" $i $cmd] + set newline \n + } + return $result +} + +# tcl::HistRedo -- +# +# Fetch the previous or specified event, execute it, and then replace +# the current history item with that event. +# +# Parameters: +# event (optional) index of history item to redo. Defaults to -1, +# which means the previous event. +# +# Results: +# Those of the command being redone. +# +# Side Effects: +# Replaces the current history list item with the one being redone. + +proc ::tcl::HistRedo {{event -1}} { + variable history + + set i [HistIndex $event] + if {$i == $history(nextid)} { + return -code error "cannot redo the current event" + } + set cmd $history($i) + HistChange $cmd 0 + tailcall eval $cmd +} + +# tcl::HistIndex -- +# +# Map from an event specifier to an index in the history list. +# +# Parameters: +# event index of history item to redo. +# If this is a positive number, it is used directly. +# If it is a negative number, then it counts back to a previous +# event, where -1 is the most recent event. +# A string can be matched, either by being the prefix of a +# command or by matching a command with string match. +# +# Results: +# The index into history, or an error if the index didn't match. + +proc ::tcl::HistIndex {event} { + variable history + if {![string is integer -strict $event]} { + for {set i [expr {$history(nextid)-1}]} {[info exists history($i)]} \ + {incr i -1} { + if {[string match $event* $history($i)]} { + return $i + } + if {[string match $event $history($i)]} { + return $i + } + } + return -code error "no event matches \"$event\"" + } elseif {$event <= 0} { + set i [expr {$history(nextid) + $event}] + } else { + set i $event + } + if {$i <= $history(oldest)} { + return -code error "event \"$event\" is too far in the past" + } + if {$i > $history(nextid)} { + return -code error "event \"$event\" hasn't occurred yet" + } + return $i +} + +# tcl::HistEvent -- +# +# Map from an event specifier to the value in the history list. +# +# Parameters: +# event index of history item to redo. See index for a description of +# possible event patterns. +# +# Results: +# The value from the history list. + +proc ::tcl::HistEvent {{event -1}} { + variable history + set i [HistIndex $event] + if {![info exists history($i)]} { + return "" + } + return [string trimright $history($i) \ \n] +} + +# tcl::HistChange -- +# +# Replace a value in the history list. +# +# Parameters: +# newValue The new value to put into the history list. +# event (optional) index of history item to redo. See index for a +# description of possible event patterns. This defaults to 0, +# which specifies the current event. +# +# Side Effects: +# Changes the history list. 
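+#
+# Example (illustrative): 'history change {set x 1}' rewrites the current
+# event, while 'history change {set x 1} -1' rewrites the previous one.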
+ +proc ::tcl::HistChange {newValue {event 0}} { + variable history + set i [HistIndex $event] + set history($i) $newValue +} + +# tcl::HistNextID -- +# +# Returns the number of the next history event. +# +# Parameters: +# None. +# +# Side Effects: +# None. + +proc ::tcl::HistNextID {} { + variable history + return [expr {$history(nextid) + 1}] +} + +return + +# Local Variables: +# mode: tcl +# fill-column: 78 +# End: diff --git a/llava/lib/tcl8.6/init.tcl b/llava/lib/tcl8.6/init.tcl new file mode 100644 index 0000000000000000000000000000000000000000..1a512940197987010b2517780971b59956a5ad28 --- /dev/null +++ b/llava/lib/tcl8.6/init.tcl @@ -0,0 +1,827 @@ +# init.tcl -- +# +# Default system startup file for Tcl-based applications. Defines +# "unknown" procedure and auto-load facilities. +# +# Copyright (c) 1991-1993 The Regents of the University of California. +# Copyright (c) 1994-1996 Sun Microsystems, Inc. +# Copyright (c) 1998-1999 Scriptics Corporation. +# Copyright (c) 2004 Kevin B. Kenny. All rights reserved. +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. +# + +# This test intentionally written in pre-7.5 Tcl +if {[info commands package] == ""} { + error "version mismatch: library\nscripts expect Tcl version 7.5b1 or later but the loaded version is\nonly [info patchlevel]" +} +package require -exact Tcl 8.6.14 + +# Compute the auto path to use in this interpreter. +# The values on the path come from several locations: +# +# The environment variable TCLLIBPATH +# +# tcl_library, which is the directory containing this init.tcl script. +# [tclInit] (Tcl_Init()) searches around for the directory containing this +# init.tcl and defines tcl_library to that location before sourcing it. +# +# The parent directory of tcl_library. Adding the parent +# means that packages in peer directories will be found automatically. +# +# Also add the directory ../lib relative to the directory where the +# executable is located. This is meant to find binary packages for the +# same architecture as the current executable. +# +# tcl_pkgPath, which is set by the platform-specific initialization routines +# On UNIX it is compiled in +# On Windows, it is not used +# +# (Ticket 41c9857bdd) In a safe interpreter, this file does not set +# ::auto_path (other than to {} if it is undefined). The caller, typically +# a Safe Base command, is responsible for setting ::auto_path. 
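+#
+# TCLLIBPATH is consumed as a Tcl list, so a setting such as (hypothetical
+# directories) TCLLIBPATH="/opt/tcl/lib /usr/local/lib/tcl" seeds ::auto_path
+# with those two entries before the standard locations are appended below.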
+ +if {![info exists auto_path]} { + if {[info exists env(TCLLIBPATH)] && (![interp issafe])} { + set auto_path $env(TCLLIBPATH) + } else { + set auto_path "" + } +} +namespace eval tcl { + if {![interp issafe]} { + variable Dir + foreach Dir [list $::tcl_library [file dirname $::tcl_library]] { + if {$Dir ni $::auto_path} { + lappend ::auto_path $Dir + } + } + set Dir [file join [file dirname [file dirname \ + [info nameofexecutable]]] lib] + if {$Dir ni $::auto_path} { + lappend ::auto_path $Dir + } + if {[info exists ::tcl_pkgPath]} { catch { + foreach Dir $::tcl_pkgPath { + if {$Dir ni $::auto_path} { + lappend ::auto_path $Dir + } + } + }} + + variable Path [encoding dirs] + set Dir [file join $::tcl_library encoding] + if {$Dir ni $Path} { + lappend Path $Dir + encoding dirs $Path + } + unset Dir Path + } + + # TIP #255 min and max functions + namespace eval mathfunc { + proc min {args} { + if {![llength $args]} { + return -code error \ + "not enough arguments to math function \"min\"" + } + set val Inf + foreach arg $args { + # This will handle forcing the numeric value without + # ruining the internal type of a numeric object + if {[catch {expr {double($arg)}} err]} { + return -code error $err + } + if {$arg < $val} {set val $arg} + } + return $val + } + proc max {args} { + if {![llength $args]} { + return -code error \ + "not enough arguments to math function \"max\"" + } + set val -Inf + foreach arg $args { + # This will handle forcing the numeric value without + # ruining the internal type of a numeric object + if {[catch {expr {double($arg)}} err]} { + return -code error $err + } + if {$arg > $val} {set val $arg} + } + return $val + } + namespace export min max + } +} + +# Windows specific end of initialization + +if {(![interp issafe]) && ($tcl_platform(platform) eq "windows")} { + namespace eval tcl { + proc EnvTraceProc {lo n1 n2 op} { + global env + set x $env($n2) + set env($lo) $x + set env([string toupper $lo]) $x + } + proc InitWinEnv {} { + global env tcl_platform + foreach p [array names env] { + set u [string toupper $p] + if {$u ne $p} { + switch -- $u { + COMSPEC - + PATH { + set temp $env($p) + unset env($p) + set env($u) $temp + trace add variable env($p) write \ + [namespace code [list EnvTraceProc $p]] + trace add variable env($u) write \ + [namespace code [list EnvTraceProc $p]] + } + } + } + } + if {![info exists env(COMSPEC)]} { + set env(COMSPEC) cmd.exe + } + } + InitWinEnv + } +} + +# Setup the unknown package handler + + +if {[interp issafe]} { + package unknown {::tcl::tm::UnknownHandler ::tclPkgUnknown} +} else { + # Set up search for Tcl Modules (TIP #189). + # and setup platform specific unknown package handlers + if {$tcl_platform(os) eq "Darwin" + && $tcl_platform(platform) eq "unix"} { + package unknown {::tcl::tm::UnknownHandler \ + {::tcl::MacOSXPkgUnknown ::tclPkgUnknown}} + } else { + package unknown {::tcl::tm::UnknownHandler ::tclPkgUnknown} + } + + # Set up the 'clock' ensemble + + namespace eval ::tcl::clock [list variable TclLibDir $::tcl_library] + + proc ::tcl::initClock {} { + # Auto-loading stubs for 'clock.tcl' + + foreach cmd {add format scan} { + proc ::tcl::clock::$cmd args { + variable TclLibDir + source -encoding utf-8 [file join $TclLibDir clock.tcl] + return [uplevel 1 [info level 0]] + } + } + + rename ::tcl::initClock {} + } + ::tcl::initClock +} + +# Conditionalize for presence of exec. + +if {[namespace which -command exec] eq ""} { + + # Some machines do not have exec. 
Also, on all
+    # platforms, safe interpreters do not have exec.
+
+    set auto_noexec 1
+}
+
+# Define a log command (which can be overwritten to log errors differently,
+# especially when stderr is not available)
+
+if {[namespace which -command tclLog] eq ""} {
+    proc tclLog {string} {
+	catch {puts stderr $string}
+    }
+}
+
+# unknown --
+# This procedure is called when a Tcl command is invoked that doesn't
+# exist in the interpreter.  It takes the following steps to make the
+# command available:
+#
+#	1. See if the autoload facility can locate the command in a
+#	   Tcl script file.  If so, load it and execute it.
+#	2. If the command was invoked interactively at top-level:
+#	    (a) see if the command exists as an executable UNIX program.
+#		If so, "exec" the command.
+#	    (b) see if the command requests csh-like history substitution
+#		in one of the common forms !!, !<number>, or ^old^new.  If
+#		so, emulate csh's history substitution.
+#	    (c) see if the command is a unique abbreviation for another
+#		command.  If so, invoke the command.
+#
+# Arguments:
+#	args -	A list whose elements are the words of the original
+#		command, including the command name.
+
+proc unknown args {
+    variable ::tcl::UnknownPending
+    global auto_noexec auto_noload env tcl_interactive errorInfo errorCode
+
+    if {[info exists errorInfo]} {
+	set savedErrorInfo $errorInfo
+    }
+    if {[info exists errorCode]} {
+	set savedErrorCode $errorCode
+    }
+
+    set name [lindex $args 0]
+    if {![info exists auto_noload]} {
+	#
+	# Make sure we're not trying to load the same proc twice.
+	#
+	if {[info exists UnknownPending($name)]} {
+	    return -code error "self-referential recursion\
+		    in \"unknown\" for command \"$name\""
+	}
+	set UnknownPending($name) pending
+	set ret [catch {
+	    auto_load $name [uplevel 1 {::namespace current}]
+	} msg opts]
+	unset UnknownPending($name)
+	if {$ret != 0} {
+	    dict append opts -errorinfo "\n    (autoloading \"$name\")"
+	    return -options $opts $msg
+	}
+	if {![array size UnknownPending]} {
+	    unset UnknownPending
+	}
+	if {$msg} {
+	    if {[info exists savedErrorCode]} {
+		set ::errorCode $savedErrorCode
+	    } else {
+		unset -nocomplain ::errorCode
+	    }
+	    if {[info exists savedErrorInfo]} {
+		set errorInfo $savedErrorInfo
+	    } else {
+		unset -nocomplain errorInfo
+	    }
+	    set code [catch {uplevel 1 $args} msg opts]
+	    if {$code == 1} {
+		#
+		# Compute stack trace contribution from the [uplevel].
+		# Note the dependence on how Tcl_AddErrorInfo, etc.
+		# construct the stack trace.
+		#
+		set errInfo [dict get $opts -errorinfo]
+		set errCode [dict get $opts -errorcode]
+		set cinfo $args
+		if {[string bytelength $cinfo] > 150} {
+		    set cinfo [string range $cinfo 0 150]
+		    while {[string bytelength $cinfo] > 150} {
+			set cinfo [string range $cinfo 0 end-1]
+		    }
+		    append cinfo ...
+		}
+		set tail "\n    (\"uplevel\" body line 1)\n    invoked\
+			from within\n\"uplevel 1 \$args\""
+		set expect "$msg\n    while executing\n\"$cinfo\"$tail"
+		if {$errInfo eq $expect} {
+		    #
+		    # The stack has only the eval from the expanded command
+		    # Do not generate any stack trace here.
+		    #
+		    dict unset opts -errorinfo
+		    dict incr opts -level
+		    return -options $opts $msg
+		}
+		#
+		# Stack trace is nested, trim off just the contribution
+		# from the extra "eval" of $args due to the "catch" above.
+ # + set last [string last $tail $errInfo] + if {$last + [string length $tail] != [string length $errInfo]} { + # Very likely cannot happen + return -options $opts $msg + } + set errInfo [string range $errInfo 0 $last-1] + set tail "\"$cinfo\"" + set last [string last $tail $errInfo] + if {$last < 0 || $last + [string length $tail] != [string length $errInfo]} { + return -code error -errorcode $errCode \ + -errorinfo $errInfo $msg + } + set errInfo [string range $errInfo 0 $last-1] + set tail "\n invoked from within\n" + set last [string last $tail $errInfo] + if {$last + [string length $tail] == [string length $errInfo]} { + return -code error -errorcode $errCode \ + -errorinfo [string range $errInfo 0 $last-1] $msg + } + set tail "\n while executing\n" + set last [string last $tail $errInfo] + if {$last + [string length $tail] == [string length $errInfo]} { + return -code error -errorcode $errCode \ + -errorinfo [string range $errInfo 0 $last-1] $msg + } + return -options $opts $msg + } else { + dict incr opts -level + return -options $opts $msg + } + } + } + + if {([info level] == 1) && ([info script] eq "") + && [info exists tcl_interactive] && $tcl_interactive} { + if {![info exists auto_noexec]} { + set new [auto_execok $name] + if {$new ne ""} { + set redir "" + if {[namespace which -command console] eq ""} { + set redir ">&@stdout <@stdin" + } + uplevel 1 [list ::catch \ + [concat exec $redir $new [lrange $args 1 end]] \ + ::tcl::UnknownResult ::tcl::UnknownOptions] + dict incr ::tcl::UnknownOptions -level + return -options $::tcl::UnknownOptions $::tcl::UnknownResult + } + } + if {$name eq "!!"} { + set newcmd [history event] + } elseif {[regexp {^!(.+)$} $name -> event]} { + set newcmd [history event $event] + } elseif {[regexp {^\^([^^]*)\^([^^]*)\^?$} $name -> old new]} { + set newcmd [history event -1] + catch {regsub -all -- $old $newcmd $new newcmd} + } + if {[info exists newcmd]} { + tclLog $newcmd + history change $newcmd 0 + uplevel 1 [list ::catch $newcmd \ + ::tcl::UnknownResult ::tcl::UnknownOptions] + dict incr ::tcl::UnknownOptions -level + return -options $::tcl::UnknownOptions $::tcl::UnknownResult + } + + set ret [catch [list uplevel 1 [list info commands $name*]] candidates] + if {$name eq "::"} { + set name "" + } + if {$ret != 0} { + dict append opts -errorinfo \ + "\n (expanding command prefix \"$name\" in unknown)" + return -options $opts $candidates + } + # Filter out bogus matches when $name contained + # a glob-special char [Bug 946952] + if {$name eq ""} { + # Handle empty $name separately due to strangeness + # in [string first] (See RFE 1243354) + set cmds $candidates + } else { + set cmds [list] + foreach x $candidates { + if {[string first $name $x] == 0} { + lappend cmds $x + } + } + } + if {[llength $cmds] == 1} { + uplevel 1 [list ::catch [lreplace $args 0 0 [lindex $cmds 0]] \ + ::tcl::UnknownResult ::tcl::UnknownOptions] + dict incr ::tcl::UnknownOptions -level + return -options $::tcl::UnknownOptions $::tcl::UnknownResult + } + if {[llength $cmds]} { + return -code error "ambiguous command name \"$name\": [lsort $cmds]" + } + } + return -code error -errorcode [list TCL LOOKUP COMMAND $name] \ + "invalid command name \"$name\"" +} + +# auto_load -- +# Checks a collection of library directories to see if a procedure +# is defined in one of them. If so, it sources the appropriate +# library file to create the procedure. Returns 1 if it successfully +# loaded the procedure, 0 otherwise. 
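+#
+# For example (a sketch that assumes a tclIndex entry exists): calling
+# 'auto_load parray' evals the indexed script - a 'source' of parray.tcl
+# from its library directory - in the :: namespace, then verifies the
+# result with [namespace which -command parray].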
+# +# Arguments: +# cmd - Name of the command to find and load. +# namespace (optional) The namespace where the command is being used - must be +# a canonical namespace as returned [namespace current] +# for instance. If not given, namespace current is used. + +proc auto_load {cmd {namespace {}}} { + global auto_index auto_path + + if {$namespace eq ""} { + set namespace [uplevel 1 [list ::namespace current]] + } + set nameList [auto_qualify $cmd $namespace] + # workaround non canonical auto_index entries that might be around + # from older auto_mkindex versions + lappend nameList $cmd + foreach name $nameList { + if {[info exists auto_index($name)]} { + namespace eval :: $auto_index($name) + # There's a couple of ways to look for a command of a given + # name. One is to use + # info commands $name + # Unfortunately, if the name has glob-magic chars in it like * + # or [], it may not match. For our purposes here, a better + # route is to use + # namespace which -command $name + if {[namespace which -command $name] ne ""} { + return 1 + } + } + } + if {![info exists auto_path]} { + return 0 + } + + if {![auto_load_index]} { + return 0 + } + foreach name $nameList { + if {[info exists auto_index($name)]} { + namespace eval :: $auto_index($name) + if {[namespace which -command $name] ne ""} { + return 1 + } + } + } + return 0 +} + +# auto_load_index -- +# Loads the contents of tclIndex files on the auto_path directory +# list. This is usually invoked within auto_load to load the index +# of available commands. Returns 1 if the index is loaded, and 0 if +# the index is already loaded and up to date. +# +# Arguments: +# None. + +proc auto_load_index {} { + variable ::tcl::auto_oldpath + global auto_index auto_path + + if {[info exists auto_oldpath] && ($auto_oldpath eq $auto_path)} { + return 0 + } + set auto_oldpath $auto_path + + # Check if we are a safe interpreter. In that case, we support only + # newer format tclIndex files. + + set issafe [interp issafe] + for {set i [expr {[llength $auto_path] - 1}]} {$i >= 0} {incr i -1} { + set dir [lindex $auto_path $i] + set f "" + if {$issafe} { + catch {source [file join $dir tclIndex]} + } elseif {[catch {set f [open [file join $dir tclIndex]]}]} { + continue + } else { + set error [catch { + fconfigure $f -eofchar "\x1A {}" + set id [gets $f] + if {$id eq "# Tcl autoload index file, version 2.0"} { + eval [read $f] + } elseif {$id eq "# Tcl autoload index file: each line identifies a Tcl"} { + while {[gets $f line] >= 0} { + if {([string index $line 0] eq "#") \ + || ([llength $line] != 2)} { + continue + } + set name [lindex $line 0] + set auto_index($name) \ + "source [file join $dir [lindex $line 1]]" + } + } else { + error "[file join $dir tclIndex] isn't a proper Tcl index file" + } + } msg opts] + if {$f ne ""} { + close $f + } + if {$error} { + return -options $opts $msg + } + } + } + return 1 +} + +# auto_qualify -- +# +# Compute a fully qualified names list for use in the auto_index array. +# For historical reasons, commands in the global namespace do not have leading +# :: in the index key. The list has two elements when the command name is +# relative (no leading ::) and the namespace is not the global one. Otherwise +# only one name is returned (and searched in the auto_index). +# +# Arguments - +# cmd The command name. Can be any name accepted for command +# invocations (Like "foo::::bar"). 
+#	namespace	The namespace where the command is being used - must be
+#			a canonical namespace as returned by [namespace current]
+#			for instance.
+
+proc auto_qualify {cmd namespace} {
+
+    # count separators and clean them up
+    # (making sure that foo:::::bar will be treated as foo::bar)
+    set n [regsub -all {::+} $cmd :: cmd]
+
+    # Ignore namespace if the name starts with ::
+    # Handle special case of only leading ::
+
+    # Before each return case we give an example of which category it is
+    # with the following form :
+    # (inputCmd, inputNameSpace) -> output
+
+    if {[string match ::* $cmd]} {
+	if {$n > 1} {
+	    # (::foo::bar , *) -> ::foo::bar
+	    return [list $cmd]
+	} else {
+	    # (::global , *) -> global
+	    return [list [string range $cmd 2 end]]
+	}
+    }
+
+    # Potentially returning 2 elements to try :
+    # (if the current namespace is not the global one)
+
+    if {$n == 0} {
+	if {$namespace eq "::"} {
+	    # (nocolons , ::) -> nocolons
+	    return [list $cmd]
+	} else {
+	    # (nocolons , ::sub) -> ::sub::nocolons nocolons
+	    return [list ${namespace}::$cmd $cmd]
+	}
+    } elseif {$namespace eq "::"} {
+	# (foo::bar , ::) -> ::foo::bar
+	return [list ::$cmd]
+    } else {
+	# (foo::bar , ::sub) -> ::sub::foo::bar ::foo::bar
+	return [list ${namespace}::$cmd ::$cmd]
+    }
+}
+
+# auto_import --
+#
+# Invoked during "namespace import" to see if the imported commands
+# reside in an autoloaded library.  If so, the commands are loaded so
+# that they will be available for the import links.  If not, then this
+# procedure does nothing.
+#
+# Arguments -
+#	pattern		The pattern of commands being imported (like "foo::*"),
+#			interpreted relative to the caller's namespace as
+#			returned by [namespace current].
+
+proc auto_import {pattern} {
+    global auto_index
+
+    # If no namespace is specified, this will be an error case
+
+    if {![string match *::* $pattern]} {
+	return
+    }
+
+    set ns [uplevel 1 [list ::namespace current]]
+    set patternList [auto_qualify $pattern $ns]
+
+    auto_load_index
+
+    foreach pattern $patternList {
+	foreach name [array names auto_index $pattern] {
+	    if {([namespace which -command $name] eq "")
+		    && ([namespace qualifiers $pattern] eq [namespace qualifiers $name])} {
+		namespace eval :: $auto_index($name)
+	    }
+	}
+    }
+}
+
+# auto_execok --
+#
+# Returns string that indicates name of program to execute if name
+# corresponds to a shell builtin or an executable in the Windows search
+# path, or "" otherwise.  Builds an associative array auto_execs that
+# caches information about previous checks, for speed.
+#
+# Arguments:
+#	name -	Name of a command.
+
+if {$tcl_platform(platform) eq "windows"} {
+# Windows version.
+#
+# Note that file executable doesn't work under Windows, so we have to
+# look for files with .exe, .com, or .bat extensions.  Also, the path
+# may be in the Path or PATH environment variables, and path
+# components are separated with semicolons, not colons as under Unix.
+#
+proc auto_execok name {
+    global auto_execs env tcl_platform
+
+    if {[info exists auto_execs($name)]} {
+	return $auto_execs($name)
+    }
+    set auto_execs($name) ""
+
+    set shellBuiltins [list assoc cls copy date del dir echo erase exit ftype \
+	    md mkdir mklink move rd ren rename rmdir start time type ver vol]
+    if {[info exists env(PATHEXT)]} {
+	# Add an initial ; to have the {} extension check first.
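+	# For instance (an illustrative value, not a default): with
+	# PATHEXT=".COM;.EXE;.BAT" the split below yields {{} .COM .EXE .BAT},
+	# so the bare name is probed before any extension is appended.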
+ set execExtensions [split ";$env(PATHEXT)" ";"] + } else { + set execExtensions [list {} .com .exe .bat .cmd] + } + + if {[string tolower $name] in $shellBuiltins} { + # When this is command.com for some reason on Win2K, Tcl won't + # exec it unless the case is right, which this corrects. COMSPEC + # may not point to a real file, so do the check. + set cmd $env(COMSPEC) + if {[file exists $cmd]} { + set cmd [file attributes $cmd -shortname] + } + return [set auto_execs($name) [list $cmd /c $name]] + } + + if {[llength [file split $name]] != 1} { + foreach ext $execExtensions { + set file ${name}${ext} + if {[file exists $file] && ![file isdirectory $file]} { + return [set auto_execs($name) [list $file]] + } + } + return "" + } + + set path "[file dirname [info nameof]];.;" + if {[info exists env(SystemRoot)]} { + set windir $env(SystemRoot) + } elseif {[info exists env(WINDIR)]} { + set windir $env(WINDIR) + } + if {[info exists windir]} { + if {$tcl_platform(os) eq "Windows NT"} { + append path "$windir/system32;" + } + append path "$windir/system;$windir;" + } + + foreach var {PATH Path path} { + if {[info exists env($var)]} { + append path ";$env($var)" + } + } + + foreach ext $execExtensions { + unset -nocomplain checked + foreach dir [split $path {;}] { + # Skip already checked directories + if {[info exists checked($dir)] || ($dir eq "")} { + continue + } + set checked($dir) {} + set file [file join $dir ${name}${ext}] + if {[file exists $file] && ![file isdirectory $file]} { + return [set auto_execs($name) [list $file]] + } + } + } + return "" +} + +} else { +# Unix version. +# +proc auto_execok name { + global auto_execs env + + if {[info exists auto_execs($name)]} { + return $auto_execs($name) + } + set auto_execs($name) "" + if {[llength [file split $name]] != 1} { + if {[file executable $name] && ![file isdirectory $name]} { + set auto_execs($name) [list $name] + } + return $auto_execs($name) + } + foreach dir [split $env(PATH) :] { + if {$dir eq ""} { + set dir . + } + set file [file join $dir $name] + if {[file executable $file] && ![file isdirectory $file]} { + set auto_execs($name) [list $file] + return $auto_execs($name) + } + } + return "" +} + +} + +# ::tcl::CopyDirectory -- +# +# This procedure is called by Tcl's core when attempts to call the +# filesystem's copydirectory function fail. The semantics of the call +# are that 'dest' does not yet exist, i.e. dest should become the exact +# image of src. If dest does exist, we throw an error. +# +# Note that making changes to this procedure can change the results +# of running Tcl's tests. +# +# Arguments: +# action - "renaming" or "copying" +# src - source directory +# dest - destination directory +proc tcl::CopyDirectory {action src dest} { + set nsrc [file normalize $src] + set ndest [file normalize $dest] + + if {$action eq "renaming"} { + # Can't rename volumes. We could give a more precise + # error message here, but that would break the test suite. 
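+        # ([file volumes] normally returns just / on Unix, or the mounted
+        # drive roots, e.g. A:/ C:/, on Windows.)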
+ if {$nsrc in [file volumes]} { + return -code error "error $action \"$src\" to\ + \"$dest\": trying to rename a volume or move a directory\ + into itself" + } + } + if {[file exists $dest]} { + if {$nsrc eq $ndest} { + return -code error "error $action \"$src\" to\ + \"$dest\": trying to rename a volume or move a directory\ + into itself" + } + if {$action eq "copying"} { + # We used to throw an error here, but, looking more closely + # at the core copy code in tclFCmd.c, if the destination + # exists, then we should only call this function if -force + # is true, which means we just want to over-write. So, + # the following code is now commented out. + # + # return -code error "error $action \"$src\" to\ + # \"$dest\": file already exists" + } else { + # Depending on the platform, and on the current + # working directory, the directories '.', '..' + # can be returned in various combinations. Anyway, + # if any other file is returned, we must signal an error. + set existing [glob -nocomplain -directory $dest * .*] + lappend existing {*}[glob -nocomplain -directory $dest \ + -type hidden * .*] + foreach s $existing { + if {[file tail $s] ni {. ..}} { + return -code error "error $action \"$src\" to\ + \"$dest\": file already exists" + } + } + } + } else { + if {[string first $nsrc $ndest] >= 0} { + set srclen [expr {[llength [file split $nsrc]] - 1}] + set ndest [lindex [file split $ndest] $srclen] + if {$ndest eq [file tail $nsrc]} { + return -code error "error $action \"$src\" to\ + \"$dest\": trying to rename a volume or move a directory\ + into itself" + } + } + file mkdir $dest + } + # Have to be careful to capture both visible and hidden files. + # We will also be more generous to the file system and not + # assume the hidden and non-hidden lists are non-overlapping. + # + # On Unix 'hidden' files begin with '.'. On other platforms + # or filesystems hidden files may have other interpretations. + set filelist [concat [glob -nocomplain -directory $src *] \ + [glob -nocomplain -directory $src -types hidden *]] + + foreach s [lsort -unique $filelist] { + if {[file tail $s] ni {. ..}} { + file copy -force -- $s [file join $dest [file tail $s]] + } + } + return +} diff --git a/llava/lib/tcl8.6/parray.tcl b/llava/lib/tcl8.6/parray.tcl new file mode 100644 index 0000000000000000000000000000000000000000..a9c2cb15a9bd81339f2d91535b3758bb4785023e --- /dev/null +++ b/llava/lib/tcl8.6/parray.tcl @@ -0,0 +1,28 @@ +# parray: +# Print the contents of a global array on stdout. +# +# Copyright (c) 1991-1993 The Regents of the University of California. +# Copyright (c) 1994 Sun Microsystems, Inc. +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. 
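+#
+# Example: at an interactive prompt,
+#     parray env *PATH*
+# prints every matching element of the global env array on stdout, one per
+# line, e.g.
+#     env(PATH) = /usr/local/bin:/usr/bin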
+# + +proc parray {a {pattern *}} { + upvar 1 $a array + if {![array exists array]} { + return -code error "\"$a\" isn't an array" + } + set maxl 0 + set names [lsort [array names array $pattern]] + foreach name $names { + if {[string length $name] > $maxl} { + set maxl [string length $name] + } + } + set maxl [expr {$maxl + [string length $a] + 2}] + foreach name $names { + set nameString [format %s(%s) $a $name] + puts stdout [format "%-*s = %s" $maxl $nameString $array($name)] + } +} diff --git a/llava/lib/tcl8.6/tclAppInit.c b/llava/lib/tcl8.6/tclAppInit.c new file mode 100644 index 0000000000000000000000000000000000000000..552f9e4b2426327929f79700720e085794a2a0f8 --- /dev/null +++ b/llava/lib/tcl8.6/tclAppInit.c @@ -0,0 +1,176 @@ +/* + * tclAppInit.c -- + * + * Provides a default version of the main program and Tcl_AppInit + * procedure for tclsh and other Tcl-based applications (without Tk). + * + * Copyright (c) 1993 The Regents of the University of California. + * Copyright (c) 1994-1997 Sun Microsystems, Inc. + * Copyright (c) 1998-1999 Scriptics Corporation. + * + * See the file "license.terms" for information on usage and redistribution of + * this file, and for a DISCLAIMER OF ALL WARRANTIES. + */ + +#undef BUILD_tcl +#undef STATIC_BUILD +#include "tcl.h" +#if TCL_MAJOR_VERSION < 9 && TCL_MINOR_VERSION < 7 +# define Tcl_LibraryInitProc Tcl_PackageInitProc +# define Tcl_StaticLibrary Tcl_StaticPackage +#endif + +#ifdef TCL_TEST +extern Tcl_LibraryInitProc Tcltest_Init; +extern Tcl_LibraryInitProc Tcltest_SafeInit; +#endif /* TCL_TEST */ + +#ifdef TCL_XT_TEST +extern void XtToolkitInitialize(void); +extern Tcl_LibraryInitProc Tclxttest_Init; +#endif /* TCL_XT_TEST */ + +/* + * The following #if block allows you to change the AppInit function by using + * a #define of TCL_LOCAL_APPINIT instead of rewriting this entire file. The + * #if checks for that #define and uses Tcl_AppInit if it does not exist. + */ + +#ifndef TCL_LOCAL_APPINIT +#define TCL_LOCAL_APPINIT Tcl_AppInit +#endif +#ifndef MODULE_SCOPE +# define MODULE_SCOPE extern +#endif +MODULE_SCOPE int TCL_LOCAL_APPINIT(Tcl_Interp *); +MODULE_SCOPE int main(int, char **); + +/* + * The following #if block allows you to change how Tcl finds the startup + * script, prime the library or encoding paths, fiddle with the argv, etc., + * without needing to rewrite Tcl_Main() + */ + +#ifdef TCL_LOCAL_MAIN_HOOK +MODULE_SCOPE int TCL_LOCAL_MAIN_HOOK(int *argc, char ***argv); +#endif + +/* + *---------------------------------------------------------------------- + * + * main -- + * + * This is the main program for the application. + * + * Results: + * None: Tcl_Main never returns here, so this procedure never returns + * either. + * + * Side effects: + * Just about anything, since from here we call arbitrary Tcl code. + * + *---------------------------------------------------------------------- + */ + +int +main( + int argc, /* Number of command-line arguments. */ + char *argv[]) /* Values of command-line arguments. */ +{ +#ifdef TCL_XT_TEST + XtToolkitInitialize(); +#endif + +#ifdef TCL_LOCAL_MAIN_HOOK + TCL_LOCAL_MAIN_HOOK(&argc, &argv); +#elif (TCL_MAJOR_VERSION > 8 || TCL_MINOR_VERSION > 6) && (!defined(_WIN32) || defined(UNICODE)) + /* New in Tcl 8.7. This doesn't work on Windows without UNICODE */ + TclZipfs_AppHook(&argc, &argv); +#endif + + Tcl_Main(argc, argv, TCL_LOCAL_APPINIT); + return 0; /* Needed only to prevent compiler warning. 
*/ +} + +/* + *---------------------------------------------------------------------- + * + * Tcl_AppInit -- + * + * This procedure performs application-specific initialization. Most + * applications, especially those that incorporate additional packages, + * will have their own version of this procedure. + * + * Results: + * Returns a standard Tcl completion code, and leaves an error message in + * the interp's result if an error occurs. + * + * Side effects: + * Depends on the startup script. + * + *---------------------------------------------------------------------- + */ + +int +Tcl_AppInit( + Tcl_Interp *interp) /* Interpreter for application. */ +{ + if ((Tcl_Init)(interp) == TCL_ERROR) { + return TCL_ERROR; + } + +#ifdef TCL_XT_TEST + if (Tclxttest_Init(interp) == TCL_ERROR) { + return TCL_ERROR; + } +#endif + +#ifdef TCL_TEST + if (Tcltest_Init(interp) == TCL_ERROR) { + return TCL_ERROR; + } + Tcl_StaticLibrary(interp, "Tcltest", Tcltest_Init, Tcltest_SafeInit); +#endif /* TCL_TEST */ + + /* + * Call the init procedures for included packages. Each call should look + * like this: + * + * if (Mod_Init(interp) == TCL_ERROR) { + * return TCL_ERROR; + * } + * + * where "Mod" is the name of the module. (Dynamically-loadable packages + * should have the same entry-point name.) + */ + + /* + * Call Tcl_CreateCommand for application-specific commands, if they + * weren't already created by the init procedures called above. + */ + + /* + * Specify a user-specific startup file to invoke if the application is + * run interactively. Typically the startup file is "~/.apprc" where "app" + * is the name of the application. If this line is deleted then no + * user-specific startup file will be run under any conditions. + */ + +#ifdef DJGPP + (Tcl_ObjSetVar2)(interp, Tcl_NewStringObj("tcl_rcFileName", -1), NULL, + Tcl_NewStringObj("~/tclsh.rc", -1), TCL_GLOBAL_ONLY); +#else + (Tcl_ObjSetVar2)(interp, Tcl_NewStringObj("tcl_rcFileName", -1), NULL, + Tcl_NewStringObj("~/.tclshrc", -1), TCL_GLOBAL_ONLY); +#endif + + return TCL_OK; +} + +/* + * Local Variables: + * mode: c + * c-basic-offset: 4 + * fill-column: 78 + * End: + */ diff --git a/llava/lib/tcl8.6/tclIndex b/llava/lib/tcl8.6/tclIndex new file mode 100644 index 0000000000000000000000000000000000000000..a186a7d258d56bad2090c618bdf2bc3645dfd882 --- /dev/null +++ b/llava/lib/tcl8.6/tclIndex @@ -0,0 +1,79 @@ +# Tcl autoload index file, version 2.0 +# -*- tcl -*- +# This file is generated by the "auto_mkindex" command +# and sourced to set up indexing information for one or +# more commands. Typically each line is a command that +# sets an element in the auto_index array, where the +# element name is the name of a command and the value is +# a script that loads the command. 
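+# The $dir variable is not defined in this file; it is set by the code that
+# sources the index (see auto_load_index in auto.tcl), so each entry resolves
+# its script path relative to the directory that holds this tclIndex.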
+ +set auto_index(auto_reset) [list source [file join $dir auto.tcl]] +set auto_index(tcl_findLibrary) [list source [file join $dir auto.tcl]] +set auto_index(auto_mkindex) [list source [file join $dir auto.tcl]] +set auto_index(auto_mkindex_old) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::init) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::cleanup) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::mkindex) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::hook) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::slavehook) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::command) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::commandInit) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::fullname) [list source [file join $dir auto.tcl]] +set auto_index(history) [list source [file join $dir history.tcl]] +set auto_index(::tcl::history) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistAdd) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistKeep) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistClear) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistInfo) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistRedo) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistIndex) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistEvent) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistChange) [list source [file join $dir history.tcl]] +set auto_index(pkg_mkIndex) [list source [file join $dir package.tcl]] +set auto_index(tclPkgSetup) [list source [file join $dir package.tcl]] +set auto_index(tclPkgUnknown) [list source [file join $dir package.tcl]] +set auto_index(::tcl::MacOSXPkgUnknown) [list source [file join $dir package.tcl]] +set auto_index(::pkg::create) [list source [file join $dir package.tcl]] +set auto_index(parray) [list source [file join $dir parray.tcl]] +set auto_index(::safe::InterpStatics) [list source [file join $dir safe.tcl]] +set auto_index(::safe::InterpNested) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpCreate) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpInit) [list source [file join $dir safe.tcl]] +set auto_index(::safe::CheckInterp) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpConfigure) [list source [file join $dir safe.tcl]] +set auto_index(::safe::InterpCreate) [list source [file join $dir safe.tcl]] +set auto_index(::safe::InterpSetConfig) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpFindInAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpAddToAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::InterpInit) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AddSubDirs) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpDelete) [list source [file join $dir safe.tcl]] +set auto_index(::safe::setLogCmd) [list source [file join $dir safe.tcl]] +set auto_index(::safe::SyncAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::PathToken) [list source [file join $dir safe.tcl]] +set auto_index(::safe::TranslatePath) [list source [file join $dir safe.tcl]] +set 
auto_index(::safe::Log) [list source [file join $dir safe.tcl]] +set auto_index(::safe::CheckFileName) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasGlob) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasSource) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasLoad) [list source [file join $dir safe.tcl]] +set auto_index(::safe::FileInAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::DirInAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::Subset) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasSubset) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasEncoding) [list source [file join $dir safe.tcl]] +set auto_index(tcl_wordBreakAfter) [list source [file join $dir word.tcl]] +set auto_index(tcl_wordBreakBefore) [list source [file join $dir word.tcl]] +set auto_index(tcl_endOfWord) [list source [file join $dir word.tcl]] +set auto_index(tcl_startOfNextWord) [list source [file join $dir word.tcl]] +set auto_index(tcl_startOfPreviousWord) [list source [file join $dir word.tcl]] +set auto_index(::tcl::tm::add) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::remove) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::list) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::Defaults) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::UnknownHandler) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::roots) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::path) [list source [file join $dir tm.tcl]] +if {[namespace exists ::tcl::unsupported]} { + set auto_index(timerate) {namespace import ::tcl::unsupported::timerate} +} diff --git a/llava/lib/tcl8.6/tm.tcl b/llava/lib/tcl8.6/tm.tcl new file mode 100644 index 0000000000000000000000000000000000000000..02007d536738204224dfe8a562e71eaa0434e584 --- /dev/null +++ b/llava/lib/tcl8.6/tm.tcl @@ -0,0 +1,380 @@ +# -*- tcl -*- +# +# Searching for Tcl Modules. Defines a procedure, declares it as the primary +# command for finding packages, however also uses the former 'package unknown' +# command as a fallback. +# +# Locates all possible packages in a directory via a less restricted glob. The +# targeted directory is derived from the name of the requested package, i.e. +# the TM scan will look only at directories which can contain the requested +# package. It will register all packages it found in the directory so that +# future requests have a higher chance of being fulfilled by the ifneeded +# database without having to come to us again. +# +# We do not remember where we have been and simply rescan targeted directories +# when invoked again. The reasoning is this: +# +# - The only way we get back to the same directory is if someone is trying to +# [package require] something that wasn't there on the first scan. +# +# Either +# 1) It is there now: If we rescan, you get it; if not you don't. +# +# This covers the possibility that the application asked for a package +# late, and the package was actually added to the installation after the +# application was started. It should still be able to find it. +# +# 2) It still is not there: Either way, you don't get it, but the rescan +# takes time. This is however an error case and we don't care that much +# about it +# +# 3) It was there the first time; but for some reason a "package forget" has +# been run, and "package" doesn't know about it anymore. 
+# +# This can be an indication that the application wishes to reload some +# functionality. And should work as well. +# +# Note that this also strikes a balance between doing a glob targeting a +# single package, and thus most likely requiring multiple globs of the same +# directory when the application is asking for many packages, and trying to +# glob for _everything_ in all subdirectories when looking for a package, +# which comes with a heavy startup cost. +# +# We scan for regular packages only if no satisfying module was found. + +namespace eval ::tcl::tm { + # Default paths. None yet. + + variable paths {} + + # The regex pattern a file name has to match to make it a Tcl Module. + + set pkgpattern {^([_[:alpha:]][:_[:alnum:]]*)-([[:digit:]].*)[.]tm$} + + # Export the public API + + namespace export path + namespace ensemble create -command path -subcommands {add remove list} +} + +# ::tcl::tm::path implementations -- +# +# Public API to the module path. See specification. +# +# Arguments +# cmd - The subcommand to execute +# args - The paths to add/remove. Must not appear querying the +# path with 'list'. +# +# Results +# No result for subcommands 'add' and 'remove'. A list of paths for +# 'list'. +# +# Side effects +# The subcommands 'add' and 'remove' manipulate the list of paths to +# search for Tcl Modules. The subcommand 'list' has no side effects. + +proc ::tcl::tm::add {args} { + # PART OF THE ::tcl::tm::path ENSEMBLE + # + # The path is added at the head to the list of module paths. + # + # The command enforces the restriction that no path may be an ancestor + # directory of any other path on the list. If the new path violates this + # restriction an error will be raised. + # + # If the path is already present as is no error will be raised and no + # action will be taken. + + variable paths + + # We use a copy of the path as source during validation, and extend it as + # well. Because we not only have to detect if the new paths are bogus with + # respect to the existing paths, but also between themselves. Otherwise we + # can still add bogus paths, by specifying them in a single call. This + # makes the use of the new paths simpler as well, a trivial assignment of + # the collected paths to the official state var. + + set newpaths $paths + foreach p $args { + if {$p in $newpaths} { + # Ignore a path already on the list. + continue + } + + # Search for paths which are subdirectories of the new one. If there + # are any then the new path violates the restriction about ancestors. + + set pos [lsearch -glob $newpaths ${p}/*] + # Cannot use "in", we need the position for the message. + if {$pos >= 0} { + return -code error \ + "$p is ancestor of existing module path [lindex $newpaths $pos]." + } + + # Now look for existing paths which are ancestors of the new one. This + # reverse question forces us to loop over the existing paths, as each + # element is the pattern, not the new path :( + + foreach ep $newpaths { + if {[string match ${ep}/* $p]} { + return -code error \ + "$p is subdirectory of existing module path $ep." + } + } + + set newpaths [linsert $newpaths 0 $p] + } + + # The validation of the input is complete and successful, and everything + # in newpaths is either an old path, or added. We can now extend the + # official list of paths, a simple assignment is sufficient. + + set paths $newpaths + return +} + +proc ::tcl::tm::remove {args} { + # PART OF THE ::tcl::tm::path ENSEMBLE + # + # Removes the path from the list of module paths. 
The command is silently + # ignored if the path is not on the list. + + variable paths + + foreach p $args { + set pos [lsearch -exact $paths $p] + if {$pos >= 0} { + set paths [lreplace $paths $pos $pos] + } + } +} + +proc ::tcl::tm::list {} { + # PART OF THE ::tcl::tm::path ENSEMBLE + + variable paths + return $paths +} + +# ::tcl::tm::UnknownHandler -- +# +# Unknown handler for Tcl Modules, i.e. packages in module form. +# +# Arguments +# original - Original [package unknown] procedure. +# name - Name of desired package. +# version - Version of desired package. Can be the +# empty string. +# exact - Either -exact or omitted. +# +# Name, version, and exact are used to determine satisfaction. The +# original is called iff no satisfaction was achieved. The name is also +# used to compute the directory to target in the search. +# +# Results +# None. +# +# Side effects +# May populate the package ifneeded database with additional provide +# scripts. + +proc ::tcl::tm::UnknownHandler {original name args} { + # Import the list of paths to search for packages in module form. + # Import the pattern used to check package names in detail. + + variable paths + variable pkgpattern + + # Without paths to search we can do nothing. (Except falling back to the + # regular search). + + if {[llength $paths]} { + set pkgpath [string map {:: /} $name] + set pkgroot [file dirname $pkgpath] + if {$pkgroot eq "."} { + set pkgroot "" + } + + # We don't remember a copy of the paths while looping. Tcl Modules are + # unable to change the list while we are searching for them. This also + # simplifies the loop, as we cannot get additional directories while + # iterating over the list. A simple foreach is sufficient. + + set satisfied 0 + foreach path $paths { + if {![interp issafe] && ![file exists $path]} { + continue + } + set currentsearchpath [file join $path $pkgroot] + if {![interp issafe] && ![file exists $currentsearchpath]} { + continue + } + set strip [llength [file split $path]] + + # Get the module files out of the subdirectories. + # - Safe Base interpreters have a restricted "glob" command that + # works in this case. + # - The "catch" was essential when there was no safe glob and every + # call in a safe interp failed; it is retained only for corner + # cases in which the eventual call to glob returns an error. + + catch { + # We always look for _all_ possible modules in the current + # path, to get the max result out of the glob. + + foreach file [glob -nocomplain -directory $currentsearchpath *.tm] { + set pkgfilename [join [lrange [file split $file] $strip end] ::] + + if {![regexp -- $pkgpattern $pkgfilename --> pkgname pkgversion]} { + # Ignore everything not matching our pattern for + # package names. + continue + } + try { + package vcompare $pkgversion 0 + } on error {} { + # Ignore everything where the version part is not + # acceptable to "package vcompare". + continue + } + + if {([package ifneeded $pkgname $pkgversion] ne {}) + && (![interp issafe]) + } { + # There's already a provide script registered for + # this version of this package. Since all units of + # code claiming to be the same version of the same + # package ought to be identical, just stick with + # the one we already have. + # This does not apply to Safe Base interpreters because + # the token-to-directory mapping may have changed. + continue + } + + # We have found a candidate, generate a "provide script" + # for it, and remember it. 
Note that we are using ::list + # to do this; locally [list] means something else without + # the namespace specifier. + + # NOTE. When making changes to the format of the provide + # command generated below CHECK that the 'LOCATE' + # procedure in core file 'platform/shell.tcl' still + # understands it, or, if not, update its implementation + # appropriately. + # + # Right now LOCATE's implementation assumes that the path + # of the package file is the last element in the list. + + package ifneeded $pkgname $pkgversion \ + "[::list package provide $pkgname $pkgversion];[::list source -encoding utf-8 $file]" + + # We abort in this unknown handler only if we got a + # satisfying candidate for the requested package. + # Otherwise we still have to fallback to the regular + # package search to complete the processing. + + if {($pkgname eq $name) + && [package vsatisfies $pkgversion {*}$args]} { + set satisfied 1 + + # We do not abort the loop, and keep adding provide + # scripts for every candidate in the directory, just + # remember to not fall back to the regular search + # anymore. + } + } + } + } + + if {$satisfied} { + return + } + } + + # Fallback to previous command, if existing. See comment above about + # ::list... + + if {[llength $original]} { + uplevel 1 $original [::linsert $args 0 $name] + } +} + +# ::tcl::tm::Defaults -- +# +# Determines the default search paths. +# +# Arguments +# None +# +# Results +# None. +# +# Side effects +# May add paths to the list of defaults. + +proc ::tcl::tm::Defaults {} { + global env tcl_platform + + regexp {^(\d+)\.(\d+)} [package provide Tcl] - major minor + set exe [file normalize [info nameofexecutable]] + + # Note that we're using [::list], not [list] because [list] means + # something other than [::list] in this namespace. + roots [::list \ + [file dirname [info library]] \ + [file join [file dirname [file dirname $exe]] lib] \ + ] + + if {$tcl_platform(platform) eq "windows"} { + set sep ";" + } else { + set sep ":" + } + for {set n $minor} {$n >= 0} {incr n -1} { + foreach ev [::list \ + TCL${major}.${n}_TM_PATH \ + TCL${major}_${n}_TM_PATH \ + ] { + if {![info exists env($ev)]} continue + foreach p [split $env($ev) $sep] { + path add $p + } + } + } + return +} + +# ::tcl::tm::roots -- +# +# Public API to the module path. See specification. +# +# Arguments +# paths - List of 'root' paths to derive search paths from. +# +# Results +# No result. +# +# Side effects +# Calls 'path add' to paths to the list of module search paths. + +proc ::tcl::tm::roots {paths} { + regexp {^(\d+)\.(\d+)} [package provide Tcl] - major minor + foreach pa $paths { + set p [file join $pa tcl$major] + for {set n $minor} {$n >= 0} {incr n -1} { + set px [file join $p ${major}.${n}] + if {![interp issafe]} {set px [file normalize $px]} + path add $px + } + set px [file join $p site-tcl] + if {![interp issafe]} {set px [file normalize $px]} + path add $px + } + return +} + +# Initialization. Set up the default paths, then insert the new handler into +# the chain. 
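+# For example, with Tcl 8.6 the Defaults call below consults the environment
+# variables TCL8.6_TM_PATH, TCL8_6_TM_PATH, TCL8.5_TM_PATH, ... down to
+# TCL8.0_TM_PATH, and a file named md5-2.0.tm sitting directly in one of the
+# resulting module directories would satisfy [package require md5 2.0].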
+ +if {![interp issafe]} {::tcl::tm::Defaults} diff --git a/parrot/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b687227aee9830023751261df5e67856f4965589 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/__pycache__/_torch_docs.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3dc594d5d6bfc95f658b6b65d274f873ce0eded4c018145fe10804b39f09153 +size 406628 diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..c8d7687cde3f74348ad1f73deacc2af1d03c8da9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoFunctor.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +namespace c10 { +namespace impl { + namespace detail { + template class WrapFunctionIntoFunctor_ {}; + template + class WrapFunctionIntoFunctor_> final : public c10::OperatorKernel { + public: + C10_ALWAYS_INLINE decltype(auto) operator()(Parameters... args) { + return (*FuncPtr::func_ptr())(std::forward(args)...); + } + }; + } + + // WrapFunctionIntoFunctor: Wraps a compile time function pointer into a kernel functor. + // Since it is a compile time function pointer, many compilers can inline it + // into the wrapper and you don't get any performance overhead for wrapping. + template + struct WrapFunctionIntoFunctor final { + static_assert(c10::is_compile_time_function_pointer::value, "WrapFunctionIntoFunctor can only wrap functions created with TORCH_FN."); + using type = detail::WrapFunctionIntoFunctor_< + FuncPtr, + typename guts::function_traits::return_type, + typename guts::function_traits::parameter_types + >; + }; +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h new file mode 100644 index 0000000000000000000000000000000000000000..9cd647597d42d461431164ec76a16ccccc75063e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/WrapFunctionIntoRuntimeFunctor.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +namespace c10 { + +namespace impl { + namespace detail { + template class WrapFunctionIntoRuntimeFunctor_ {}; + template + class WrapFunctionIntoRuntimeFunctor_> final : public c10::OperatorKernel { + public: + template + explicit WrapFunctionIntoRuntimeFunctor_(FuncType_&& kernel_func) + : kernel_func_(std::forward(kernel_func)) {} + + decltype(auto) operator()(Parameters... args) { + return kernel_func_(std::forward(args)...); + } + + private: + FuncType kernel_func_; + }; + } + + // WrapFunctionIntoRuntimeFunctor: Wraps any runtime functor into a functor that + // inherits from c10::OperatorKernel, so it can be used as a c10 kernel. + // This can, for example, be used for lambdas, functors or even function pointers. + // In the case of function pointers, since it is a runtime function pointer, + // there is an overhead for calling it whenever the kernel is invoked. 
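+  // For example, a lambda such as [](const at::Tensor& t) { return t + t; }
+  // can be wrapped by the alias below and used as a c10 kernel; the wrapper
+  // keeps its own copy of the callable and forwards the arguments on each call.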
+ template + using WrapFunctionIntoRuntimeFunctor = detail::WrapFunctionIntoRuntimeFunctor_< + FuncType, + typename guts::infer_function_traits_t::return_type, + typename guts::infer_function_traits_t::parameter_types + >; +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h new file mode 100644 index 0000000000000000000000000000000000000000..82fdd824ea65b8cee102a075cd4085011b5675ac --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/boxing.h @@ -0,0 +1,395 @@ +#pragma once + +// This file contains boxing (not unboxing) logic, +// i.e. how to make a vector from a set of concrete arguments. + +#include +#include +#include + +#include + +#include +#include + +namespace c10 { +namespace impl { + +// +// utils +// + +// is_mutable_tensor_ref +template struct is_mutable_tensor_ref : std::false_type {}; +template <> struct is_mutable_tensor_ref : std::true_type {}; + +// is_tuple_of_mutable_tensor_refs +// +template +struct is_tuple_of_mutable_tensor_refs : std::false_type {}; + +template +struct is_tuple_of_mutable_tensor_refs::value, void>> +: guts::typelist::all> +{}; + +// has_ivalue_to tests the presence/absence of instance method IValue::to() +// +template +struct has_ivalue_to : std::false_type {}; + +template +struct ivalue_to_helper +{ + using type = decltype(std::declval().template to()); +}; +template +using ivalue_to_helper_t = typename ivalue_to_helper::type; + +template +struct has_ivalue_to>> +: std::true_type +{}; + +// +// boxing predicates +// + +// A boxable arg type is one that IValue has a constructor for. +template +using can_box = + std::disjunction< + std::is_constructible>, + // TensorOptions are not directly constructible into IValue, + // but torch::jit::push knows how to handle them + std::is_same> + >; + +template +using can_box_all = std::conjunction...>; + +// an unboxable result is one that can be extracted from an IValue +template +using can_unbox = + std::conjunction< + std::disjunction< + has_ivalue_to, + // void returns are ok + std::is_same + >, + std::negation> + >; + +// +// boxArgs - utility for pushing unboxed args onto IValue stack +// +template +torch::jit::Stack boxArgs(Args... args) { + // TODO Reuse stack vector instead of allocating? + torch::jit::Stack stack; + stack.reserve(sizeof...(Args)); + torch::jit::push(stack, std::forward(args)...); + return stack; +} + +template +static inline constexpr size_t boxed_size_one() { + static_assert(!std::is_same, c10::TensorOptions>::value, "need to patch this path to support TensorOptions passed by reference"); + return 1; +} + +// torch::jit::push pushes 4 values for a TensorOptions; this needs to +// be kept in sync. +template <> +inline constexpr size_t boxed_size_one() { + return 4; +} + +// NOTE: this could probably be simplified with C++17 fold expressions. 
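+// For example, boxed_size<at::Tensor, int64_t, c10::TensorOptions>() below
+// evaluates to 1 + 1 + 4 == 6, because a TensorOptions argument expands to
+// four IValues (dtype, layout, device, pinned_memory) on the stack.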
+template +struct BoxedSize : std::integral_constant {}; +template +struct BoxedSize : std::integral_constant() + BoxedSize::value> {}; + +template +static inline constexpr size_t boxed_size() { + return BoxedSize::value; +} + +using IValueAlignedStorage = std::aligned_storage_t; + +template +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, T& arg, int& lastIdx) { + new (&dest[lastIdx]) IValue(arg); + lastIdx++; +} + +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxToStack(IValueAlignedStorage* dest, c10::TensorOptions options, int& lastIdx) { + new (&dest[lastIdx++]) IValue(c10::typeMetaToScalarType(options.dtype())); + new (&dest[lastIdx++]) IValue(options.layout()); + new (&dest[lastIdx++]) IValue(options.device()); + new (&dest[lastIdx++]) IValue(options.pinned_memory()); +} + +inline void boxArgsToStack(IValueAlignedStorage*, int&) {} + +template +C10_ALWAYS_INLINE_UNLESS_MOBILE void boxArgsToStack(IValueAlignedStorage* dest, int& lastIdx, T& arg, Args &... args) { + boxToStack(dest, arg, lastIdx); + boxArgsToStack(dest, lastIdx, args...); +} + +// +// PopResult is a helper class whose specializations handle popping single and +// multiple return values, respectively. +// +template +struct PopResult final { + static Result call(Stack& stack) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return one value on the stack, ", + "but instead pushed ", stack.size(), " values." + ); + return std::move(stack[0]).to(); + } +}; + +template +struct PopResult> final { + using Result = std::tuple; + + static Result call(Stack& stack) { + // for tuple return types, boxed kernel has pushed multiple values onto the stack + constexpr int RetCount = sizeof...(Types); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == RetCount, + "Boxed kernel was expected to return ", RetCount, " values on the stack, ", + "but instead pushed ", stack.size(), " values." + ); + return pop_to_tuple_impl(stack, std::make_index_sequence()); + } +private: + // note: this has been moved into its own helper only to avoid a parse error on `indices` otherwise. + // I'm sure there's an incantation that slips it past the parser but eh + template + static Result pop_to_tuple_impl(Stack& stack, std::index_sequence) { + return std::make_tuple((std::move(stack[indices]).to())...); + } +}; + +// +// BoxedKernelWrapper +// +// For a given function type FT, BoxedKernelWrapper implements +// a `call` method that +// - takes a boxed kernel and unboxed arguments as specified by FT, +// - calls `boxArgs` to box the arguments +// - calls the boxed kernel +// - unboxes and returns the result +// +// The partial specializations below handle various cases: in +// particular, not all types appearing in op signatures are supported, +// and ops returning references have nonstandard wrapper implementations. +// + +// 1. The base specialization of BoxedKernelWrapper should never be instantiated. +// A "no call method defined on BoxedKernelWrapper" compile error means that +// an op signature has failed to trigger any of the partial specializations +// that follow this one. +// +template +struct BoxedKernelWrapper { + // The reason we're not just doing straight up static_assert(false, ...) here: + // Basically, the way to make sure a static_assert only fires if a template + // is actually instantiated (rather than every time the file is parsed) is to use + // template parameters in the expression, e.g. FuncType here. 
However, since + // `sizeof(FuncType) != sizeof(FuncType)` is always false, this has the same + // effect. + static_assert(sizeof(FuncType) != sizeof(FuncType), + "Function signature contains one or more unsupported parameter and/or return types. " + "Look for a nearby error like " + "\"'call' is not a member of 'c10::impl::BoxedKernelWrapper<(your function type), void>'\" " + "- (your function type) is the unsupported signature."); +}; + +// +// 2. Supported signatures, other than those involving non-const Tensor refs - +// i.e., "functional" ops. +// + +template +struct BoxedKernelWrapper< + Result(Args...), + std::enable_if_t< + can_box_all::value && can_unbox::value && !is_tuple_of_mutable_tensor_refs::value, + void + > +> { + static Result call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + Args... args + ) { + torch::jit::Stack stack = boxArgs(std::forward(args)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + + if constexpr (!std::is_same_v) { + // op has pushed one or more values onto the stack. + return PopResult::call(stack); + } else { + // op returns void, boxed kernel has pushed nothing onto stack. + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.empty(), + "Boxed kernel was expected to return no values on the stack, ", + "but instead returned ", stack.size(), " values." + ); + } + } +}; + +// +// 3. in-place ops take a single non-const Tensor reference +// as their first argument, and return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// Because of this, the generated BoxedKernelWrapper specializations simply +// return the in-place argument. +// + +template +struct BoxedKernelWrapper< + at::Tensor&(at::Tensor&, OtherArgs...), + std::enable_if_t::value, void> +> { + static at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + at::Tensor& outArg, OtherArgs... otherArgs + ) { + torch::jit::Stack stack = boxArgs(outArg, std::forward(otherArgs)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + return outArg; + } +}; + +// +// 3.5. In-process migration to make in-place ops take and return +// const references instead. +template +struct BoxedKernelWrapper< + const at::Tensor&(const at::Tensor&, OtherArgs...), + std::enable_if_t::value, void> +> { + static const at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + const at::Tensor& outArg, OtherArgs... otherArgs + ) { + torch::jit::Stack stack = boxArgs(outArg, otherArgs...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + return outArg; + } +}; + +// +// 4. out of place ops that take a single non-const Tensor reference as their +// final argument, and also return it. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return out arguments. 
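+// For example, a signature such as
+//   at::Tensor& op(const at::Tensor& self, at::Tensor& out)
+// falls into this case: every argument is boxed, the boxed kernel runs, and
+// the wrapper returns the trailing `out` reference it was handed instead of
+// popping a value off the stack.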
+// +template +struct BoxedKernelWrapper< + at::Tensor&(FirstArg, RestArgs...), + std::enable_if_t< + can_box_all::value + // this skips over in-place kernels with a non-const Tensor + // arg at the front, so those can unambiguously trigger the preceding specialization. + && !is_mutable_tensor_ref::value, + void + > +> { + static at::Tensor& call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + FirstArg firstArg, RestArgs... restArgs + ) { + torch::jit::Stack stack = boxArgs(std::forward(firstArg), std::forward(restArgs)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == 1, + "Boxed kernel was expected to return a single value on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + // reusing restArgs after it has been forwarded here is ok because we know + // that the last element is of type `Tensor&`. + return std::get(std::tuple{restArgs...}); + } +}; + +// +// 5. out of place ops that take multiple non-const Tensor references as their +// final arguments, and return them in a std::tuple. +// +// Note: all signatures matching this pattern are assumed to be for such ops. +// This assumption permits the generated BoxedKernelWrapper specializations to simply +// return the out arguments. +// +template +struct BoxedKernelWrapper< + Result(Args...), + std::enable_if_t< + can_box_all::value && is_tuple_of_mutable_tensor_refs::value, + void + > +> { + static Result call( + const BoxedKernel& boxed_kernel_func, + const OperatorHandle& opHandle, + DispatchKeySet dispatchKeySet, + Args... args + ) { + using ArgTuple = std::tuple; + constexpr int RetCount = std::tuple_size(); + + torch::jit::Stack stack = boxArgs(std::forward(args)...); + boxed_kernel_func.callBoxed(opHandle, dispatchKeySet, &stack); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + stack.size() == RetCount, + "Boxed kernel was expected to return ", RetCount, " values on the stack, ", + "but instead returned ", stack.size(), " values." + ); + + // reusing args after it has been forwarded here is ok because we know + // that the last RetCount elements are of type `Tensor&`. + auto result = guts::tuple_take(ArgTuple{std::forward(args)...}); + static_assert( + std::is_same::value, + "The parameter list of an op returning a tuple of Tensor references " + "must end with an equal number of Tensor reference parameters." + ); + return result; + } +}; + +} // impl +} // c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..93b11dc853f00f2ac06ebfd361b6ee02986cfd1f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/boxing/impl/test_helpers.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +template +inline std::vector makeStack(Inputs&&... 
inputs) { + return {std::forward(inputs)...}; +} + +inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) { + auto* allocator = c10::GetCPUAllocator(); + int64_t nelements = 1; + auto dtype = caffe2::TypeMeta::Make(); + int64_t size_bytes = nelements * dtype.itemsize(); + auto storage_impl = c10::make_intrusive( + c10::StorageImpl::use_byte_size_t(), + size_bytes, + allocator->allocate(size_bytes), + allocator, + /*resizable=*/true); + at::Tensor t = at::detail::make_tensor(storage_impl, ks, dtype); + // TODO: We add this to simulate the ideal case where we only have Autograd backend keys + // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl + // constructor by default. + if (!requires_grad) { + t.unsafeGetTensorImpl()->remove_autograd_key(); + } + return t; +} + +inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) { + return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad); +} + +template +inline std::vector callOp(const c10::OperatorHandle& op, Args... args) { + auto stack = makeStack(std::forward(args)...); + op.callBoxed(&stack); + return stack; +} + +template +inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) { + return op.typed().call(std::forward(args)...); +} + +template +inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) { + return op.typed().callWithDispatchKey(dispatchKey, std::forward(args)...); +} + +template +inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... args) { + return op.typed().redispatch(ks, std::forward(args)...); +} + +inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_ANY_THROW( + callOp(*op, dummyTensor(dispatch_key), 5); + ); +} + +inline void expectDoesntFindOperator(const char* op_name) { + auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); + EXPECT_FALSE(op.has_value()); +} + +template +inline void expectThrows(Functor&& functor, const char* expectMessageContains) { + try { + std::forward(functor)(); + } catch (const Exception& e) { + EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains)); + return; + } + ADD_FAILURE() << "Expected to throw exception containing \"" + << expectMessageContains << "\" but didn't throw"; +} + +template +void expectListEquals(c10::ArrayRef expected, std::array actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::ArrayRef actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +template +void expectListEquals(c10::ArrayRef expected, c10::List actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual.get(i)); + } +} + +template +void expectListEquals(c10::ArrayRef expected, std::vector actual) { + EXPECT_EQ(expected.size(), actual.size()); + for (const auto i : c10::irange(expected.size())) { + EXPECT_EQ(expected[i], actual[i]); + } +} + +// NB: This is not really sound, but all of the type sets constructed here +// are singletons so it's fine +static inline c10::DispatchKey 
extractDispatchKey(const at::Tensor& t) { + return legacyExtractDispatchKey(t.key_set()); +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h new file mode 100644 index 0000000000000000000000000000000000000000..0a152a60d923d1753ba6a2f373afad34d28aba02 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/CppSignature.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { +namespace impl { + +// A CppSignature object holds RTTI information about a C++ function signature at runtime +// and can compare them or get a debug-printable name. +class TORCH_API CppSignature final { +public: + CppSignature(const CppSignature&) = default; + CppSignature(CppSignature&&) noexcept = default; + CppSignature& operator=(const CppSignature&) = default; + CppSignature& operator=(CppSignature&&) noexcept = default; + + template + static CppSignature make() { + // Normalize functors, lambdas, function pointers, etc. into the plain function type + // The first argument of the schema might be of type DispatchKeySet, in which case we remove it. + // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered + // with different calling conventions. + // See Note [Plumbing Keys Through The Dispatcher] + using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func>::func_type; + + return CppSignature(std::type_index(typeid(decayed_function_type))); + } + + std::string name() const { + return c10::demangle(signature_.name()); + } + + friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) { + if (lhs.signature_ == rhs.signature_) { + return true; + } + // Without RTLD_GLOBAL, the type_index comparison could yield false because + // they point to different instances of the RTTI data, but the types would + // still be the same. Let's check for that case too. + // Note that there still is a case where this might not work, i.e. when + // linking libraries of different compilers together, they might have + // different ways to serialize a type name. That, together with a missing + // RTLD_GLOBAL, would still fail this. + if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) { + return true; + } + + return false; + } + +private: + explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {} + std::type_index signature_; +}; + +inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) { + return !(lhs == rhs ); +} + +} +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h new file mode 100644 index 0000000000000000000000000000000000000000..4a345facaa94eaa6014f14c30f2847b5b9bc33a6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/DispatchKeyExtractor.h @@ -0,0 +1,242 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +namespace impl { + +// Take a DispatchKeySet for a Tensor and determine what the actual dispatch +// DispatchKey should be, taking into account TLS, and skipping backends which +// fall through. +// +// Unlike Tensor::key_set(), the value of this on a tensor can change depending +// on TLS. 
+// +// NB: If there is no valid dispatch key, this will return Undefined +inline DispatchKeySet computeDispatchKeySet( + DispatchKeySet ks, + // The key mask lets us eliminate (by zero entries) keys which should not + // be considered for dispatch. There are two cases when we use this: + // + // - If an operator's dispatch table contains a fallthrough entry, we + // should bypass it entirely when finding the key + // - If a user invokes with redispatch, the mask lets us + // zero out the key the user asked us to stop. + // + // These excluded backends are NOT tracked in the TLS, but must be applied + // AFTER TLS (since the backend may have been introduced for consideration + // by the included TLS), which is why you have to pass them in to this + // function (as opposed to just applying it to the input 'ks'). + DispatchKeySet key_mask +) { + c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set(); + // TODO: It's a bit irritating that we have to do logical ORs here, it would + // be nice to only do one. Can always_included be folded into the TLS? Well, + // it's a bit troublesome, because fastpath TLS access requires the type of + // the TLS in question to be zero-initialized, so you don't actually win + // anyting in that case. + return (((ks | local.included_) - local.excluded_) & key_mask); +} + +} + +namespace detail { + // A small gadget to extract the DispatchKeySet from types which are known + // to have it. Used to extract dispatch keys from unboxed calls. + struct MultiDispatchKeySet : at::IterArgs { + DispatchKeySet ts; + void operator()(const at::Tensor& x) { + ts = ts | x.key_set(); + } + void operator()(const std::optional& x) { + if (x.has_value()) { + ts = ts | x->key_set(); + } + } + void operator()(at::ArrayRef xs) { + for (const auto& x : xs) { + ts = ts | x.key_set(); + } + } + // Tensor?[] translates to this case. + void operator()(const c10::List>& xs) { + for (std::optional x : xs) { + if (x.has_value()) { + ts = ts | x.value().key_set(); + } + } + } + // Structured Tensor[] translates to this case + void operator()(const at::ITensorListRef& xs) { + for (const auto& x : xs) { + ts = ts | x.key_set(); + } + } + [[noreturn]] void operator()(at::ArrayRef>) { + // Just checking that the handling of Tensor?[] didn't change. + TORCH_INTERNAL_ASSERT(false); + } + void operator()(const at::Generator& gen) { + if (gen.defined()) { + ts = ts | gen.key_set(); + } + } + void operator()(const std::optional& gen) { + if (gen.has_value() && gen->defined()) { + ts = ts | gen->key_set(); + } + } + template + void operator()(const T&) { + // do nothing + } + }; + + // NB: take by const reference (Don't do universal forwarding here! You + // don't want to move into this function!) + template + DispatchKeySet multi_dispatch_key_set(const Args&... args) { + return MultiDispatchKeySet().apply(args...).ts; + } +} + +/** + * An instance of DispatchKeyExtractor knows how to get a dispatch key given + * a list of arguments for an operator call. + * + * The instance is specific for a certain operator as: + * - In boxed dispatch, different operators have different ways to extract + * the dispatch key (e.g. different numbers of arguments), and we precompute + * the stack locations we should look at; and + * - In all dispatch, some backends should be excluded from dispatch because + * they have been registered as fallthrough. The set of excluded backends + * varies from operator, as some operators may have overridden the + * fallthrough with custom behavior. 
+ * + * Note - this should maintain identical impl to the py dispatcher key extraction logic + * at pytorch/torch/dispatcher.py + */ +struct TORCH_API DispatchKeyExtractor final { +public: + static DispatchKeyExtractor make(const FunctionSchema& schema) { + return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema)); + } + + static DispatchKeyExtractor makeUninitialized() { + return DispatchKeyExtractor(c10::utils::bitset()); + } + + void registerSchema(const FunctionSchema& schema) { + TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset()); + dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema); + } + void deregisterSchema() { + dispatch_arg_indices_reverse_ = c10::utils::bitset(); + } + + DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const { + DispatchKeySet ks; + dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) { + const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1); + if (C10_LIKELY(ivalue.isTensor())) { + // NB: Take care not to introduce a refcount bump (there's + // no safe toTensorRef method, alas) + ks = ks | ivalue.unsafeToTensorImpl()->key_set(); + } else if (C10_UNLIKELY(ivalue.isTensorList())) { + for (const at::Tensor& tensor : ivalue.toTensorList()) { + ks = ks | tensor.key_set(); + } + } + // Tensor?[] translates to a c10::List so we need to peek inside + else if (C10_UNLIKELY(ivalue.isList())) { + for (const auto& elt : ivalue.toListRef()) { + if (elt.isTensor()) { + ks = ks | elt.toTensor().key_set(); + } + } + } + }); + // Keys that are fallthrough should be skipped + if (requiresBitsetPerBackend_) { + auto backend_idx = ks.getBackendIndex(); + return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]); + } else { + return impl::computeDispatchKeySet(ks, nonFallthroughKeys_); + } + } + + template + DispatchKeySet getDispatchKeySetUnboxed(const Args&... 
args) const { + auto ks = detail::multi_dispatch_key_set(args...); + // Keys that are fallthrough should be skipped + if (requiresBitsetPerBackend_) { + auto backend_idx = ks.getBackendIndex(); + return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]); + } else { + return impl::computeDispatchKeySet(ks, nonFallthroughKeys_); + } + } + + void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough); + + std::string dumpState() const; + void checkInvariants(const FunctionSchema& schema) const; + +private: + static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) { + TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(), + "The function schema has ", schema.arguments().size(), + " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS()); + c10::utils::bitset dispatch_arg_indices_reverse; + for (const auto index : c10::irange(schema.arguments().size())) { + if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) || + schema.arguments()[index].type()->isSubtypeOf( + *ListType::ofTensors()) || + schema.arguments()[index].type()->isSubtypeOf( + *ListType::ofOptionalTensors()) || + schema.arguments()[index].type()->isSubtypeOf( + *OptionalType::ofTensor())) { + dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index); + } + } + return dispatch_arg_indices_reverse; + } + + explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse) + : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse) + , nonFallthroughKeys_(DispatchKeySet::FULL) + , requiresBitsetPerBackend_(false) { + for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) { + nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL; + } + } + + // this is a bitset that has ones for each argument index which has to be + // considered for dispatch. This avoids having to iterate over the stack + // to find all the tensors. The bits are stored in reverse order, i.e. + // dispatch_arg_indices_reverse_[i] == true, then the i-th argument from + // the top of the stack (i.e. the i-th last argument of the function) + // is relevant for dispatch. + // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the + // fallthrough + c10::utils::bitset dispatch_arg_indices_reverse_; + + // Set of functionality keys for which the operator does NOT have fallthrough kernel. + DispatchKeySet nonFallthroughKeys_; + // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND. + // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends. 
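+  // Illustrative example (hypothetical operator, not from the upstream
+  // source): if a fallthrough kernel is registered for the Autograd
+  // functionality bit only under the CUDA backend, requiresBitsetPerBackend_
+  // becomes true, the CUDA slot of nonFallthroughKeysPerBackend_ has that bit
+  // cleared, and every other backend slot keeps the full key set, so only
+  // CUDA calls skip the autograd kernel.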
+ std::array nonFallthroughKeysPerBackend_; + // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path), + // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_ + bool requiresBitsetPerBackend_; +}; + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h new file mode 100644 index 0000000000000000000000000000000000000000..a8f1418e6286e04c1df3c8be6777544f07b13afc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/Dispatcher.h @@ -0,0 +1,799 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifndef NDEBUG +#include +#endif + +namespace c10 { + +TORCH_API bool show_dispatch_trace(); +TORCH_API void dispatch_trace_nesting_incr(); +TORCH_API void dispatch_trace_nesting_decr(); +TORCH_API int64_t dispatch_trace_nesting_value(); + +struct DispatchTraceNestingGuard { + DispatchTraceNestingGuard() { dispatch_trace_nesting_incr(); } + ~DispatchTraceNestingGuard() { dispatch_trace_nesting_decr(); } +}; + +class TORCH_API OperatorHandle; +template class TypedOperatorHandle; + +/** + * Implement this interface and register your instance with the dispatcher + * to get notified when operators are registered or deregistered with + * the dispatcher. + * + * NB: registration events only occur when a 'def' occurs; we don't trigger + * on 'impl' or 'fallback' calls. + */ +class TORCH_API OpRegistrationListener { +public: + virtual ~OpRegistrationListener(); + + virtual void onOperatorRegistered(const OperatorHandle& op) = 0; + virtual void onOperatorDeregistered(const OperatorHandle& op) = 0; +}; + +namespace detail { +class RegistrationListenerList; +} +class SchemaRegistrationHandleRAII; + +/** + * Top-level dispatch interface for dispatching via the dynamic dispatcher. + * Most end users shouldn't use this directly; if you're trying to register + * ops look in op_registration + */ +class TORCH_API Dispatcher final { +private: + // For direct access to backend fallback information + friend class impl::OperatorEntry; + + struct OperatorDef final { + explicit OperatorDef(OperatorName&& op_name) + : op(std::move(op_name)) {} + + impl::OperatorEntry op; + + // These refer to the number of outstanding RegistrationHandleRAII + // for this operator. def_count reflects only def() registrations + // (in the new world, this should only ever be 1, but old style + // registrations may register the schema multiple times, which + // will increase this count). def_and_impl_count reflects the number + // of combined def() and impl() registrations. When the last def() gets + // unregistered, we must immediately call the Deregistered listeners, but we + // must not actually delete the handle as there are other outstanding RAII + // destructors which will try to destruct and they had better still have a + // working operator handle in this case + size_t def_count = 0; + size_t def_and_impl_count = 0; + }; + friend class OperatorHandle; + template friend class TypedOperatorHandle; + + struct Guard final { + Guard() : alive(true), mutex() {} + std::atomic alive; + std::mutex mutex; + }; + +public: + ~Dispatcher(); + + // Implementation note: this class abstracts over the fact that we have per-operator + // dispatch tables. 
This could be easily adjusted to have a single global hash + // table. + static Dispatcher& realSingleton(); + + C10_ALWAYS_INLINE static Dispatcher& singleton() { +#if !defined C10_MOBILE + // Implemented inline so that steady-state code needn't incur + // function-call overhead. We can't just inline `realSingleton` + // because the function-local static would get duplicated across + // all DSOs that include & use this header, leading to multiple + // singleton instances. + static Dispatcher& s = realSingleton(); + return s; +#else + // For C10_MOBILE, we should never inline a static function that + // has a static member, since the generated code calls + // __cxa_guard_acquire and __cxa_guard_release which help + // implement exactly once semantics for the initialization of the + // static Dispatcher& s above (for the non-mobile case). That + // additional code when duplicated across all operator stubs + // for every backend results in a lot of additional code + // being generated by the compiler. + return realSingleton(); +#endif + } + + // ------------------------------------------------------------------------ + // + // Accessing operators by schema + // + // ------------------------------------------------------------------------ + + /** + * Looks for an operator schema with the given name and overload name + * and returns it if it is registered WITH A SCHEMA. + * Returns nullopt otherwise. + */ + std::optional findSchema(const OperatorName& operator_name); + + /** + * Variant of findSchema that results in less code generated at the call site. + * It (1) takes const char* pointer rather than OperatorName (so we skip + * generating std::string constructor calls at the call site), and (2) + * it raises an exception if the operator is not found (so we skip + * generating exception raising code at the call site) + * + * Irritatingly, we still have to generate the handful of instructions + * for dealing with an exception being thrown during static initialization + * (e.g. __cxa_guard_abort). If we could annotate this method noexcept we + * could avoid this code too, but as the name of the function suggests, + * it does throw exceptions. + */ + OperatorHandle findSchemaOrThrow(const char* name, const char* overload_name); + + // Like findSchema, but also returns OperatorHandle even if there is no schema + std::optional findOp(const OperatorName& operator_name); + + // Returns a list of all operator names present in the operatorLookupTable_ + const std::vector getAllOpNames(); + + // ------------------------------------------------------------------------ + // + // Invoking operators + // + // ------------------------------------------------------------------------ + + template + Return call(const TypedOperatorHandle& op, Args... args) const; + + + template + static Return callWithDispatchKeySlowPath(const TypedOperatorHandle& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args); + + // Like call, but intended for use in a redispatch in kernels that have explicitly performed the DispatchKey update calculatulation. + // This will take the DispatchKeySet completely as is and dispatch to the kernel of the corresponding highest priority key in the set. + // Note that this version of redispatch treats the inputted DispatchKeySet *as is*, and does NOT mask out the highest priority key. 
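+  // Illustrative sketch (hypothetical kernel, not part of this header): an
+  // autograd kernel re-entering the dispatcher below itself might look like
+  //
+  //   at::Tensor my_autograd_kernel(c10::DispatchKeySet ks, const at::Tensor& x) {
+  //     // drop Autograd (and everything above it) before redispatching
+  //     return op.redispatch(ks & c10::after_autograd_keyset, x);
+  //   }
+  //
+  // where `op` is the TypedOperatorHandle for the same operator.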
+ // See Note [Plumbing Keys Through The Dispatcher] + template + Return redispatch(const TypedOperatorHandle& op, DispatchKeySet currentDispatchKeySet, Args... args) const; + + // Invoke an operator via the boxed calling convention using an IValue stack + void callBoxed(const OperatorHandle& op, Stack* stack) const; + void callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const; + + // TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none) + // See Note [Plumbing Keys Through The Dispatcher] + void redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const; + + bool hasBackendFallbackForDispatchKey(DispatchKey dk) { + auto dispatch_ix = getDispatchTableIndexForDispatchKey(dk); + if (dispatch_ix < 0) return false; + return backendFallbackKernels_[dispatch_ix].kernel.isValid(); + } + + // Used by torchdeploy/multipy for multiple interpreters racing. + void waitForDef(const FunctionSchema& schema); + void waitForImpl(const OperatorName& op_name, std::optional dispatch_key); + + // ------------------------------------------------------------------------ + // + // Performing registrations (NON user public; use op_registration) + // + // ------------------------------------------------------------------------ + + /** + * Register a new operator schema. + * + * If a schema with the same operator name and overload name already exists, + * this function will check that both schemas are exactly identical. + */ + RegistrationHandleRAII registerDef(FunctionSchema schema, std::string debug, std::vector tags = {}); + + /** + * Register a kernel to the dispatch table for an operator. + * If dispatch_key is nullopt, then this registers a fallback kernel. + * + * @return A RAII object that manages the lifetime of the registration. + * Once that object is destructed, the kernel will be deregistered. + */ + // NB: steals the inferred function schema, as we may need to hold on to + // it for a bit until the real schema turns up + RegistrationHandleRAII registerImpl(OperatorName op_name, std::optional dispatch_key, KernelFunction kernel, std::optional cpp_signature, std::unique_ptr inferred_function_schema, std::string debug); + + /** + * Given an operator, tells the Dispatcher that we have implemented a fake impl + * for this op in the given Python module. Call this a "pystub". + */ + RegistrationHandleRAII registerPythonModule(const OperatorName& op_name, const char* pymodule, const char* context); + + /** + * Given an operator, throws if we have a pystub. + */ + void throwIfHasPythonModule(OperatorName op_name); + + std::optional> getPyStub(OperatorName op_name); + + /** + * Register a new operator by name. + */ + RegistrationHandleRAII registerName(OperatorName op_name); + + /** + * Register a fallback kernel for a backend. + * If an operator is called but there is no concrete kernel for the dispatch + * key of the given operator arguments, it will check if there is such a + * fallback kernel for the given dispatch key and, if yes, call that one. + */ + RegistrationHandleRAII registerFallback(DispatchKey dispatch_key, KernelFunction kernel, std::string debug); + + /** + * Use to register whenever we had a TORCH_LIBRARY declaration in the frontend + * API. These invocations are only permitted once per program, so we raise + * an error if this is called again for the same namespace. 
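+   *
+   * For illustration (user-side sketch; the namespace and schema are made up):
+   *
+   *   TORCH_LIBRARY(myops, m) {
+   *     m.def("myadd(Tensor a, Tensor b) -> Tensor");
+   *   }
+   *
+   * funnels into registerLibrary("myops", ...); a second TORCH_LIBRARY(myops, ...)
+   * block anywhere in the same program is an error.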
+   */
+  RegistrationHandleRAII registerLibrary(std::string ns, std::string debug);
+
+  // ------------------------------------------------------------------------
+  //
+  // Listeners on registrations
+  //
+  // ------------------------------------------------------------------------
+
+  /**
+   * Add a listener that gets called whenever a new op is registered or an existing
+   * op is deregistered. Immediately after registering, this listener gets called
+   * for all previously registered ops, so it can be used to keep track of ops
+   * registered with this dispatcher.
+   */
+  RegistrationHandleRAII addRegistrationListener(std::unique_ptr<OpRegistrationListener> listener);
+
+  void checkInvariants() const;
+
+  //
+  // ------------------------------------------------------------------------
+  //
+  // Assertions
+  //
+  // ------------------------------------------------------------------------
+
+  /**
+   * For testing purposes.
+   * Returns a list of all operators that were created through calls to registerImpl(),
+   * without any corresponding calls to registerDef(). After static initialization
+   * is done this is almost certainly a bug, as the created OperatorHandle won't have
+   * any schema associated with it and users calling the op through the dispatcher
+   * won't be able to access it.
+   *
+   * Note that we cannot enforce this invariant "as we go" during static initialization,
+   * due to undefined static initialization order - we have no guarantees over the order
+   * in which .def() and .impl() calls are registered in the dispatcher at static
+   * initialization time. So this function should only be called after static initialization.
+   */
+  std::vector<OperatorHandle> findDanglingImpls() const;
+
+  /**
+   * Useful for inspecting global Dispatcher registration state.
+   * Returns the names of all operators with a kernel registered for the specified DispatchKey.
+   * If no DispatchKey is specified, it returns all registered operators.
+ */ + std::vector getRegistrationsForDispatchKey(std::optional k) const; + +private: + Dispatcher(); + + static int64_t sequenceNumberForRunningRecordFunction(DispatchKey dispatchKey, DispatchKeySet dispatchKeySet); + static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet); + static void runRecordFunction(at::RecordFunction& guard, at::RecordFunction::schema_ref_t schema_ref, DispatchKey dispatchKey, DispatchKeySet dispatchKeySet, c10::ArrayRef args); + + #ifdef FBCODE_CAFFE2 + static bool profilingOperatorEvents(); + static void fireOpStartUSDT(at::RecordFunction::schema_ref_t schema_ref); + static void fireOpEndUSDT(at::RecordFunction::schema_ref_t schema_ref); + #endif // FBCODE_CAFFE2 + + OperatorHandle findOrRegisterSchema_(FunctionSchema&& schema); + OperatorHandle findOrRegisterName_(const OperatorName& op_name); + + void deregisterDef_(const OperatorHandle& op, const OperatorName& op_name); + void deregisterImpl_( + const OperatorHandle& op, + const OperatorName& op_name, + std::optional dispatch_key, + impl::OperatorEntry::AnnotatedKernelContainerIterator kernel_handle); + void deregisterName_(const OperatorHandle& op, const OperatorName& op_name); + void deregisterFallback_(DispatchKey dispatchKey); + void deregisterLibrary_(const std::string& ns); + void cleanup(const OperatorHandle& op, const OperatorName& op_name); + void checkSchemaCompatibility(const OperatorHandle& op, const FunctionSchema& schema, const std::string& debug); + + std::list operators_; +#if !defined(C10_MOBILE) + LeftRight> operatorLookupTable_; +#else + RWSafeLeftRightWrapper> operatorLookupTable_; +#endif + // Map from namespace to debug string (saying, e.g., where the library was defined) + ska::flat_hash_map libraries_; + + std::array backendFallbackKernels_; + + std::unique_ptr listeners_; + + // This condition variable gets notified whenever we add a new def/impl to the + // dispatch table. This is primarily used by multipy/torchdeploy, when + // we have multiple interpreters trying to register to the dispatch table. + // In this situation, whenever the non-primary interpreter would have tried + // to register to the dispatch table, instead it will check to see if the + // expected registration has already been made, and if it hasn't, wait on + // this condition variable to see if it was just racing with the primary + // interpreter. + // + // We expect it to be rare for there to be any waiters on this condition + // variable. This is mostly just to help give better diagnostics if + // something goes horribly wrong + std::condition_variable cond_var_; + + // Protect concurrent access to the dispatcher. We store this in a + // `shared_ptr` as we return callbacks that call back into dispatcher methods, + // and we need to be able to handle and guard against the event when the + // `Dispatcher` has been destroyed before the callbacks fire. + std::shared_ptr guard_; +}; + +/** + * This is a handle to an operator schema registered with the dispatcher. + * This handle can be used to register kernels with the dispatcher or + * to lookup a kernel for a certain set of arguments. 
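+ *
+ * Minimal usage sketch (hypothetical operator name, not from this header):
+ *
+ *   c10::OperatorHandle handle =
+ *       c10::Dispatcher::singleton().findSchemaOrThrow("myops::myadd", "");
+ *   torch::jit::Stack stack;
+ *   torch::jit::push(stack, a, b);      // arguments as IValues
+ *   handle.callBoxed(&stack);
+ *   at::Tensor out = torch::jit::pop(stack).toTensor();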
+ */ +class TORCH_API OperatorHandle { + template friend struct std::hash; + +public: + OperatorHandle(OperatorHandle&&) noexcept = default; + OperatorHandle& operator=(OperatorHandle&&) noexcept = default; + OperatorHandle(const OperatorHandle&) = default; + OperatorHandle& operator=(const OperatorHandle&) = default; + // NOLINTNEXTLINE(performance-trivially-destructible) + ~OperatorHandle(); + + const OperatorName& operator_name() const { + return operatorDef_->op.operator_name(); + } + + bool hasSchema() const { + return operatorDef_->op.hasSchema(); + } + + const FunctionSchema& schema() const { + return operatorDef_->op.schema(); + } + + const std::string& debug() const { + return operatorDef_->op.debug(); + } + + std::string dumpState() const { + return operatorDef_->op.dumpState(); + } + + bool hasKernelForDispatchKey(DispatchKey k) const { + return operatorDef_->op.hasKernelForDispatchKey(k); + } + + bool isKernelFallthroughKernel(DispatchKey k) const { + return operatorDef_->op.kernelForDispatchKey(k).isFallthrough(); + } + + bool hasKernelForAnyDispatchKey(DispatchKeySet k) const { + return operatorDef_->op.hasKernelForAnyDispatchKey(k); + } + + bool hasComputedKernelForDispatchKey(DispatchKey k) const { + return operatorDef_->op.hasComputedKernelForDispatchKey(k); + } + + std::string dumpComputedTable() const { + return operatorDef_->op.dumpComputedTable(); + } + + void checkInvariants() const { + return operatorDef_->op.checkInvariants(); + } + + c10::ArrayRef getTags() const { + return operatorDef_->op.getTags(); + } + + void setReportErrorCallback_(std::unique_ptr callback) { + operatorDef_->op.setReportErrorCallback_(std::move(callback)); + } + + bool hasTag(const at::Tag& tag) const { + for(const auto& tag_: getTags()) { + if (tag == tag_) { + return true; + } + } + return false; + } + + template + TypedOperatorHandle typed() const { + // NB: This assert is not 100% sound: you can retrieve a typed() operator + // handle prior to ANY C++ signature being registered on the operator + // and the check will say everything is OK (at which point you can then + // smuggle in a kernel that is typed incorrectly). For everything + // in core library this won't happen, because all the static registrations + // will be done by the time a typed() handle is acquired. 
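+    // Illustrative call site (hypothetical schema, not part of this header):
+    //
+    //   static auto op = c10::Dispatcher::singleton()
+    //       .findSchemaOrThrow("myops::myadd", "")
+    //       .typed<at::Tensor(const at::Tensor&, const at::Tensor&)>();
+    //   at::Tensor out = op.call(a, b);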
+#if !defined C10_MOBILE + operatorDef_->op.assertSignatureIsCorrect(); + if (fn_has_symint::value) { + operatorDef_->op.assertSignatureIsCorrect::type>(); + } +#endif + return TypedOperatorHandle(operatorIterator_); + } + + void callBoxed(Stack* stack) const { + c10::Dispatcher::singleton().callBoxed(*this, stack); + } + + void callBoxed(Stack& stack) const { + callBoxed(&stack); + } + + void callBoxedForDispatchKey(DispatchKey dk, Stack& stack) const { + c10::Dispatcher::singleton().callBoxedForDispatchKey(*this, dk, &stack); + } + + void redispatchBoxed(DispatchKeySet ks, Stack* stack) const { + c10::Dispatcher::singleton().redispatchBoxed(*this, ks, stack); + } + + template + PyObject* getPythonOp(c10::impl::PyInterpreter* self_interpreter, F slow_accessor) const { + return operatorDef_->op.getPythonOp(self_interpreter, slow_accessor); + } + + bool operator==(const OperatorHandle& other) const { + return operatorDef_ == other.operatorDef_; + } + + bool operator!=(const OperatorHandle& other) const { + return operatorDef_ != other.operatorDef_; + } + +private: + explicit OperatorHandle(std::list::iterator operatorIterator) + : operatorDef_(&*operatorIterator), operatorIterator_(operatorIterator) {} + friend class Dispatcher; + template friend class TypedOperatorHandle; + + // Storing a direct pointer to the OperatorDef even though we + // already have the iterator saves an instruction in the critical + // dispatch path. The iterator is effectively a + // pointer-to-std::list-node, and (at least in libstdc++'s + // implementation) the element is at an offset 16 bytes from that, + // because the prev/next pointers come first in the list node + // struct. So, an add instruction would be necessary to convert from the + // iterator to an OperatorDef*. + Dispatcher::OperatorDef* operatorDef_; + + // We need to store this iterator in order to make + // Dispatcher::cleanup() fast -- it runs a lot on program + // termination (and presuambly library unloading). + std::list::iterator operatorIterator_; +}; + +/** + * This is a handle to an operator schema registered with the dispatcher. + * It holds the same information as an OperatorHandle, but it is templated + * on the operator arguments and allows calling the operator in an + * unboxed way. + */ +template +class TypedOperatorHandle final { + static_assert(guts::false_t(), "FuncType in OperatorHandle::typed was not a valid function type"); +}; +template +class TypedOperatorHandle final : public OperatorHandle { +public: + TypedOperatorHandle(TypedOperatorHandle&&) noexcept = default; + TypedOperatorHandle& operator=(TypedOperatorHandle&&) noexcept = default; + TypedOperatorHandle(const TypedOperatorHandle&) = default; + TypedOperatorHandle& operator=(const TypedOperatorHandle&) = default; + + // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && + C10_ALWAYS_INLINE Return call(Args... args) const { + return c10::Dispatcher::singleton().call(*this, std::forward(args)...); + } + + // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && + C10_ALWAYS_INLINE Return redispatch(DispatchKeySet currentDispatchKeySet, Args... args) const { + return c10::Dispatcher::singleton().redispatch(*this, currentDispatchKeySet, std::forward(args)...); + } + +private: + explicit TypedOperatorHandle(std::list::iterator operatorIterator) + : OperatorHandle(operatorIterator) {} + friend class OperatorHandle; +}; + +namespace detail { +template inline void unused_arg_(const Args&...) 
{} + +// CaptureKernelCall is intended to capture return values from Dispatcher +// unboxed kernel calls. A record function may request to get outputs from the +// kernel calls. For boxed kernels, it's straightforward, the returned values +// are in the stack object. The stack can be passed to record functions. For +// unboxed kernels, we need to handle different kinds of return values, cache +// them temporarily, then release the values for the actual function call +// return. +template +struct CaptureKernelCall { + template + CaptureKernelCall( + const F& kernel, + const TypedOperatorHandle& op, + const DispatchKeySet& dispatchKeySet, + Args&&... args) + // Calls the kernel and capture the result in output_. + : output_{kernel.template call( + op, + dispatchKeySet, + std::forward(args)...)} {} + // Wraps the return values in a Stack. + Stack getOutputs() { + Stack stack; + impl::push_outputs::copy(output_, &stack); + return stack; + } + // Since we are returning the output_, we don't expect the output_ to be used + // afterward. Copy elision and RVO do not apply to class data members. Using + // move semantic to avoid copies when possible. + ReturnType release() && { + return std::move(output_); + } + + private: + ReturnType output_; +}; + +// Handle the lvalue reference differently since it should not be moved. +template <> +inline at::Tensor& CaptureKernelCall::release() && { + return output_; +} + +// Handle case where the kernel returns void. +template <> +struct CaptureKernelCall { + template + CaptureKernelCall( + const F& kernel, + const TypedOperatorHandle& op, + const DispatchKeySet& dispatchKeySet, + Args&&... args) { + // Calling the kernel and no need to capture void. + kernel.template call( + op, dispatchKeySet, std::forward(args)...); + } + Stack getOutputs() { + return Stack(); + } + void release() && {} +}; + +} // namespace detail + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +inline Return Dispatcher::callWithDispatchKeySlowPath(const TypedOperatorHandle& op, at::StepCallbacks& stepCallbacks, DispatchKeySet dispatchKeySet, const KernelFunction& kernel, Args... args) { + // If callbacks need inputs, we box the arguments and pass them to the guard. + // Note: For perf reasons we wouldn't want to prematurely box the arguments. + at::RecordFunction guard(std::move(stepCallbacks)); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(op.operatorDef_->op.isObserved()); + auto dispatchKey = dispatchKeySet.highestPriorityTypeId(); + auto& schema = op.schema(); + auto schema_ref = std::reference_wrapper(schema); + constexpr auto num_boxed_args = impl::boxed_size(); + if constexpr (num_boxed_args != 0) { + if (guard.needsInputs()) { + // If we used std::array here, we would + // have to spend time default constructing the IValues in + // boxedArgs. aligned_storage has no such requirement. + impl::IValueAlignedStorage boxedArgs[num_boxed_args]; + // For debugging only; could be removed (but the compiler will do + // that for us and it's nice to have the extra assurance of + // correctness from our debug builds). + int lastArgIdx = 0; + impl::boxArgsToStack(boxedArgs, lastArgIdx, args...); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(lastArgIdx == num_boxed_args); + // I don't *think* we need std::launder here, because IValue has + // no subclasses and no const or reference fields. 
+ runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef(reinterpret_cast(boxedArgs), num_boxed_args)); + for (size_t ii = 0; ii < num_boxed_args; ++ii) { + reinterpret_cast(&boxedArgs[ii])->~IValue(); + } + } else { + runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet); + } + } else { + runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet); + } + + if (C10_UNLIKELY(guard.needsOutputs())) { + // Calls the kernel and capture the output temporarily to pass to + // RecordFunction. + detail::CaptureKernelCall captureKernelCall( + kernel, op, dispatchKeySet, std::forward(args)...); + guard.setOutputs(captureKernelCall.getOutputs()); + // Releases the captured output to return to caller. + return std::move(captureKernelCall).release(); + } + + // keeping the guard alive while executing the kernel + return kernel.template call(op, dispatchKeySet, std::forward(args)...); +} + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +C10_ALWAYS_INLINE_UNLESS_MOBILE Return Dispatcher::call(const TypedOperatorHandle& op, Args... args) const { + detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5 + auto dispatchKeySet = op.operatorDef_->op.dispatchKeyExtractor() + .template getDispatchKeySetUnboxed(args...); +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + auto nesting_value = dispatch_trace_nesting_value(); + for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " "; + std::cerr << "[call] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const KernelFunction& kernel = op.operatorDef_->op.lookup(dispatchKeySet); +#ifndef PYTORCH_DISABLE_PER_OP_PROFILING + auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION); + if (C10_UNLIKELY(step_callbacks.has_value() && op.operatorDef_->op.isObserved())) { + return callWithDispatchKeySlowPath(op, *step_callbacks, dispatchKeySet, kernel, std::forward(args)...); + } +#endif // PYTORCH_DISABLE_PER_OP_PROFILING + +#ifdef FBCODE_CAFFE2 + if(profilingOperatorEvents()) { + struct FireOpRAII { + FireOpRAII(at::RecordFunction::schema_ref_t schema_ref) : schema_ref_(schema_ref) { + fireOpStartUSDT(schema_ref); + } + ~FireOpRAII() { fireOpEndUSDT(schema_ref_); } + at::RecordFunction::schema_ref_t schema_ref_; + } event(op.schema()); + return kernel.template call(op, dispatchKeySet, std::forward(args)...); + } else { + return kernel.template call(op, dispatchKeySet, std::forward(args)...); + } +#else + return kernel.template call(op, dispatchKeySet, std::forward(args)...); +#endif // FBCODE_CAFFE2 +} + +// See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && +template +inline Return Dispatcher::redispatch(const TypedOperatorHandle& op, DispatchKeySet currentDispatchKeySet, Args... 
args) const { + detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5 + // do not use RecordFunction on redispatch +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + auto nesting_value = dispatch_trace_nesting_value(); + for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " "; + std::cerr << "[redispatch] op=[" << op.operator_name() << "], key=[" << toString(currentDispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const KernelFunction& kernel = op.operatorDef_->op.lookup(currentDispatchKeySet); + return kernel.template call(op, currentDispatchKeySet, std::forward(args)...); +} + +inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. + const auto& entry = op.operatorDef_->op; + auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack); +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + auto nesting_value = dispatch_trace_nesting_value(); + for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " "; + std::cerr << "[callBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const auto& kernel = entry.lookup(dispatchKeySet); +#ifndef PYTORCH_DISABLE_PER_OP_PROFILING + auto step_callbacks = at::getStepCallbacksUnlessEmpty(at::RecordScope::FUNCTION); + if (C10_UNLIKELY(step_callbacks.has_value() && entry.isObserved())) { + at::RecordFunction guard(std::move(*step_callbacks)); + auto dispatchKey = dispatchKeySet.highestPriorityTypeId(); + auto& schema = op.schema(); + auto schema_ref = std::reference_wrapper(schema); + guard.needsInputs() ? runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet, c10::ArrayRef(stack->data(), stack->size())) + : runRecordFunction(guard, schema_ref, dispatchKey, dispatchKeySet); + + // keeping the guard alive while executing the kernel + kernel.callBoxed(op, dispatchKeySet, stack); + + if (C10_UNLIKELY(guard.needsOutputs())) { + guard.setOutputs(*stack); + } + return; + } +#endif // PYTORCH_DISABLE_PER_OP_PROFILING + kernel.callBoxed(op, dispatchKeySet, stack); +} + +// NB: this doesn't count as a "true" dispatcher jump, so no instrumentation +inline void Dispatcher::callBoxedForDispatchKey(const OperatorHandle& op, DispatchKey dk, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. + const auto& entry = op.operatorDef_->op; + // We still compute this as we're obligated to pass it on to the internal + // kernel, if it is a boxed fallback + auto dispatchKeySet = entry.dispatchKeyExtractor().getDispatchKeySetBoxed(stack); + const auto& kernel = ([&]() { + if (op.hasKernelForDispatchKey(dk)) { + return entry.kernelForDispatchKey(dk); + } else { + auto idx = getDispatchTableIndexForDispatchKey(dk); + TORCH_INTERNAL_ASSERT(idx >= 0); + return backendFallbackKernels_[idx].kernel; + } + })(); + kernel.callBoxed(op, dispatchKeySet, stack); +} + +inline void Dispatcher::redispatchBoxed(const OperatorHandle& op, DispatchKeySet dispatchKeySet, Stack* stack) const { + // note: this doesn't need the mutex because write operations on the list keep iterators intact. 
+ const auto& entry = op.operatorDef_->op; +#ifndef NDEBUG + DispatchTraceNestingGuard debug_guard; + if (show_dispatch_trace()) { + auto nesting_value = dispatch_trace_nesting_value(); + for (int64_t i = 0; i < nesting_value; ++i) std::cerr << " "; + std::cerr << "[redispatchBoxed] op=[" << op.operator_name() << "], key=[" << toString(dispatchKeySet.highestPriorityTypeId()) << "]" << std::endl; + } +#endif + const auto& kernel = entry.lookup(dispatchKeySet); + return kernel.callBoxed(op, dispatchKeySet, stack); +} + +} // namespace c10 + +namespace std { + +template <> +struct hash { + size_t operator()(const c10::OperatorHandle& op) const noexcept { + return std::hash{}(static_cast(op.operatorDef_)); + } +}; + +} // namespace std diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h new file mode 100644 index 0000000000000000000000000000000000000000..1741171fbf00412647178b2210071cee36928e54 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/ObservedOperators.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +struct TORCH_API ObservedOperators { + ObservedOperators() = delete; + + static bool isObserved(const OperatorName& name); + + static std::unordered_set& getUnobservedOperatorList(); +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h new file mode 100644 index 0000000000000000000000000000000000000000..873b385845ed3857957ddf924c6d5a317e1d92f9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorEntry.h @@ -0,0 +1,313 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#ifdef C10_MOBILE +#define C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY +#endif + +namespace c10 { + +class Dispatcher; + +namespace impl { + +// This data structure represents a kernel that was registered to us from a +// user. Unlike KernelFunction, AnnotatedKernel contains some extra metadata +// about the kernel that isn't necessary for actual dispatching (this is why +// we don't put AnnotatedKernel in the actual DispatchTable), but is useful for +// giving good error messages. +struct AnnotatedKernel final { + AnnotatedKernel(KernelFunction k, std::unique_ptr s, std::string d) + : kernel(std::move(k)) + , inferred_function_schema(std::move(s)) + , debug(std::move(d)) + {} + AnnotatedKernel() = default; + KernelFunction kernel; + std::unique_ptr inferred_function_schema; + // A little debug string to help us identify the kernel in question. + // Most importantly it records the TORCH_LIBRARY block that did the + // registration. + std::string debug; +}; + +// This data structure represents operator schema, with metadata specifying +// where the registration of this schema occurred +struct AnnotatedSchema final { + AnnotatedSchema(FunctionSchema s, std::string d) + : schema(std::move(s)) + , debug(std::move(d)) + {} + FunctionSchema schema; + std::string debug; +}; + +// Internal data structure that records information about a specific operator. +// It's not part of the public API; typically, users will interact with +// OperatorHandle instead. 
+// +// Concurrent writes to OperatorEntry are protected by the GLOBAL Dispatcher +// lock (this is important because some methods in OperatorEntry access +// dispatcher state) +class TORCH_API OperatorEntry final { +public: + explicit OperatorEntry(OperatorName&& operator_name); + + OperatorEntry(const OperatorEntry&) = delete; + OperatorEntry(OperatorEntry&&) noexcept = delete; + OperatorEntry& operator=(const OperatorEntry&) = delete; + OperatorEntry& operator=(OperatorEntry&&) noexcept = delete; + + const FunctionSchema& schema() const { + TORCH_INTERNAL_ASSERT(schema_.has_value(), "Tried to access the schema for ", name_, " which doesn't have a schema registered yet"); + return schema_->schema; + } + const std::string& debug() const { + TORCH_INTERNAL_ASSERT(schema_.has_value()); + return schema_->debug; + } + bool hasSchema() const { + return schema_.has_value(); + } + + bool isObserved() const { + return is_observed_; + } + + // We may allocate an OperatorEntry for an operator even when we don't + // have a schema. When we receive the schema registration, we post + // facto register a schema. + // + // NB: registerSchema/deregisterSchema are not idempotent; if you + // attempt to register a schema when one is already present or vice + // versa that is an error. (Refcounting for the registrations is + // handled in the OperatorHandle in Dispatcher) + void registerSchema(FunctionSchema&&, std::string&& debug, std::vector tags = {}); + void deregisterSchema(); + + const OperatorName& operator_name() const { + return name_; + } + +#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY + using AnnotatedKernelContainer = std::array; +#else + using AnnotatedKernelContainer = std::list; +#endif + using AnnotatedKernelContainerIterator = AnnotatedKernelContainer::iterator; + + // Why are kernels and fallback asymmetric? It has to do with ownership. + // Kernels and the computed dispatch tables for them are canonically + // owned by OperatorEntry, but backend fallbacks are specified once + // and apply for all operators, so they should be owned by Dispatcher. + // However, the registration of a backend fallback affects the + // state of the computed dispatch table, so when a backend fallback + // is updated, we need to update the operator tables too. Thus, + // registerKernel is the mechanism by which we give kernels to + // operator entry to own (and update dispatch table), but we only + // need a non-owning mechanism to update fallback. 
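+  // Illustrative example (user-side sketch, not from the upstream source):
+  // a backend fallback registered via
+  //
+  //   TORCH_LIBRARY_IMPL(_, AutocastCPU, m) {
+  //     m.fallback(torch::CppFunction::makeFallthrough());
+  //   }
+  //
+  // is owned by the Dispatcher, which then calls updateFallback() on each
+  // OperatorEntry so every computed dispatch table picks up the change.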
+ + // Precondition: Dispatcher::mutex_ is held + // Postcondition: caller is responsible for disposing of the kernel + AnnotatedKernelContainerIterator registerKernel( + const Dispatcher& dispatcher, + std::optional dispatch_key, + KernelFunction kernel, + std::optional cpp_signature, + std::unique_ptr inferred_function_schema, + std::string debug + ); + + // Precondition: Dispatcher::mutex_ is held + void deregisterKernel_( + const Dispatcher& dispatcher, + std::optional dispatch_key, + AnnotatedKernelContainerIterator kernel + ); + + // Precondition: Dispatcher::mutex_ is held + void updateFallback( + const Dispatcher& dispatcher, + DispatchKey dispatch_key + ); + + // Precondition: Dispatcher::mutex_ is held + void updateSchemaAliasAnalysis(AliasAnalysisKind a) { + TORCH_INTERNAL_ASSERT(schema_.has_value()); + schema_->schema.setAliasAnalysis(a); + } + + std::string dumpComputedTable() const; + std::string dumpState() const; + void checkInvariants() const; + + const DispatchKeyExtractor& dispatchKeyExtractor() const { return dispatchKeyExtractor_; } + + // Asserts that the given FuncType is correct for calling this operator in an unboxed way. + template + inline void assertSignatureIsCorrect() { + assertSignatureIsCorrect(CppSignature::make(), fn_has_symint::value); + } + + void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const; + + [[noreturn]] void reportError(DispatchKey dispatchKey) const; + + const KernelFunction& lookup(DispatchKeySet ks) const { + const auto idx = ks.getDispatchTableIndexForDispatchKeySet(); + if (C10_UNLIKELY(idx == -1)) { + reportError(ks.highestPriorityTypeId()); + } + const auto& kernel = dispatchTable_[idx]; + // A valid kernel *always* has a boxed kernel and *may* have an + // unboxed kernel. However, we typically do unboxed calls in at:: + // APIs, where the kernel 1) will very likely be valid and 2) + // should have an unboxed kernel. Checking the unboxed kernel + // first will allow us to avoid touching the boxed kernel at all + // in the common case. + if (C10_UNLIKELY(!kernel.isValidUnboxed())) { + if (!kernel.isValid()) { + reportError(ks.highestPriorityTypeId()); + } + } + return kernel; + } + + std::string listAllDispatchKeys() const; + + // Returns true if kernel_ has entry for any key in ks. + // + // Invariant: There are no alias keys in the passed-in dispatch key set. + // Note [No Alias Keys in DispatchKeySet] + // Alias keys should be checked using `hasKernelForDispatchKey` + // Alias keys shouldn't go inside of a DispatchKeySet, since they can technically + // have a value > 63 (causing overflow). + bool hasKernelForAnyDispatchKey(DispatchKeySet ks) const; + // Returns true if kernel_ has entry for a particular key. + bool hasKernelForDispatchKey(DispatchKey k) const; + // Retrieves the kernel entry at a particular key. Symmetric with + // hasKernelForDispatchKey. To get the AnnotatedKernel, see + // getKernelForDispatchKey (private) + const KernelFunction& kernelForDispatchKey(DispatchKey k) const; + // Returns true if the "computed table" has an entry for a particular key. 
+ bool hasComputedKernelForDispatchKey(DispatchKey k) const; + // Returns all the operator tags added at the time of registration + const std::vector& getTags() const; + void setReportErrorCallback_(std::unique_ptr callback); + + template + PyObject* getPythonOp(PyInterpreter* self_interpreter, F slow_accessor) const { + return py_cache_.ptr_or(self_interpreter, slow_accessor); + } + +private: + + OperatorName name_; + std::optional schema_; + #ifndef C10_MOBILE + std::vector tags_; + #endif + std::array dispatchTable_; + DispatchKeyExtractor dispatchKeyExtractor_; + // Pointer to the torch.ops.ns.op.overload object for speed + c10::PyHandleCache py_cache_; + + // kernels_ stores all registered kernels for the corresponding dispatch key + // and catchAllKernels_ stores the catch-all kernels. + // If an operator library gets loaded that overwrites an already existing kernel, + // both kernels will be in that list but only the newer one will be in + // dispatchTable. If any of the kernels go away (say the library gets + // unloaded), we remove the kernel from this list and update the + // dispatchTable if necessary. + // Kernels in the list are ordered by registration time descendingly, + // newer registrations are before older registrations. + // We do not combine dispatchTable and kernels into one hash map because + // kernels is a larger data structure and accessed quite infrequently + // while dispatchTable is accessed often and should be kept small to fit + // into CPU caches. + // Invariants: + // - dispatchTable[dispatch_key] == kernels_[dispatch_key].front() + // - dispatchTable[dispatch_key] does not exist if and only if + // kernels_[dispatch_key] does not exist + // - If kernels_[dispatch_key] exists, then it has elements. + // It is never an empty list. + // + // Why do we do that? + // ----- + // We mostly do this to enable Jupyter notebooks where a cell registering + // a kernel could be executed multiple times and the later execution + // should overwrite the earlier one. Note that this still fails when the + // function schema changed between the executions, but it works as long + // as the function schema didn't change. A better solution would be to + // unload the old extension library from the Jupyter cell when the cell is + // re-executed and then only allow one kernel here, i.e. error if a kernel + // is already registered, but that's a lot of effort to implement and + // currently not high-pri. + ska::flat_hash_map +#else + std::list +#endif + > kernels_; + + const AnnotatedKernel& missingKernel() const; + const AnnotatedKernel& ambiguousAutogradOtherKernel() const; + + // cpp_signature_ stores function signature if any of + // the kernels was created in a way that allowed us to know the function + // signature (i.e. by supplying an unboxed C++ kernel function). + // If this is set, it will be used to check that future kernel + // registrations match and it will be used in unboxed function calls + // to verify their arguments against the known function signature. 
+ struct CppSignatureWithDebug { + CppSignature signature; + std::string debug; + std::optional dispatch_key; + }; + std::optional cpp_signature_; + std::optional sym_cpp_signature_; + + // A Python custom error handler for OperatorEntry::reportError + std::unique_ptr report_error_callback_; + + // Whether this operator needs to be observed with RecordFunction + const bool is_observed_; + + [[noreturn]] void reportSignatureError(const CppSignature& call_signature, const CppSignatureWithDebug& saved_signature) const; + const KernelFunction& computeDispatchTableEntry(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key) const; + std::pair computeDispatchTableEntryWithDebug( + const c10::Dispatcher& dispatcher, DispatchKey dispatch_key + ) const; + // This function re-establishes the invariant that dispatchTable + // contains the front element from the kernels list for a given runtime dispatch key. + void updateDispatchTableEntry_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key); + // Like above, but also handles alias dispatch keys. + void updateDispatchTable_(const c10::Dispatcher& dispatcher, DispatchKey dispatch_key); + // Like above, but for ALL entries in the dispatch table. + void updateDispatchTableFull_(const c10::Dispatcher& dispatcher); + // Retrieves a pointer to AnnotatedKernel at kernels_.at(dispatch_key).front(). + const AnnotatedKernel* getKernelForDispatchKey(DispatchKey dispatch_key) const; +}; + +} // namespace impl +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..5c87f93657ac174b341074359e661c8e187421d3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/OperatorOptions.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace c10 { + +enum class AliasAnalysisKind : uint8_t { + INTERNAL_SPECIAL_CASE, + CONSERVATIVE, // The most conservative alias analysis type, assumes + // side-effects. This is the default analysis. + FROM_SCHEMA, + PURE_FUNCTION +}; + +#if !defined(_MSC_VER) +constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr. +#endif +inline const char* toString(AliasAnalysisKind aliasAnalysisKind) { + return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE) + ? "CONSERVATIVE" + : (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA) + ? "FROM_SCHEMA" + : (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION) + ? "PURE_FUNCTION" + : (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE) + ? 
"INTERNAL_SPECIAL_CASE" + : "UNKNOWN"; +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h new file mode 100644 index 0000000000000000000000000000000000000000..e6ef2128fd495f873465c98b10ebfad6f1e323df --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/dispatch/RegistrationHandleRAII.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace c10 { + +class RegistrationHandleRAII final { +public: + explicit RegistrationHandleRAII(std::function onDestruction) + : onDestruction_(std::move(onDestruction)) {} + + ~RegistrationHandleRAII() { + if (onDestruction_) { + onDestruction_(); + } + } + + RegistrationHandleRAII(const RegistrationHandleRAII&) = delete; + RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete; + + RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept + : onDestruction_(std::move(rhs.onDestruction_)) { + rhs.onDestruction_ = nullptr; + } + + RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept { + onDestruction_ = std::move(rhs.onDestruction_); + rhs.onDestruction_ = nullptr; + return *this; + } + +private: + std::function onDestruction_; +}; + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h new file mode 100644 index 0000000000000000000000000000000000000000..f17917500e3b4ba34fb310d849c7529295f88f6e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include +#include + +/* + * [Note: hacky wrapper removal for optional tensor] + * + * The kernel implementation takes an optional tensor marked in the schema as + * Tensor? but the C++ function takes Tensor instead of the optional + * expected by the dispatcher. + * + * To remove the hacky wrapper, the C++ function is changed to take + * optional and unwrap the Tensor value at the beginning of + * the function, e.g.: + * > c10::MaybeOwned weight_maybe_owned = + * > at::borrow_from_optional_tensor(weight_opt); + * > const Tensor& weight = *weight_maybe_owned; + * + * We may want to make the kernel handle optional directly without + * going through the creation of a default-constructed Tensor in + * at::borrow_from_optional_tensor. + */ + +/* + * [Note: hacky wrapper removal for TensorOptions] + * + * The kernel implementation takes a TensorOptions argument but the dispatcher + * expects separate arguments for dtype, layout, device, pin_memory. + * + * To remove the hacky wrapper, the kernel implementation is changed to take + * the 4 arguments (dtype, layout, device, pin_memory), and assemble the + * TensorOptions value at the beginning of the function, e.g.: + * > TensorOptions options = TensorOptions().dtype(dtype).layout(layout) + * > .device(device).pinned_memory(pin_memory); + * + * We may want make the kernel handle these parameters directly without going + * through the creation of a TensorOptions value. 
+ */ + +namespace c10 { +namespace impl { + +TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName); + +inline void check_and_update_common_device(optional& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) { + // TODO: Remove this once the following issue is addressed: + // https://github.com/pytorch/pytorch/issues/57380 + if (!tensor.defined()) { + return; + } + + if (!common_device.has_value()) { + common_device = tensor.device(); + return; + } + + if (C10_UNLIKELY(common_device != tensor.device())) { + common_device_check_failure(*common_device, tensor, methodName, argName); + } +} + +inline void check_and_update_common_device(optional& common_device, const optional& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) { + if (tensor.has_value()) { + check_and_update_common_device(common_device, tensor.value(), methodName, argName); + } +} + +inline void check_and_update_common_device(optional& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) { + for (const auto& tensor : tensors) { + check_and_update_common_device(common_device, tensor, methodName, argName); + } +} + +inline void check_and_update_common_device(optional& common_device, const List>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) { + for (const auto& tensor : tensors) { + check_and_update_common_device(common_device, tensor, methodName, argName); + } +} +} // namespace impl +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h new file mode 100644 index 0000000000000000000000000000000000000000..2f845f7c4c10f1ac8f48edae0c37c743ef8fea64 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/infer_schema.h @@ -0,0 +1,160 @@ +#pragma once + +/** + * This file contains functionality to take a C++ function and infer its + * c10::FunctionSchema. + */ + +#include +#include + +namespace c10 { +namespace detail { + +namespace infer_schema { + +/// The templated inference code creates `ArgumentDef` instead of `Argument`, +/// because that can be constructed at compile time and has a much smaller +/// binary size than having calls to `Argument` constructors in the template. +/// Creating `Argument` objects from `ArgumentDef` can then be done at +/// runtime in a non-templated way. +struct ArgumentDef final { + using GetTypeFn = TypePtr(); + GetTypeFn* getTypeFn; + GetTypeFn* getFakeTypeFn; + constexpr ArgumentDef(): getTypeFn(nullptr), getFakeTypeFn(nullptr) {} + explicit constexpr ArgumentDef(GetTypeFn *getTypeFn, GetTypeFn *getFakeTypeFn): getTypeFn(getTypeFn), getFakeTypeFn(getFakeTypeFn) {} +}; + +template +struct bool_t {}; +template<> struct bool_t : std::true_type {}; +template<> struct bool_t : std::false_type {}; + +/// Checks the static C++ types `Types` for correctness to catch common error cases. +template +constexpr int checkStaticTypes() { + // Give nice error messages for some of the common error cases. + // Use a LOUD ERROR MESSAGE SO USERS SEE THE STATIC_ASSERT + static_assert(std::conjunction< + bool_t::value || std::is_same::value || std::is_same::value || std::is_same::value>... 
+ >::value, "INVALID TYPE: Only int8_t, int64_t and bool are supported as an integral argument type"); + static_assert(std::conjunction< + bool_t::value>... + >::value, "INVALID TYPE: float is not supported as an argument type, use double instead"); + return 0; +} + +template +constexpr std::array createArgumentVectorFromTypes(std::index_sequence) { + return ( + // Check types for common errors + checkStaticTypes(), + + // Create the return value + std::array{ + ArgumentDef(&getTypePtrCopy>, &getFakeTypePtrCopy>)...} + ); +} + +/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified +/// as template arguments. +template struct createArguments final {}; +template +struct createArguments> final { + static constexpr std::array call() { + return createArgumentVectorFromTypes( + std::make_index_sequence() + ); + } +}; + +/// Creates a vector of `ArgumentDef` from a list of C++ types that are specified +/// as a tuple (i.e. in the way c10 kernels return values). +/// It can be a tuple if there's three output arguments with types A, B, C. +/// It can be an empty tuple<>, or void for kernels that don't return anything. +/// It can be a single type A (i.e. no tuple) for the case where a kernel just +/// returns one value. +template struct createReturns final {}; + +template +struct createReturns, void> final { + static constexpr std::array call() { + return createArgumentVectorFromTypes( + std::make_index_sequence() + ); + } +}; + +template +struct createReturns::value && !guts::is_instantiation_of::value>> final { + static constexpr std::array call() { + return createReturns>::call(); + } +}; + +template<> +struct createReturns final { + static constexpr std::array call() { + return createReturns>::call(); + } +}; + +template +struct createSingleReturn { + static constexpr std::array call() { + return createArgumentVectorFromTypes(std::make_index_sequence<1>()); + } +}; + +TORCH_API FunctionSchema make_function_schema(std::string&& name, std::string&& overload_name, c10::ArrayRef arguments, c10::ArrayRef returns); +TORCH_API FunctionSchema make_function_schema(c10::ArrayRef arguments, c10::ArrayRef returns); + +/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a +/// function. Flattens std::tuple returns into multiple return types +template +FunctionSchema createFunctionSchemaFromTraitsFlattenedReturns() { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. + constexpr auto arguments = createArguments::call(); + constexpr auto returns = createReturns::call(); + + return make_function_schema(arguments, returns); +} + +/// Creates a `FunctionSchema` object from a `FunctionTraits` type for a +/// function. Preserves std::tuple returns as a Tuple return type +template +FunctionSchema createFunctionSchemaFromTraitsSingleReturn(std::string&& name, std::string&& overload_name) { + using ReturnType = typename FunctionTraits::return_type; + using ParameterTypes = typename FunctionTraits::parameter_types; + + // arguments and returns are computed into a std::array at compile time and embedded into the binary. + // The only code executed at runtime here is the one that creates a std::vector + // of the arguments/returns from the std::array. 
+ constexpr auto arguments = createArguments::call(); + constexpr auto returns = createSingleReturn::call(); + + return make_function_schema(std::move(name), std::move(overload_name), arguments, returns); +} + +} +} + +template +FunctionSchema inferFunctionSchemaFlattenedReturns() { + return detail::infer_schema::createFunctionSchemaFromTraitsFlattenedReturns>(); +} + +template +FunctionSchema inferFunctionSchemaSingleReturn(std::string&& name, std::string&& overload_name) { + return detail::infer_schema::createFunctionSchemaFromTraitsSingleReturn>(std::move(name), std::move(overload_name)); +} + +TORCH_API std::optional findSchemaDifferences(const FunctionSchema& inferred, const FunctionSchema& specified); + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h new file mode 100644 index 0000000000000000000000000000000000000000..6e77c565388156db9f14ff607b4c1bc42240d278 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h @@ -0,0 +1,199 @@ +#pragma once + +// TODO: unify to C10_MOBILE. In theory this header could be used in OSS. +#ifdef TEMPLATE_SELECTIVE_BUILD +#include +#endif + +/** + * This header implements functionality to build PyTorch with only a certain + * set of operators (+ dependencies) included. + * + * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these + * two ops will be included in your build. The allowlist records operators + * only, no overloads; if you include aten::add, all overloads of aten::add + * will be included. + * + * Internally, this is done by removing the operator registration calls + * using compile time programming, and the linker will then prune all + * operator functions that weren't registered. + * See Note [Selective build] for more details + * + * WARNING: The allowlist mechanism doesn't work for all ways you could go about + * registering an operator. If the dispatch key / operator name is not + * sufficiently obvious at compile time, then the allowlisting mechanism + * will fail (and the operator will be included in the binary anyway). + */ + +#include +#include +#include + + +#if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) +#include +#endif + +namespace c10 { + +namespace impl { + +constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare + +/** + * In selective build mode returns true/false depending on whether a build + * feature is available or not. + * + * In instrumenting mode (tracing mode), always returns true, and doesn't + * trigger any side effects. + */ +constexpr bool is_build_feature_available(const char* name) { +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) + // Selective Build mode. +#if !defined(TORCH_BUILD_FEATURE_ALLOWLIST) + (void)name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST), + name); +#endif + +#else + // Instrumenting mode. + (void)name; + return true; +#endif +} + +[[noreturn]] void build_feature_required_feature_not_available(const char* feature); + +/** + * Use BUILD_FEATURE_REQUIRED macro in user-code. + * + * In selective build mode becomes a no-op if the build feature passed + * in is available. If not available, throws an exception (c10::Error). + * The compiler is able to perform dead code elimination for code + * following this method if the build feature is not available. 
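+ *
+ * Editor's sketch (hedged; the feature name below is hypothetical, not one
+ * defined by PyTorch):
+ *
+ * > void run_fancy_path() {
+ * >   BUILD_FEATURE_REQUIRED("my_fancy_feature");
+ * >   // Only reached when the feature was selected into the build;
+ * >   // otherwise a c10::Error is thrown above, and the code after the
+ * >   // macro can be dead-code-eliminated in selective builds.
+ * > }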
+ * + * In instrumenting mode (tracing mode), registers (as a side effect) + * the presence of this specific build feature being triggered. + */ +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode + +#if defined(TORCH_BUILD_FEATURE_ALLOWLIST) +#define BUILD_FEATURE_REQUIRED(NAME) \ + if (!c10::impl::is_build_feature_available(NAME)) { \ + ::c10::impl::build_feature_required_feature_not_available(NAME); \ + } +#else // Everything trivially selected +#define BUILD_FEATURE_REQUIRED(NAME) + +#endif + +#else // trace mode +#define BUILD_FEATURE_REQUIRED(NAME) \ + RECORD_FUNCTION_WITH_SCOPE( \ + at::RecordScope::BUILD_FEATURE, \ + std::string(NAME), \ + {}); +#endif + +// Use this macro, and not is_build_feature_available +#define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME) + +// returns true iff allowlist contains item +// allowlist_contains("a;bc;d", "bc") == true +constexpr bool allowlist_contains(string_view allowlist, string_view item) { + //Choose a really big value for next so that if something goes wrong + //this code will blow up in a hopefully detectable way. + size_t next = std::numeric_limits::max(); + for (size_t cur = 0; cur <= allowlist.size(); cur = next) { + next = allowlist.find(';', cur); + if (next != string_view::npos) { + if (allowlist.substr(cur, next - cur).compare(item) == 0) { + return true; + } + next++; + } else { + if (allowlist.substr(cur).compare(item) == 0) { + return true; + } + break; + } + } + return false; +} + +// Returns true iff the given op name is on the allowlist +// and should be registered +constexpr bool op_allowlist_check(string_view op_name) { + assert(op_name.find("::") != string_view::npos); + // Use assert() instead of throw() due to a gcc bug. See: + // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function + // https://github.com/fmtlib/fmt/issues/682 + assert(op_name.find("(") == string_view::npos); +#if !defined(TORCH_OPERATOR_WHITELIST) + // If the TORCH_OPERATOR_WHITELIST parameter is not defined, + // all ops are to be registered + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_OPERATOR_WHITELIST), + // This function is majorly used for mobile selective build with + // root operators, where the overload is included in the allowlist. + op_name); + // // Strip overload name (as allowlist doesn't contain overloads) + // // Another function based on this may be added when there's usage + // // on op names without overload. + // OperatorNameView::parse(op_name).name); +#endif +} + +// Returns true iff the given schema string is on the allowlist +// and should be registered +constexpr bool schema_allowlist_check(string_view schema) { +#if defined(TORCH_FORCE_SCHEMA_REGISTRATION) + return true; +#else + return op_allowlist_check(schema.substr(0, schema.find("("))); +#endif +} + +// Returns true iff the given custom class name is on the allowlist +// and should be registered +constexpr bool custom_class_allowlist_check(string_view custom_class_name) { +#if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST) + // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined, + // all custom classes are to be registered + (void)custom_class_name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST), + custom_class_name); +#endif +} + +// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST. +// Add this API to pass arbitrary allowlist. 
+constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) { + return allowlist_contains(allowlist, schema.substr(0, schema.find("("))); +} + +// Returns true iff the given dispatch key is on the allowlist +// and should be registered. When we turn this on, the list of valid +// mobile dispatch keys is hard coded (but you need to make sure +// that you have the correct set of dispatch keys for this). +constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) { +#ifdef C10_MOBILE + return true; + // Disabled for now: to be enabled later! + // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll; +#else + return true; +#endif +} + +} // namespace impl +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h new file mode 100644 index 0000000000000000000000000000000000000000..d874763cfcc821d6dd4d37710c09935f23f808c2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_registration.h @@ -0,0 +1,596 @@ +#pragma once + +/** + * Include this file if you want to register operators. It includes all + * functionality needed to do so for you. + */ + +#include +#include +#include +#include +#include +#include +#include +#if defined(EXPOSE_C2_OPS) || !defined(CAFFE2_IS_XPLAT_BUILD) +#include +#endif +#include + +namespace c10 { + +namespace detail { +// The first argument of the schema might be of type DispatchKeySet, in which case we remove it. +// We do this because every argument in a function schema is expected to be convertable +// to an ivalue, but DispatchKeySet is not a type we want the jit to be aware of. +// See Note [Plumbing Keys Through The Dispatcher] +template +std::unique_ptr inferFunctionSchemaFromFunctor() { + using func_type = typename c10::remove_DispatchKeySet_arg_from_func::func_type; + return std::make_unique(inferFunctionSchemaFlattenedReturns()); +} +} + +/** + * An instance of this class handles the registration for one or more operators. + * Make sure you keep the RegisterOperators instance around since it will + * deregister the operator it's responsible for in its destructor. 
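+ *
+ * Editor's sketch of the deregistration behaviour (hedged; the operator name
+ * and kernel function below are hypothetical):
+ *
+ * > {
+ * >   auto reg = c10::RegisterOperators()
+ * >       .op("my_ns::my_op(Tensor a) -> Tensor", &my_kernel);
+ * >   // "my_ns::my_op" is callable while `reg` is alive.
+ * > } // `reg` is destroyed here, so the operator is deregistered again.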
+ * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ +class TORCH_API RegisterOperators final { +public: + RegisterOperators() = default; + ~RegisterOperators() = default; + + RegisterOperators(const RegisterOperators&) = delete; + RegisterOperators& operator=(const RegisterOperators&) = delete; + RegisterOperators(RegisterOperators&&) noexcept = default; + RegisterOperators& operator=(RegisterOperators&&) noexcept = default; + + class TORCH_API Options final { + public: + Options(const Options&) = delete; + Options(Options&&) noexcept = delete; + Options& operator=(const Options&) = delete; + Options& operator=(Options&&) noexcept = delete; + + // internal-only for registering stack based kernels + template + Options&& kernel(DispatchKey dispatch_key) && { + return std::move(*this).kernel(dispatch_key, KernelFunction::makeFromBoxedFunction(), nullopt, nullptr); + } + + // internal-only for registering stack based catch-all kernels + template + Options&& catchAllKernel() && { + return std::move(*this).kernel(c10::nullopt, KernelFunction::makeFromBoxedFunction(), nullopt, nullptr); + } + + // internal only for registering caffe2 ops + Options&& schema(FunctionSchema&& schema) { + TORCH_CHECK(!schemaOrName_.has_value(), "You can only specify the schema once per operator registration."); + schemaOrName_ = FunctionSchema(std::move(schema)); + return std::move(*this); + } + + /** + * Use this to specify the schema for an operator. You can also specify + * the operator name only to have the function signature part of the + * schema be inferred from the kernel function. + * + * Example: + * + * > // Infer function signature from my_kernel_cpu + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * > + * > + * > // Explicitly specify full schema + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op(Tensor a) -> Tensor") + * > .kernel(DispatchKey::CPU)); + */ + Options&& schema(const std::string& schemaOrName) { + TORCH_CHECK(!schemaOrName_.has_value(), "Tried to register operator ", schemaOrName," but specified schema multiple times. You can only specify the schema once per operator registration."); + + #if !defined(EXPOSE_C2_OPS) && defined(CAFFE2_IS_XPLAT_BUILD) + throw std::logic_error("Tried to register operator " + schemaOrName + ". We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build."); + #else + schemaOrName_ = torch::jit::parseSchemaOrName(schemaOrName); + #endif + + return std::move(*this); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. 
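+ *
+ * Editor's sketch of registering kernels for several dispatch keys on one
+ * schema (hedged; `my_kernel_cpu` and `my_kernel_cuda` are hypothetical
+ * functors, each inheriting from c10::OperatorKernel):
+ *
+ * > static auto registry = c10::RegisterOperators()
+ * >     .op(c10::RegisterOperators::options()
+ * >         .schema("my_op")
+ * >         .kernel<my_kernel_cpu>(DispatchKey::CPU)
+ * >         .kernel<my_kernel_cuda>(DispatchKey::CUDA));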
+ * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, "some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) must match one of the constructors of Functor."); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a functor. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + * + * The functor constructor can take arguments to configure the kernel. + * The arguments are defined in the kernel registration. + * Example: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > explicit my_kernel_cpu(std::string some_configuration, int a, bool b) + * > : ... {...} + * > + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel("some_configuration", 3, true)); + */ + template + // enable_if: only enable it if KernelFunctor is actually a functor + std::enable_if_t::value, Options&&> catchAllKernel(ConstructorParameters&&... constructorParameters) && { + static_assert(std::is_base_of::value, "Tried to register a kernel functor using the kernel() API, but it doesn't inherit from c10::OperatorKernel. Please have the functor inherit from it."); + static_assert(std::is_constructible::value, "Wrong argument list for constructor of kernel functor. The arguments to kernel(arguments...) 
must match one of the constructors of Functor."); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedFunctor(std::make_unique(std::forward(constructorParameters)...)), + impl::CppSignature::make(), + detail::inferFunctionSchemaFromFunctor() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU)); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented by a function. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. + * + * Example: + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * > + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel() && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + static_assert(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedFunction(TORCH_FN(kernel_func)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>::type>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> kernel(DispatchKey dispatch_key, FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) 
API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + template + // enable_if: only enable it if FuncType is actually a function + std::enable_if_t::value, Options&&> catchAllKernel(FuncType* kernel_func) && { + static_assert(!std::is_same::value, "Tried to register a stackbased (i.e. internal) kernel function using the public kernel<...>() API. Please either use the internal kernel(...) API or also implement the kernel function as defined by the public API."); + TORCH_INTERNAL_ASSERT(kernel_func != nullptr, "Kernel function cannot be nullptr"); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(kernel_func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is only called for inputs matching the given dispatch key. + * You can register multiple kernels for different dispatch keys. + * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .kernel(DispatchKey::CPU, [] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> kernel(DispatchKey dispatch_key, Lambda&& functor) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. A functor kernel with cache gets a new instance of + // its cache each time the kernel is looked up from the dispatch table. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // So, instead of making users having to think about it (including the thread-safety + // issues this causes), let's just forbid stateful lambdas altogether. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + dispatch_key, + KernelFunction::makeFromUnboxedLambda(std::forward(functor)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + /** + * Use this to register an operator whose kernel is implemented as a lambda. + * The kernel is a catch-all kernel, meaning it's called independent from + * the input. Dispatch is disabled for this operator. 
+ * + * The lambda must be stateless, i.e. not have a capture. If your kernel + * needs to store some configuration parameters, write the kernel as a + * functor instead. + * + * Example: + * + * > static auto registry = c10::RegisterOperators() + * > .op(c10::RegisterOperators::options() + * > .schema("my_op") + * > .catchAllKernel([] (Tensor a) -> Tensor {...})); + */ + template + // enable_if: only enable it if Lambda is a functor (note: lambdas are functors) + std::enable_if_t< + guts::is_functor>::value + && !std::is_same>::func_type, KernelFunction::BoxedKernelFunction>::value, + Options&&> catchAllKernel(Lambda&& lambda) && { + static_assert(!std::is_base_of>::value, "The kernel(x) API for registering a kernel is only meant to be used with lambdas. Your kernel is a functor. Please use the kernel() API instead."); + + // We don't support stateful lambdas (i.e. lambdas with a capture), because their + // behavior would be nonobvious. + // A lambda with a capture would be global and share its capture between all kernel lookups. + // This would be a likely source for unexpected race conditions, so we forbid it. + // If a kernel really needs global state, they can just have regular global state + // in their .cpp file next to the kernel lambda. + static_assert(guts::is_stateless_lambda>::value, "The kernel(x) API for registering a kernel only works for stateless lambdas (i.e. lambdas without captures). If you need a cache, please use the functor based API kernel() instead."); + + return std::move(*this).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + ); + } + + Options&& aliasAnalysis(AliasAnalysisKind aliasAnalysisKind) && { + TORCH_CHECK(!aliasAnalysisKind_.has_value(), "You can only call aliasAnalysis() once per operator registration."); + aliasAnalysisKind_ = aliasAnalysisKind; + return std::move(*this); + } + + private: + Options&& kernel(std::optional dispatch_key, KernelFunction&& func, std::optional cpp_signature, std::unique_ptr&& inferred_function_schema) && { + KernelRegistrationConfig config; + config.dispatch_key = dispatch_key; + config.func = std::move(func); + config.cpp_signature = cpp_signature; + config.inferred_function_schema = std::move(inferred_function_schema); + kernels.push_back(std::move(config)); + return std::move(*this); + } + + Options() + : schemaOrName_(c10::nullopt) + , kernels() + , aliasAnalysisKind_(c10::nullopt) + {} + + // KernelRegistrationConfig accumulates all information from the config + // parameters passed to a RegisterOperators::op() call into one object. + struct KernelRegistrationConfig final { + KernelRegistrationConfig() + : dispatch_key(c10::nullopt) + , func() + , cpp_signature(c10::nullopt) + , inferred_function_schema(nullptr) + {} + + std::optional dispatch_key; + KernelFunction func; + std::optional cpp_signature; + std::unique_ptr inferred_function_schema; + }; + + std::optional> schemaOrName_; + + std::vector kernels; + optional aliasAnalysisKind_; + friend class RegisterOperators; + friend class Library; + }; + + /** + * Call this to get an instance of registration options, which + * can be passed to a call to RegisterOperators::op() to specify + * these options for the operator registration. + * See class doc comment for examples. + */ + static Options options() { + return {}; + } + + /** + * Call this to register an operator. 
See class doc comment for examples. + */ + RegisterOperators&& op(Options&& options) && { + checkSchemaAndRegisterOp_(std::move(options)); + return std::move(*this); + } + + // Regular mutator version of the && version above + RegisterOperators& op(Options&& options) & { + checkSchemaAndRegisterOp_(std::move(options)); + return *this; + } + + /** + * This is a shorthand for RegisterOperators::op(Options) where you can + * specify the operator schema outside of the options parameter. + * See class doc comment for examples. + */ + RegisterOperators&& op(const std::string& schemaOrName, Options&& options = RegisterOperators::options()) && { + return std::move(*this).op(std::move(options).schema(schemaOrName)); + } + + // internal only for registering caffe2 ops + RegisterOperators&& op(FunctionSchema schema, Options&& options) && { + return std::move(*this).op(std::move(options).schema(std::move(schema))); + } + + template + explicit RegisterOperators(const std::string& schemaOrName, FuncType&& func, Options&& options = RegisterOperators::options()) + : RegisterOperators() { + std::move(*this).op(schemaOrName, std::forward(func), std::move(options)); + } + + /** + * This API registers an operator based on a kernel function pointer. + * + * Given a kernel + * + * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } + * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", &my_kernel_cpu); + * + * If your kernel is small and the overhead of calling it matters, + * then this API might be the wrong choice since the following API + * has a slightly lower overhead for calling into the kernel: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + * + * Or, alternatively, write your kernel as a functor: + * + * > namespace { + * > class my_kernel_cpu final : public c10::OperatorKernel { + * > public: + * > Tensor operator()(Tensor a, Tensor b) {...} + * > }; + * > } + * > + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .kernel()); + */ + template + // enable_if: only enable it if FuncType is actually a function, but not a stack based BoxedKernelFunction. + std::enable_if_t::value && !std::is_same::value, RegisterOperators&&> + op(const std::string& schemaOrName, FuncType* func, Options&& options = RegisterOperators::options()) && { + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedRuntimeFunction(func), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + /** + * This API registers an operator based on a kernel lambda. 
+ * + * This API looks like: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", [] (Tensor a, Tensor b) {...}); + * + * This is equivalent to: + * + * > static auto registry = c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() + * > .catchAllKernel([] (Tensor a, Tensor b) {...})); + * + */ + template + // enable_if: only enable it if Lambda is actually a stateless lambda + std::enable_if_t::value && guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + + template + C10_DEPRECATED_MESSAGE("Registering operator kernels with stateful lambdas (i.e. lambdas with a capture) has non-obvious behavior. This is deprecated. Please use a lambda without a capture or a functor class instead.") + // enable_if: only enable it if Lambda is actually a functor but not a stateless lambda + std::enable_if_t::value && !guts::is_stateless_lambda>::value, RegisterOperators&&> + op(const std::string& schemaOrName, Lambda&& lambda, Options&& options = RegisterOperators::options()) && { + static_assert(!std::is_base_of::value, "c10::OperatorKernel is part of the new kernel registration API and shouldn't be used together with the deprecated registration API. Please use the new RegisterOperators::options().kernel() based API instead."); + + constexpr bool AllowLegacyTypes = true; + return std::move(*this).op(std::move(options).schema(schemaOrName).kernel( + c10::nullopt, + KernelFunction::makeFromUnboxedLambda(std::forward(lambda)), + impl::CppSignature::make(), + // TODO Do schema inference without relying on WrapFunctionIntoRuntimeFunctor + detail::inferFunctionSchemaFromFunctor>>() + )); + } + +private: + void checkSchemaAndRegisterOp_(Options&& config); + + static c10::FunctionSchema inferSchemaFromKernels_(const OperatorName& opNameStr, const Options& options); + void checkNoDuplicateKernels_(const Options& options); + void registerOp_(Options&& options); + + std::vector registrars_; +}; + +} // namespace c10 + +namespace torch { + // Old-style API + using RegisterOperators = c10::RegisterOperators; +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h new file mode 100644 index 0000000000000000000000000000000000000000..9bb1bfccc42a1971568346fbb6bce859d0f3018a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h @@ -0,0 +1,14 @@ +/// Flush-To-Zero and Denormals-Are-Zero mode +/// +/// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass +/// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64 +/// and some x86 CPUs. They result in reduced precision for values near zero, +/// but increased performance. 
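+///
+/// Editor's sketch (hedged): enabling FTZ/DAZ from C++ through the
+/// at::cpu::set_flush_denormal function declared below. The return value
+/// reports whether the mode could actually be set on this CPU.
+///
+///   if (!at::cpu::set_flush_denormal(true)) {
+///     // CPU does not support FTZ/DAZ; denormals keep full IEEE behaviour.
+///   }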
+/// +/// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz + +namespace at::cpu { + +bool set_flush_denormal(bool on); + +} // namespace at::cpu diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h new file mode 100644 index 0000000000000000000000000000000000000000..805c7c64a21b6260b17bb73e569e3d13e5a72f3e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace at::cpu { + +TORCH_API bool is_cpu_support_avx2(); +TORCH_API bool is_cpu_support_avx512(); + +// Detect if CPU support Vector Neural Network Instruction. +TORCH_API bool is_cpu_support_vnni(); + +} // namespace at::cpu diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..388b3170d5b55a8c4bdd3af4ff982397fb323cb6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h @@ -0,0 +1,4 @@ +#pragma once + +#include +#include diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h new file mode 100644 index 0000000000000000000000000000000000000000..48d44dc42c33ceb8f78eee94d2f088a728598071 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h @@ -0,0 +1,358 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include + +namespace at::vec { + +// slow path +template +inline scalar_t vec_reduce_all( + const Op& vec_fun, + vec::Vectorized acc_vec, + int64_t size) { + using Vec = vec::Vectorized; + scalar_t acc_arr[Vec::size()]; + acc_vec.store(acc_arr); + for (const auto i : c10::irange(1, size)) { + std::array acc_arr_next = {0}; + acc_arr_next[0] = acc_arr[i]; + Vec acc_vec_next = Vec::loadu(acc_arr_next.data()); + acc_vec = vec_fun(acc_vec, acc_vec_next); + } + acc_vec.store(acc_arr); + return acc_arr[0]; +} + +template +struct VecReduceAllSIMD { + static inline scalar_t apply(const Op& vec_fun, const Vectorized& acc_vec) { + return vec_reduce_all(vec_fun, acc_vec, Vectorized::size()); + } +}; + +#if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) +#if defined(CPU_CAPABILITY_AVX2) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, const Vectorized& acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + // 128-bit shuffle + Vec v1 = _mm256_permute2f128_ps(v, v, 0x1); + v = vec_fun(v, v1); + // 64-bit shuffle + v1 = _mm256_shuffle_ps(v, v, 0x4E); + v = vec_fun(v, v1); + // 32-bit shuffle + v1 = _mm256_shuffle_ps(v, v, 0xB1); + v = vec_fun(v, v1); + return _mm256_cvtss_f32(v); + } +}; +#endif // defined(CPU_CAPABILITY_AVX2) +#if defined(CPU_CAPABILITY_AVX512) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, const Vectorized& acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + // 256-bit shuffle + Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E); + v = vec_fun(v, v1); + // 128-bit shuffle + v1 = _mm512_shuffle_f32x4(v, v, 0xB1); + v = vec_fun(v, v1); + // 64-bit shuffle + v1 = _mm512_shuffle_ps(v, v, 0x4E); + v = 
vec_fun(v, v1); + // 32-bit shuffle + v1 = _mm512_shuffle_ps(v, v, 0xB1); + v = vec_fun(v, v1); + return _mm512_cvtss_f32(v); + } +}; +#endif // defined(CPU_CAPABILITY_AVX512) +#endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) + +#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +template +struct VecReduceAllSIMD { + static inline float apply(const Op& vec_fun, const Vectorized& acc_vec) { + using Vec = Vectorized; + Vec v = acc_vec; + + // 128-bit shuffle: [a1, a2, a3, a4, a5, a6, a7, a8] -> [a5, a6, a7, a8, a1, a2, a3, a4] + Vec v1 = {v.get_high(), v.get_low()}; + // [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] ('+' stands for the reduction function. Note that the last 4 elements are not required) + v = vec_fun(v, v1); + + // 64-bit shuffle: [a1+a5, a2+a6, a3+a7, a4+a8, -, -, -, -] -> [a3+a7, a4+a8, a1+a5, a2+a6, -, -, -, -] + float32x4_t v1_1 = vextq_f32(v.get_low(), v.get_low(), 2); + v1 = {v1_1, v1_1}; + // [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -] + v = vec_fun(v, v1); + + // 32-bit shuffle: [a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, -, -, -, -] -> [a2+a4+a6+a8, a1+a3+a5+a7, a2+a4+a6+a8, a1+a3+a5+a7, -, -, -, -] + v1_1 = vrev64q_f32(v.get_low()); + v1 = {v1_1, v1_1}; + // [a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, a1+a2+a3+a4+a5+a6+a7+a8, -, -, -, -] + v = vec_fun(v, v1); + + return v.get_low()[0]; + } +}; +#endif // defined(__aarch64__) + +template +inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized& acc_vec) { + return VecReduceAllSIMD::apply(vec_fun, acc_vec); +} + +template , int> = 0> +inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) + return vec_reduce_all(vec_fun, Vec::loadu(data, size), size); + int64_t d = Vec::size(); + Vec acc_vec = Vec::loadu(data); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + acc_vec = vec_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(vec_fun, acc_vec); +} + +// similar to reduce_all, but reduces into two outputs +template , int> = 0> +inline std::pair reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, + const scalar_t* data, int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + auto loaded_data = Vec::loadu(data, size); + return std::pair( + vec_reduce_all(vec_fun1, loaded_data, size), + vec_reduce_all(vec_fun2, loaded_data, size)); + } + int64_t d = Vec::size(); + Vec acc_vec1 = Vec::loadu(data); + Vec acc_vec2 = Vec::loadu(data); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + acc_vec1 = vec_fun1(acc_vec1, data_vec); + acc_vec2 = vec_fun2(acc_vec2, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d); + acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d); + } + return std::pair( + vec_reduce_all(vec_fun1, acc_vec1), + vec_reduce_all(vec_fun2, acc_vec2)); +} + +template , int> = 0> +inline scalar_t map_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) + return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, 
size)), size); + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + data_vec = map_fun(data_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + data_vec = map_fun(data_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template , int> = 0> +inline scalar_t map2_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + Vec data_vec = Vec::loadu(data, size); + Vec data2_vec = Vec::loadu(data2, size); + data_vec = map_fun(data_vec, data2_vec); + return vec_reduce_all(red_fun, data_vec, size); + } + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + Vec data2_vec = Vec::loadu(data2 + d); + data_vec = map_fun(data_vec, data2_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + Vec data2_vec = Vec::loadu(data2 + d, size - d); + data_vec = map_fun(data_vec, data2_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template , int> = 0> +inline scalar_t map3_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + const scalar_t* data3, + int64_t size) { + using Vec = vec::Vectorized; + if (size < Vec::size()) { + Vec data_vec = Vec::loadu(data, size); + Vec data2_vec = Vec::loadu(data2, size); + Vec data3_vec = Vec::loadu(data3, size); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + return vec_reduce_all(red_fun, data_vec, size); + } + + int64_t d = Vec::size(); + Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3)); + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(data + d); + Vec data2_vec = Vec::loadu(data2 + d); + Vec data3_vec = Vec::loadu(data3 + d); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + acc_vec = red_fun(acc_vec, data_vec); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(data + d, size - d); + Vec data2_vec = Vec::loadu(data2 + d, size - d); + Vec data3_vec = Vec::loadu(data3 + d, size - d); + data_vec = map_fun(data_vec, data2_vec, data3_vec); + acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); + } + return vec_reduce_all(red_fun, acc_vec); +} + +template , int> = 0> +inline void map( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec output_vec = vec_fun(Vec::loadu(input_data + d)); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d)); + output_vec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map2( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + const scalar_t* input_data2, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec = Vec::loadu(input_data + d); + Vec 
data_vec2 = Vec::loadu(input_data2 + d); + Vec output_vec = vec_fun(data_vec, data_vec2); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec = Vec::loadu(input_data + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec output_vec = vec_fun(data_vec, data_vec2); + output_vec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map3( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec1 = Vec::loadu(input_data1 + d); + Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec data_vec3 = Vec::loadu(input_data3 + d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); + output_vec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map4( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + const scalar_t* input_data4, + int64_t size) { + using Vec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec data_vec1 = Vec::loadu(input_data1 + d); + Vec data_vec2 = Vec::loadu(input_data2 + d); + Vec data_vec3 = Vec::loadu(input_data3 + d); + Vec data_vec4 = Vec::loadu(input_data4 + d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); + output_vec.store(output_data + d); + } + if (size - d > 0) { + Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); + Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); + Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); + Vec data_vec4 = Vec::loadu(input_data4 + d, size - d); + Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); + output_vec.store(output_data + d, size - d); + } +} + +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..3bd22b3820f0b13d6d518329dd7df687ced37948 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h @@ -0,0 +1,549 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include + +namespace at::vec { + +// BFloat16 specification +template struct VecScalarType { using type = scalar_t; }; +template <> struct VecScalarType { using type = float; }; +template <> struct VecScalarType { using type = float; }; + +// This is different from at::acc_type since we only need to specialize BFloat16 +template +using vec_scalar_t = typename VecScalarType::type; + +// Vector conversion between float and bfloat16/half +template , int> = 0> +inline std::tuple, Vectorized> convert_to_float(const Vectorized&); + +template <> +inline std::tuple, Vectorized> convert_to_float (const Vectorized& a) { + return convert_bfloat16_float(a); +} + +template <> +inline std::tuple, Vectorized> convert_to_float (const Vectorized& a) { + return convert_half_float(a); +} + +template , int> = 0> +inline Vectorized convert_from_float(const Vectorized&, const Vectorized&); + +template <> +inline Vectorized convert_from_float(const Vectorized& a, const Vectorized& b) { + return convert_float_bfloat16(a, b); +} + +template <> +inline Vectorized convert_from_float(const Vectorized& a, const Vectorized& b) { + return convert_float_half(a, b); +} + +template , int> = 0> +inline void load_to_float(const scalar_t *data, Vectorized &out1, Vectorized &out2); + +template <> +inline void load_to_float (const BFloat16 *data, Vectorized &out1, Vectorized &out2) { + load_fp32_from_bf16(data, out1, out2); +} + +template <> +inline void load_to_float (const Half *data, Vectorized &out1, Vectorized &out2) { + load_fp32_from_fp16(data, out1, out2); +} + +template , int> = 0> +inline void load_to_float(const scalar_t *data, Vectorized &out); + +template <> +inline void load_to_float (const BFloat16 *data, Vectorized &out) { + load_fp32_from_bf16(data, out); +} + +template <> +inline void load_to_float (const Half *data, Vectorized &out) { + load_fp32_from_fp16(data, out); +} + +// Note that we already have specialized member of Vectorized for BFloat16 +// so the following functions would run smoothly: +// using Vec = Vectorized; +// Vec one = Vec(BFloat16(1)); +// vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N); +// +// Then why we still need to specialize "functional"? +// If we do specialization at Vectorized<> level, the above example would need 3 pairs of +// conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/". +// If we do specialization at vec::map<>() level, we have only 1 pair of conversion +// of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only. +// +// The following BFloat16 functionality will only do data type conversion for input +// and output vector (reduce functionality will only convert the final scalar back to bf16). +// Compared to Vectorized<> specialization, +// 1. better performance since we have less data type conversion; +// 2. less rounding error since immediate results are kept in fp32; +// 3. accumulation done on data type of fp32. 
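+//
+// Editor's sketch of the map<>() level usage this enables (hedged; x_ptr,
+// y_ptr and N are hypothetical). The lambda operates on Vectorized<float>
+// while the input/output buffers stay BFloat16, so only one bf16<->fp32
+// conversion pair happens per vector:
+//
+//   using fVec = at::vec::Vectorized<float>;
+//   fVec one(1.0f);
+//   at::vec::map<at::BFloat16>(
+//       [one](fVec x) { return one / (one + x.exp()); },
+//       y_ptr, x_ptr, N);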
+// +// If you plan to extend this file, please ensure adding unit tests at +// aten/src/ATen/test/vec_test_all_types.cpp +// +template , int> = 0> +inline float reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size > fVec::size()) { + data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(vec_fun, data_fvec0, fVec::size()); + } else { + return vec_reduce_all(vec_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); + acc_fvec1 = vec_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size - d > fVec::size()) { + acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(vec_fun, acc_fvec0); +} + +template , int> = 0> +inline std::pair reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, + const scalar_t* data, int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size > fVec::size()) { + fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size()); + fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size()); + return std::pair( + vec_reduce_all(vec_fun1, acc1_fvec, fVec::size()), + vec_reduce_all(vec_fun2, acc2_fvec, fVec::size())); + } else { + return std::pair( + vec_reduce_all(vec_fun1, data_fvec0, size), + vec_reduce_all(vec_fun2, data_fvec0, size)); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc1_fvec0, acc1_fvec1] = convert_to_float(acc_bvec); + auto [acc2_fvec0, acc2_fvec1] = convert_to_float(acc_bvec); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); + acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1); + acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); + acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size - d > fVec::size()) { + acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); + acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size()); + acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); + acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size()); + } else { + acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d); + acc2_fvec0 = fVec::set(acc2_fvec0, 
vec_fun2(acc2_fvec0, data_fvec0), size - d); + } + } + acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1); + acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1); + return std::pair( + vec_reduce_all(vec_fun1, acc1_fvec0), + vec_reduce_all(vec_fun2, acc2_fvec0)); +} + +template , int> = 0> +inline float map_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + acc_fvec0 = map_fun(acc_fvec0); + acc_fvec1 = map_fun(acc_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0); + data_fvec1 = map_fun(data_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template , int> = 0> +inline float map2_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2, size); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + bVec acc2_bvec = bVec::loadu(data2); + auto [acc2_fvec0, acc2_fvec1] = convert_to_float(acc2_bvec); + acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0); + acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d); + auto [data2_fvec0, data2_fvec1] = 
convert_to_float(data2_bvec); + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template , int> = 0> +inline float map3_reduce_all( + const MapOp& map_fun, + const ReduceOp& red_fun, + const scalar_t* data, + const scalar_t* data2, + const scalar_t* data3, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + if (size < bVec::size()) { + bVec data_bvec = bVec::loadu(data, size); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2, size); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3, size); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + if (size > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); + return vec_reduce_all(red_fun, data_fvec0, fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + return vec_reduce_all(red_fun, data_fvec0, size); + } + } + int64_t d = bVec::size(); + bVec acc_bvec = bVec::loadu(data); + auto [acc_fvec0, acc_fvec1] = convert_to_float(acc_bvec); + bVec acc2_bvec = bVec::loadu(data2); + auto [acc2_fvec0, acc2_fvec1] = convert_to_float(acc2_bvec); + bVec acc3_bvec = bVec::loadu(data3); + auto [acc3_fvec0, acc3_fvec1] = convert_to_float(acc3_bvec); + acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0); + acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1); + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3 + d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = red_fun(acc_fvec1, data_fvec1); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(data3 + d, size - d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + if (size - d > fVec::size()) { + data_fvec0 = map_fun(data_fvec0, 
data2_fvec0, data3_fvec0); + data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); + acc_fvec0 = red_fun(acc_fvec0, data_fvec0); + acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); + } else { + data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); + acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); + } + } + acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); + return vec_reduce_all(red_fun, acc_fvec0); +} + +template , int> = 0> +inline void map( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(input_data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(input_data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map( + const Op& vec_fun, + scalar_t* output_data, + const float* input_data, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + fVec data_fvec0 = fVec::loadu(input_data + d); + fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size()); + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + fVec data_fvec0, data_fvec1; + if (size - d > fVec::size()) { + data_fvec0 = fVec::loadu(input_data + d); + data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size()); + } else { + // choose to align with behaviour of bVec::loadu(ptr, size), + // which leaves data_fvec1 uninitialized + data_fvec0 = fVec::loadu(input_data + d, size - d); + } + fVec output_fvec0 = vec_fun(data_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map2( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data, + const scalar_t* input_data2, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data_bvec = bVec::loadu(input_data + d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data_bvec = bVec::loadu(input_data + d, size - d); + auto [data_fvec0, data_fvec1] = convert_to_float(data_bvec); + bVec data2_bvec = 
bVec::loadu(input_data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); + fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map3( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data1_bvec = bVec::loadu(input_data1 + d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data1_bvec = bVec::loadu(input_data1 + d, size - d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d, size - d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d, size - d); + } +} + +template , int> = 0> +inline void map4( + const Op& vec_fun, + scalar_t* output_data, + const scalar_t* input_data1, + const scalar_t* input_data2, + const scalar_t* input_data3, + const scalar_t* input_data4, + int64_t size) { + using bVec = vec::Vectorized; + using fVec = vec::Vectorized; + int64_t d = 0; + for (; d < size - (size % bVec::size()); d += bVec::size()) { + bVec data1_bvec = bVec::loadu(input_data1 + d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + bVec data4_bvec = bVec::loadu(input_data4 + d); + auto [data4_fvec0, data4_fvec1] = convert_to_float(data4_bvec); + fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0); + fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1); + bVec output_bvec = convert_from_float(output_fvec0, output_fvec1); + output_bvec.store(output_data + d); + } + if (size - d > 0) { + bVec data1_bvec = bVec::loadu(input_data1 + d, size - d); + auto [data1_fvec0, data1_fvec1] = convert_to_float(data1_bvec); + bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); + auto [data2_fvec0, data2_fvec1] = convert_to_float(data2_bvec); + bVec data3_bvec = bVec::loadu(input_data3 + d, size - d); + auto [data3_fvec0, data3_fvec1] = convert_to_float(data3_bvec); + bVec data4_bvec = bVec::loadu(input_data4 + d, size - d); + auto [data4_fvec0, 
data4_fvec1] = convert_to_float(data4_bvec);
+    fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
+    fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
+    bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
+    output_bvec.store(output_data + d, size - d);
+  }
+}
+
+} // namespace at::vec
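All of the reduced-precision helpers above follow the same shape: load a `bVec` of bfloat16/half elements, widen it into two `fVec` float vectors with `convert_to_float`, apply the functor(s) in fp32, and fold or narrow once at the end, using `fVec::set` to mask the ragged tail. As a reading aid, here is a minimal scalar sketch of the contract `map_reduce_all` implements; the standalone function and its name are illustrative, not part of the vendored header, and it matches the vectorized version only up to floating-point reassociation (the reduction is assumed associative and commutative, and size >= 1).

#include <cstdint>

// Scalar reference for map_reduce_all: widen each element to float,
// apply map_fun, then fold everything with red_fun.
template <typename scalar_t, typename MapOp, typename ReduceOp>
float map_reduce_all_ref(const MapOp& map_fun, const ReduceOp& red_fun,
                         const scalar_t* data, int64_t size) {
  float acc = map_fun(static_cast<float>(data[0]));
  for (int64_t i = 1; i < size; ++i) {
    acc = red_fun(acc, map_fun(static_cast<float>(data[i])));
  }
  return acc;
}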
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h
new file mode 100644
index 0000000000000000000000000000000000000000..a82a8ef1a69457d4800f6c3de277c82f61dfa03c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h
@@ -0,0 +1,43 @@
+#pragma once
+#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+/* GCC or clang-compatible compiler, targeting x86/x86-64 */
+#include <x86intrin.h>
+#elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
+/* Clang-compatible compiler, targeting arm neon */
+#include <arm_neon.h>
+#elif defined(_MSC_VER)
+/* Microsoft C/C++-compatible compiler */
+#include <intrin.h>
+#if _MSC_VER <= 1900
+#define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2))
+#define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4))
+#define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8))
+#define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16))
+#endif
+#elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
+/* GCC-compatible compiler, targeting ARM with NEON */
+#include <arm_neon.h>
+#if defined (MISSING_ARM_VLD1)
+#include <ATen/cpu/vec/vec256/missing_vld1_neon.h>
+#elif defined (MISSING_ARM_VST1)
+#include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
+#endif
+#elif defined(__GNUC__) && defined(__IWMMXT__)
+/* GCC-compatible compiler, targeting ARM with WMMX */
+#include <mmintrin.h>
+#elif defined(__s390x__)
+// targets Z/architecture
+// we will include vecintrin later
+#elif (defined(__GNUC__) || defined(__xlC__)) && \
+        (defined(__VEC__) || defined(__ALTIVEC__))
+/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
+#include <altivec.h>
+/* We need to undef those tokens defined by <altivec.h> to avoid conflicts
+   with the C++ types. => Can still use __bool/__vector */
+#undef bool
+#undef vector
+#undef pixel
+#elif defined(__GNUC__) && defined(__SPE__)
+/* GCC-compatible compiler, targeting PowerPC with SPE */
+#include <spe.h>
+#endif
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h
new file mode 100644
index 0000000000000000000000000000000000000000..c6538dc6cbbc95f17766edaff189fea704ce99fb
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#if defined(CPU_CAPABILITY_AVX512)
+#include <ATen/cpu/vec/vec512/vec512.h>
+#else
+#include <ATen/cpu/vec/vec256/vec256.h>
+#endif
+
+namespace at::vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
+  __at_align__ bool buffer[x.size()];
+  x.ne(Vectorized<int8_t>(0)).store(buffer);
+
+  Vectorized<bool> ret;
+  static_assert(x.size() == ret.size(), "");
+  std::memcpy(ret, buffer, ret.size() * sizeof(bool));
+  return ret;
+}
+
+template <>
+inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
+  // See NOTE [Loading boolean values]
+  return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
+}
+
+template <>
+inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
+  // See NOTE [Loading boolean values]
+  return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count));
+}
+
+template <typename VT>
+struct VecHoldType { using hold_type = typename VT::value_type; };
+
+template <>
+struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; };
+
+template <>
+struct VecHoldType<Vectorized<Half>> {using hold_type = Half; };
+
+template <typename VT>
+using vechold_type = typename VecHoldType<VT>::hold_type;
+
+}} // namespace at::vec::CPU_CAPABILITY
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h
new file mode 100644
index 0000000000000000000000000000000000000000..711d16f9b231f0de8ef7950de809337027b1b2ee
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h
@@ -0,0 +1,8 @@
+/* Workaround for missing vst1q_f32_x2 in gcc-8. */
+
+__extension__ extern __inline void
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
+{
+  asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h
new file mode 100644
index 0000000000000000000000000000000000000000..19e0320d8abf6ba9e787c410e37bb9c9f9e223ca
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h
@@ -0,0 +1,1174 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX2) +#define SLEEF_STATIC_LIBS +#include +#endif + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wignored-qualifiers" + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) + +#ifndef SLEEF_CONST +#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER) +#define SLEEF_CONST const +#else +#define SLEEF_CONST +#endif +#define SLEEF_CONST_OLD SLEEF_CONST +#else +#define SLEEF_CONST_OLD +#endif + +// bfloat16 conversion +static inline void cvtbf16_fp32(const __m128i& a, __m256& o) { + o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16)); +} + +static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) { + __m128i lo = _mm256_extractf128_si256(a, 0); + __m128i hi = _mm256_extractf128_si256(a, 1); + cvtbf16_fp32(lo, o1); + cvtbf16_fp32(hi, o2); +} + +static inline __m128i cvtfp32_bf16(const __m256& src) { + __m256i value = _mm256_castps_si256(src); + __m256i nan = _mm256_set1_epi32(0xffff); + __m256i mask = _mm256_castps_si256(_mm256_cmp_ps(src, src, _CMP_ORD_Q)); + __m256i ones = _mm256_set1_epi32(0x1); + __m256i vec_bias = _mm256_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_value = _mm256_and_si256(_mm256_srli_epi32(value, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_value = _mm256_add_epi32(t_value, vec_bias); + // input += rounding_bias; + t_value = _mm256_add_epi32(t_value, value); + // input = input >> 16; + t_value = _mm256_srli_epi32(t_value, 16); + // Check NaN before converting back to bf16 + t_value = _mm256_blendv_epi8(nan, t_value, mask); + t_value = _mm256_packus_epi32(t_value, t_value); // t[4-7] t[4-7] t[0-4] t[0-4] + t_value = _mm256_permute4x64_epi64(t_value, 0xd8); // 11 01 10 00 + return _mm256_castsi256_si128(t_value); +} + +static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) { + __m256i lo = _mm256_castps_si256(a); + __m256i hi = _mm256_castps_si256(b); + __m256i nan = _mm256_set1_epi32(0xffff); + __m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q)); + __m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q)); + __m256i ones = _mm256_set1_epi32(0x1); + __m256i vec_bias = _mm256_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones); + auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_lo = _mm256_add_epi32(t_lo, vec_bias); + t_hi = _mm256_add_epi32(t_hi, vec_bias); + // input += rounding_bias; + t_lo = _mm256_add_epi32(t_lo, lo); + t_hi = _mm256_add_epi32(t_hi, hi); + // input = input >> 16; + t_lo = _mm256_srli_epi32(t_lo, 16); + t_hi = _mm256_srli_epi32(t_hi, 16); + // Check NaN before converting back to bf16 + t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo); + t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi); + + t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4] + return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11 01 10 00 +} + +static inline __m256i merge_compare_result(const __m256& a, const __m256& b) { + __m256i lo = _mm256_castps_si256(a); + __m256i hi = _mm256_castps_si256(b); + lo = _mm256_srli_epi32(lo, 16); + hi = _mm256_srli_epi32(hi, 16); + auto out = _mm256_packus_epi32(lo, hi); + return _mm256_permute4x64_epi64(out, 0xd8); +} + +// float16 conversion +static 
inline void cvtfp16_fp32(const __m128i& a, __m256& o) { + o = _mm256_cvtph_ps(a); +} + +static inline void cvtfp16_fp32(const __m256i& a, __m256& o1, __m256& o2) { + __m128i lo = _mm256_extractf128_si256(a, 0); + __m128i hi = _mm256_extractf128_si256(a, 1); + cvtfp16_fp32(lo, o1); + cvtfp16_fp32(hi, o2); +} + +static inline __m128i cvtfp32_fp16(const __m256& src) { + return _mm256_cvtps_ph( + src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); +} + +static inline __m256i cvtfp32_fp16(const __m256& a, const __m256& b) { + __m128i lo = _mm256_cvtps_ph( + a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + __m128i hi = _mm256_cvtps_ph( + b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1); +} + +// dtype conversion between float16/bfloat16 and float32 +template , int> = 0> +inline void cvt_to_fp32(const __m128i& a, __m256& o); +template <> inline void cvt_to_fp32(const __m128i& a, __m256& o) { + cvtbf16_fp32(a, o); +}; +template <> inline void cvt_to_fp32(const __m128i& a, __m256& o) { + cvtfp16_fp32(a, o); +} + +template , int> = 0> +inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2); +template <> inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2) { + cvtbf16_fp32(a, o1, o2); +} +template <> inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2) { + cvtfp16_fp32(a, o1, o2); +} + +template , int> = 0> +inline __m256i cvt_from_fp32(const __m256& a, const __m256& b); +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return cvtfp32_bf16(a, b); +} +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return merge_compare_result(a, b); +} +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return cvtfp32_fp16(a, b); +} +template <> inline __m256i cvt_from_fp32(const __m256& a, const __m256& b) { + return cvtfp32_fp16(a, b); +} + +template +class Vectorized16 { +static_assert( + is_reduced_floating_point_v, + "Support only float16 and bfloat16."); +protected: + __m256i values; +public: + using value_type = uint16_t; + using size_type = int; + static constexpr size_type size() { + return 16; + } + Vectorized16() {} + Vectorized16(__m256i v) : values(v) {} + Vectorized16(T val) { + value_type uw = val.x; + values = _mm256_set1_epi16(uw); + } + Vectorized16(T val1, T val2, T val3, T val4, + T val5, T val6, T val7, T val8, + T val9, T val10, T val11, T val12, + T val13, T val14, T val15, T val16) { + values = _mm256_setr_epi16( + val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x, + val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x); + } + operator __m256i() const { + return values; + } + T& operator[](int idx) = delete; + const T& operator[](int idx) const = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0)); + return _mm256_movemask_epi8(cmp); + } + static Vectorized loadu(const void* ptr, int16_t count = size()) { + if (count == size()) + return _mm256_loadu_si256(reinterpret_cast(ptr)); + + __at_align__ int16_t tmp_values[size()]; + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return _mm256_loadu_si256(reinterpret_cast(tmp_values)); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + 
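+      // Tail path: there is no masked 256-bit store, so the full vector is
+      // spilled to an aligned scratch buffer and only the first `count`
+      // 16-bit elements are copied back out with memcpy.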
__at_align__ int16_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi16(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi16(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi16(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi16(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi16(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi16(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi16(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi16(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi16(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi16(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi16(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi16(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi16(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi16(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi16(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi16(b.values, 15); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, + const Vectorized& b, const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(T base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized set(const Vectorized& a, + const Vectorized& b, int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + + Vectorized map(SLEEF_CONST __m256 (*SLEEF_CONST_OLD vop)(__m256)) const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + const auto o1 = vop(lo); + const auto o2 = vop(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized isnan() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + lo = _mm256_cmp_ps(lo, _mm256_set1_ps(0.0f), _CMP_UNORD_Q); + hi = _mm256_cmp_ps(hi, _mm256_set1_ps(0.0f), _CMP_UNORD_Q); + return merge_compare_result(lo, hi); + } + Vectorized abs() const { + return _mm256_andnot_si256(_mm256_set1_epi16(0x8000), values); + } + Vectorized angle() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto angle_lambda = [](__m256 values_2) { + const auto zero_vec = _mm256_set1_ps(0.f); + const auto nan_vec = 
_mm256_set1_ps(NAN); + const auto not_nan_mask = _mm256_cmp_ps(values_2, values_2, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_ps(c10::pi); + + const auto neg_mask = _mm256_cmp_ps(values_2, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask); + angle = _mm256_blendv_ps(angle, nan_vec, nan_mask); + return angle; + }; + auto o1 = angle_lambda(lo); + auto o2 = angle_lambda(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return map(Sleef_acosf8_u10); + } + Vectorized acosh() const { + return map(Sleef_acoshf8_u10); + } + Vectorized asin() const { + return map(Sleef_asinf8_u10); + } + Vectorized atan() const { + return map(Sleef_atanf8_u10); + } + Vectorized atanh() const { + return map(Sleef_atanhf8_u10); + } + Vectorized atan2(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_atan2f8_u10(lo, b1); + auto o2 = Sleef_atan2f8_u10(hi, b2); + return cvt_from_fp32(o1, o2); + } + Vectorized copysign(const Vectorized &sign) const { + // copy sign bit (0x8000) from sign and remaining bits from values + __m256i mask_value = _mm256_set1_epi32(~0x80008000); + __m256i mask_signbit = _mm256_set1_epi32(0x80008000); + return Vectorized( + _mm256_or_si256( + _mm256_and_si256(values, mask_value), + _mm256_and_si256(sign, mask_signbit))); + } + Vectorized erf() const { + return map(Sleef_erff8_u10); + } + Vectorized erfc() const { + return map(Sleef_erfcf8_u15); + } + Vectorized erfinv() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_erfinv(tmp1[i]); + tmp2[i] = calc_erfinv(tmp2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized exp() const { + return map(Sleef_expf8_u10); + } + Vectorized exp2() const { + return map(Sleef_exp2f8_u10); + } + Vectorized expm1() const { + return map(Sleef_expm1f8_u10); + } + Vectorized exp_u20() const { + return exp(); + } + Vectorized fmod(const Vectorized & q) const { + __m256 x_lo, x_hi; + cvt_to_fp32(values, x_lo, x_hi); + __m256 q_lo, q_hi; + cvt_to_fp32(q.values, q_lo, q_hi); + auto o1 = Sleef_fmodf8(x_lo, q_lo); + auto o2 = Sleef_fmodf8(x_hi, q_hi); + return cvt_from_fp32(o1, o2); + } + Vectorized hypot(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_hypotf8_u05(lo, b1); + auto o2 = Sleef_hypotf8_u05(hi, b2); + return cvt_from_fp32(o1, o2); + } + Vectorized i0() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + for (int64_t i = 0; i < size() / 2; i++) { + tmp1[i] = calc_i0(tmp1[i]); + tmp2[i] = calc_i0(tmp2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized i0e() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + constexpr auto sz = size(); + __at_align__ float tmp1[sz / 
2], tmp2[sz / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + + for (auto i = decltype(sz){0}; i < sz / 2; i++) { + tmp1[i] = calc_i0e(tmp1[i]); + tmp2[i] = calc_i0e(tmp2[i]); + } + const auto o1 = _mm256_loadu_ps(tmp1); + const auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized digamma() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + constexpr auto sz = size(); + __at_align__ float tmp1[sz / 2], tmp2[sz / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + + for (auto i = decltype(sz){0}; i < sz / 2; i++) { + tmp1[i] = calc_digamma(tmp1[i]); + tmp2[i] = calc_digamma(tmp2[i]); + } + const auto o1 = _mm256_loadu_ps(tmp1); + const auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized igamma(const Vectorized &x) const { + __m256 lo, hi; + __m256 xlo, xhi; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm256_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + + Vectorized igammac(const Vectorized &x) const { + __m256 lo, hi; + __m256 xlo, xhi; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmp1), lo); + _mm256_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm256_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm256_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]); + } + auto o1 = _mm256_loadu_ps(tmp1); + auto o2 = _mm256_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized log() const { + return map(Sleef_logf8_u10); + } + Vectorized log2() const { + return map(Sleef_log2f8_u10); + } + Vectorized log10() const { + return map(Sleef_log10f8_u10); + } + Vectorized log1p() const { + return map(Sleef_log1pf8_u10); + } + Vectorized sin() const { + return map(Sleef_sinf8_u10); + } + Vectorized sinh() const { + return map(Sleef_sinhf8_u10); + } + Vectorized cos() const { + return map(Sleef_cosf8_u10); + } + Vectorized cosh() const { + return map(Sleef_coshf8_u10); + } + Vectorized ceil() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_ceil_ps(lo); + auto o2 = _mm256_ceil_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized floor() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_floor_ps(lo); + auto o2 = _mm256_floor_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized neg() const { + return _mm256_xor_si256(values, _mm256_set1_epi16(0x8000)); + } + Vectorized round() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return cvt_from_fp32(o1, o2); + } + Vectorized tan() const { + return 
map(Sleef_tanf8_u10); + } + Vectorized tanh() const { + return map(Sleef_tanhf8_u10); + } + Vectorized trunc() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + return cvt_from_fp32(o1, o2); + } + Vectorized lgamma() const { + return map(Sleef_lgammaf8_u10); + } + Vectorized sqrt() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm256_sqrt_ps(lo); + auto o2 = _mm256_sqrt_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized reciprocal() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto ones = _mm256_set1_ps(1); + auto o1 = _mm256_div_ps(ones, lo); + auto o2 = _mm256_div_ps(ones, hi); + return cvt_from_fp32(o1, o2); + } + Vectorized rsqrt() const { + __m256 lo, hi; + cvt_to_fp32(values, lo, hi); + auto ones = _mm256_set1_ps(1); + auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo)); + auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi)); + return cvt_from_fp32(o1, o2); + } + Vectorized pow(const Vectorized &b) const { + __m256 lo, hi; + __m256 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_powf8_u10(lo, b1); + auto o2 = Sleef_powf8_u10(hi, b2); + return cvt_from_fp32(o1, o2); + } +private: + template + Vectorized inline binary_compare(const Vectorized& b, Op op) const { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvt_to_fp32(values, a_lo, a_hi); + cvt_to_fp32(b.values, b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvt_from_fp32(o1, o2); + } + +public: + Vectorized inline operator>(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GT_OQ); }); + } + Vectorized inline operator<(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LT_OQ); }); + } + Vectorized inline operator>=(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GE_OQ); }); + } + Vectorized inline operator<=(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LE_OQ); }); + } + Vectorized inline operator==(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); }); + } + Vectorized inline operator!=(const Vectorized& other) const { + return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); }); + } +}; + +template +static inline Vectorized binary_op_as_fp32(const Vectorized& a, const Vectorized& b, Op op) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvt_to_fp32(__m256i(a), a_lo, a_hi); + cvt_to_fp32(__m256i(b), b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvt_from_fp32(o1, o2); +} + +template <> +class Vectorized: public Vectorized16 { +public: + using Vectorized16::Vectorized16; + + Vectorized frac() const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return 
_mm256_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); }); +} +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); }); +} +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_si256(a, b); +} +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_si256(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto max_lo = _mm256_max_ps(a_lo, b_lo); + auto max_hi = _mm256_max_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + auto o1 = _mm256_or_ps(max_lo, nan_lo); + auto o2 = _mm256_or_ps(max_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + auto min_lo = _mm256_min_ps(a_lo, b_lo); + auto min_hi = _mm256_min_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
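+  // nan_lo/nan_hi are all-ones in exactly the lanes where the compare was
+  // unordered (a or b is NaN) and all-zeros elsewhere. OR-ing them into the
+  // result leaves non-NaN lanes untouched and turns NaN lanes into the
+  // all-ones bit pattern 0xFFFFFFFF, which is itself a NaN, so NaN
+  // propagation comes for free, without an extra blend.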
+ auto o1 = _mm256_or_ps(min_lo, nan_lo); + auto o2 = _mm256_or_ps(min_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + __m256 max_lo, max_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(min), min_lo, min_hi); + cvtbf16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo)); + auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi)); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 max_lo, max_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, a_lo); + auto o2 = _mm256_min_ps(max_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(min), min_lo, min_hi); + auto o1 = _mm256_max_ps(min_lo, a_lo); + auto o2 = _mm256_max_ps(min_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, BFloat16* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = _mm256_loadu_ps(&src[i]); + __m256 b = _mm256_loadu_ps(&src[i + 8]); + + __m256i bf = cvtfp32_bf16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, BFloat16* dst, int64_t n) { + auto load_float = [](const double *src) -> __m256 { + // Load one float vector from an array of doubles + __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src)); + __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4)); + return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = load_float(&src[i]); + __m256 b = load_float(&src[i + 8]); + + __m256i bf = cvtfp32_bf16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const Vectorized& c) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + __m256 c_lo, c_hi; + cvtbf16_fp32(__m256i(a), a_lo, a_hi); + cvtbf16_fp32(__m256i(b), b_lo, b_hi); + cvtbf16_fp32(__m256i(c), c_lo, c_hi); + auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +class Vectorized: public Vectorized16 { +public: + using Vectorized16::Vectorized16; + + Vectorized frac() const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const 
Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); }); +} +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); }); +} +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_si256(a, b); +} +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_si256(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(b), b_lo, b_hi); + auto max_lo = _mm256_max_ps(a_lo, b_lo); + auto max_hi = _mm256_max_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + auto o1 = _mm256_or_ps(max_lo, nan_lo); + auto o2 = _mm256_or_ps(max_hi, nan_hi); + return cvtfp32_fp16(o1, o2); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(b), b_lo, b_hi); + auto min_lo = _mm256_min_ps(a_lo, b_lo); + auto min_hi = _mm256_min_ps(a_hi, b_hi); + auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
+ auto o1 = _mm256_or_ps(min_lo, nan_lo); + auto o2 = _mm256_or_ps(min_hi, nan_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + __m256 max_lo, max_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(min), min_lo, min_hi); + cvtfp16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo)); + auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi)); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m256 a_lo, a_hi; + __m256 max_lo, max_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(max), max_lo, max_hi); + auto o1 = _mm256_min_ps(max_lo, a_lo); + auto o2 = _mm256_min_ps(max_hi, a_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m256 a_lo, a_hi; + __m256 min_lo, min_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(min), min_lo, min_hi); + auto o1 = _mm256_max_ps(min_lo, a_lo); + auto o2 = _mm256_max_ps(min_hi, a_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +inline void convert(const Half* src, Half* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i))); + _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, Half* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = _mm256_loadu_ps(&src[i]); + __m256 b = _mm256_loadu_ps(&src[i + 8]); + + __m256i c = cvtfp32_fp16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, Half* dst, int64_t n) { + auto load_float = [](const double *src) -> __m256 { + // Load one float vector from an array of doubles + __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src)); + __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4)); + return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m256 a = load_float(&src[i]); + __m256 b = load_float(&src[i + 8]); + + __m256i c = cvtfp32_fp16(a, b); + _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const Vectorized& c) { + __m256 a_lo, a_hi; + __m256 b_lo, b_hi; + __m256 c_lo, c_hi; + cvtfp16_fp32(__m256i(a), a_lo, a_hi); + cvtfp16_fp32(__m256i(b), b_lo, b_hi); + cvtfp16_fp32(__m256i(c), c_lo, c_hi); + auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_fp16(o1, o2); +} + +#define CONVERT_VECTORIZED_INIT(type, name) \ +inline std::tuple, Vectorized> convert_##name##_float(const Vectorized& a) { \ + __m256 o1, o2; \ + cvt_to_fp32(__m256i(a), o1, o2); \ + return std::make_tuple(o1, o2); \ +} \ +inline Vectorized convert_float_##name(const Vectorized& a, const Vectorized& b) { \ + return cvt_from_fp32(__m256(a), __m256(b)); \ +} 
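+// For reference, the instantiations below expand to plain free functions;
+// CONVERT_VECTORIZED_INIT(BFloat16, bfloat16) yields, roughly:
+//
+//   inline std::tuple<Vectorized<float>, Vectorized<float>>
+//   convert_bfloat16_float(const Vectorized<BFloat16>& a) {
+//     __m256 o1, o2;
+//     cvt_to_fp32<BFloat16>(__m256i(a), o1, o2);
+//     return std::make_tuple(o1, o2);
+//   }
+//
+//   inline Vectorized<BFloat16> convert_float_bfloat16(
+//       const Vectorized<float>& a, const Vectorized<float>& b) {
+//     return cvt_from_fp32<BFloat16>(__m256(a), __m256(b));
+//   }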
+CONVERT_VECTORIZED_INIT(BFloat16, bfloat16); +CONVERT_VECTORIZED_INIT(Half, half); + +#else // defined(CPU_CAPABILITY_AVX2) + +#define CONVERT_NON_VECTORIZED_INIT(type, name) \ +inline std::tuple, Vectorized> convert_##name##_float(const Vectorized& a) { \ + constexpr int64_t K = Vectorized::size(); \ + __at_align__ float arr[K]; \ + __at_align__ type arr2[K]; \ + a.store(arr2); \ + convert(arr2, arr, K); \ + return std::make_tuple( \ + Vectorized::loadu(arr), \ + Vectorized::loadu(arr + Vectorized::size())); \ +} \ +inline Vectorized convert_float_##name(const Vectorized& a, const Vectorized& b) { \ + constexpr int64_t K = Vectorized::size(); \ + __at_align__ float arr[K]; \ + __at_align__ type arr2[K]; \ + a.store(arr); \ + b.store(arr + Vectorized::size()); \ + convert(arr, arr2, K); \ + return Vectorized::loadu(arr2); \ +} +CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16); +#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +inline std::tuple, Vectorized> convert_half_float(const Vectorized& a) { + static_assert(Vectorized::size() == 2 * Vectorized::size()); +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + float16x8x2_t arr = a; + float16x8_t x = arr.val[0]; + float16x8_t y = arr.val[1]; +#else + auto arr = reinterpret_cast(a.operator const Half*()); + float16x8_t x = vld1q_f16(arr); + float16x8_t y = vld1q_f16(arr + Vectorized::size()); +#endif + float32x4_t x1 = vcvt_f32_f16(vget_low_f16(x)); + float32x4_t x2 = vcvt_f32_f16(vget_high_f16(x)); + float32x4_t y1 = vcvt_f32_f16(vget_low_f16(y)); + float32x4_t y2 = vcvt_f32_f16(vget_high_f16(y)); + return { Vectorized(x1, x2), Vectorized(y1, y2) }; +} +inline Vectorized convert_float_half(const Vectorized& a, const Vectorized& b) { + static_assert(Vectorized::size() == 2 * Vectorized::size()); + float32x4x2_t x = a; + float32x4x2_t y = b; + float16x4_t x1 = vcvt_f16_f32(x.val[0]); + float16x4_t x2 = vcvt_f16_f32(x.val[1]); + float16x4_t y1 = vcvt_f16_f32(y.val[0]); + float16x4_t y2 = vcvt_f16_f32(y.val[1]); +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + return Vectorized(vcombine_f16(x1, x2), vcombine_f16(y1, y2)); +#else + Vectorized rc; + auto arr = reinterpret_cast(rc.operator Half*()); + vst1q_f16(arr, vcombine_f16(x1, x2)); + vst1q_f16(arr + Vectorized::size(), vcombine_f16(y1, y2)); + return rc; +#endif +} +#else +CONVERT_NON_VECTORIZED_INIT(Half, half); +#endif + +#endif // defined(CPU_CAPABILITY_AVX2) + +#if defined(CPU_CAPABILITY_AVX2) +#define LOAD_FP32_VECTORIZED_INIT(type, name) \ +inline void load_fp32_from_##name(const type *data, Vectorized& out) { \ + auto values = _mm_loadu_si128(reinterpret_cast(data)); \ + __m256 out_values; \ + cvt_to_fp32(values, out_values); \ + out = out_values; \ +} \ +\ +inline void load_fp32_from_##name(const type *data, Vectorized& out1, Vectorized& out2) { \ + auto vec = Vectorized::loadu(data); \ + __m256 out1_values, out2_values; \ + cvt_to_fp32(vec, out1_values, out2_values); \ + out1 = out1_values; \ + out2 = out2_values; \ +} +LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16); +LOAD_FP32_VECTORIZED_INIT(Half, fp16); + +#else // defined(CPU_CAPABILITY_AVX2) +#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \ +inline void load_fp32_from_##name(const type *data, Vectorized& out) { \ + __at_align__ float values[Vectorized::size()]; \ + for (const auto k : c10::irange(Vectorized::size())) { \ + values[k] = data[k]; \ + } \ + out = Vectorized::loadu(values); \ +} \ +\ +inline void load_fp32_from_##name(const type *data, Vectorized& out1, Vectorized& out2) { \ + 
load_fp32_from_##name(data, out1); \
+  data += Vectorized<float>::size(); \
+  load_fp32_from_##name(data, out2); \
+}
+LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
+LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
+
+#endif
+}} // namespace at::vec::CPU_CAPABILITY
+
+#pragma GCC diagnostic pop
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c198fb37d3d19be314294c582538e2f652aaab8
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h
@@ -0,0 +1,432 @@
+#pragma once
+
+// DO NOT DEFINE STATIC DATA IN THIS HEADER!
+// See Note [Do not compile initializers with AVX]
+
+#include <c10/util/complex.h>
+#include <c10/util/irange.h>
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+
+#if defined(CPU_CAPABILITY_AVX2)
+#define SLEEF_STATIC_LIBS
+#include <sleef.h>
+#endif
+
+namespace at::vec {
+// See Note [CPU_CAPABILITY namespace]
+inline namespace CPU_CAPABILITY {
+
+#if defined(CPU_CAPABILITY_AVX2)
+
+template <> class Vectorized<c10::complex<double>> {
+private:
+  __m256d values;
+public:
+  using value_type = c10::complex<double>;
+  using size_type = int;
+  static constexpr size_type size() {
+    return 2;
+  }
+  Vectorized() {}
+  Vectorized(__m256d v) : values(v) {}
+  Vectorized(c10::complex<double> val) {
+    double real_value = val.real();
+    double imag_value = val.imag();
+    values = _mm256_setr_pd(real_value, imag_value,
+                            real_value, imag_value);
+  }
+  Vectorized(c10::complex<double> val1, c10::complex<double> val2) {
+    values = _mm256_setr_pd(val1.real(), val1.imag(),
+                            val2.real(), val2.imag());
+  }
+  operator __m256d() const {
+    return values;
+  }
+  template <int64_t mask>
+  static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
+    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
+    static_assert (mask > -1 && mask < 4, "Unexpected mask value");
+    switch (mask) {
+      case 0:
+        return a;
+      case 1:
+        return _mm256_blend_pd(a.values, b.values, 0x03);
+      case 2:
+        return _mm256_blend_pd(a.values, b.values, 0x0c);
+      case 3: break;
+    }
+    return b;
+  }
+  static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
+                                                 const Vectorized<c10::complex<double>>& mask) {
+    // convert c10::complex<V> index mask to V index mask: xy -> xxyy
+    auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values);
+    return _mm256_blendv_pd(a.values, b.values, mask_);
+
+  }
+  template <typename step_t>
+  static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0., step_t step = static_cast<step_t>(1)) {
+    return Vectorized<c10::complex<double>>(base,
+                                            base + step);
+  }
+  static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
+                                              int64_t count = size()) {
+    switch (count) {
+      case 0:
+        return a;
+      case 1:
+        return blend<1>(a, b);
+    }
+    return b;
+  }
+  static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
+    if (count == size())
+      return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
+
+    __at_align__ double tmp_values[2*size()];
+    // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
+    // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
+    // instructions while a loop would be compiled to one instruction.
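+    // Zero-fill the scratch buffer, then overwrite the first `count`
+    // complex values; the tail lanes of the returned vector are therefore a
+    // well-defined 0.0 instead of whatever the stack happened to hold.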
+ for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm256_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[2*size()]; + _mm256_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + __m256d abs_2_() const { + auto val_2 = _mm256_mul_pd(values, values); // a*a b*b + return _mm256_hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b + } + __m256d abs_() const { + auto real = _mm256_movedup_pd(values); // real real + // movehdup_pd does not exist... + auto imag = _mm256_permute_pd(values, 0xf); // imag imag + return Sleef_hypotd4_u05(real, imag); // abs abs + } + Vectorized> abs() const { + const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm256_and_pd(abs_(), real_mask); // abs 0 + } + __m256d angle_() const { + //angle = atan2(b/a) + auto b_a = _mm256_permute_pd(values, 0x05); // b a + return Sleef_atan2d4_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + auto angle = _mm256_permute_pd(angle_(), 0x05); // angle 90-angle + return _mm256_and_pd(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm256_setzero_pd(); + auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ); + auto div = _mm256_div_pd(values, abs); + return _mm256_blendv_pd(div, zero, mask); + } + __m256d real_() const { + const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm256_and_pd(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m256d imag_() const { + const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF)); + return _mm256_and_pd(values, imag_mask); + } + Vectorized> imag() const { + return _mm256_permute_pd(imag_(), 0x05); //b a + } + __m256d conj_() const { + const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0); + return _mm256_xor_pd(values, sign_mask); // a -b + } + Vectorized> conj() const { + return conj_(); + } + Vectorized> log() const { + // Most trigonomic ops use the log() op to improve complex number performance. 
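+    // Per lane this is the principal value
+    //   log(a + bi) = 0.5 * ln(a*a + b*b) + i * atan2(b, a)
+    // computed by scalar std::log via map() (store, apply element-wise,
+    // reload). log2() and log10() divide this result by ln(2) and ln(10),
+    // and asin() and atan() reuse it in their identities.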
+ return map(std::log); + } + Vectorized> log2() const { + const __m256d log2_ = _mm256_set1_pd(std::log(2)); + return _mm256_div_pd(log(), log2_); + } + Vectorized> log10() const { + const __m256d log10_ = _mm256_set1_pd(std::log(10)); + return _mm256_div_pd(log(), log10_); + } + Vectorized> log1p() const { + return map(std::log1p); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m256d one = _mm256_set1_pd(1); + + auto conj = conj_(); + auto b_a = _mm256_permute_pd(conj, 0x05); //-b a + auto ab = _mm256_mul_pd(conj, b_a); //-ab -ab + auto im = _mm256_add_pd(ab, ab); //-2ab -2ab + + auto val_2 = _mm256_mul_pd(values, values); // a*a b*b + auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05)); // a*a-b*b b*b-a*a + re = _mm256_sub_pd(one, re); + + auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm256_add_pd(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj(); //-i*ln() + } + Vectorized> acos() const { + // acos(x) = pi/2 - asin(x) + constexpr auto pi_2d = c10::pi / 2; + const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0); + return _mm256_sub_pd(pi_2, asin()); + } + Vectorized> atan() const; + Vectorized> atanh() const { + return map(std::atanh); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expd4_u10(values); //exp(a) exp(b) + exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosd4_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05), + sin_cos.x, 0x0A); //cos(b) sin(b) + return _mm256_mul_pd(exp, cos_sin); + } + Vectorized> exp2() const { + // Use identity 2**x = exp(log(2) * x) + const __m256d ln_2 = _mm256_set1_pd(c10::ln_2); + Vectorized> scaled_values = _mm256_mul_pd(values, ln_2); + return scaled_values.exp(); + } + Vectorized> expm1() const { + return map(std::expm1); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm256_ceil_pd(values); + } + Vectorized> floor() const { + return _mm256_floor_pd(values); + } + Vectorized> neg() const { + auto zero = _mm256_setzero_pd(); + return _mm256_sub_pd(zero, values); + } + Vectorized> round() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. 
+  // `O`: get false if an operand is NaN
+  // `Q`: do not raise if an operand is NaN
+  Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
+    return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
+  }
+  Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
+    return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
+  }
+  Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>&) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>&) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>&) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+  Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>&) const {
+    TORCH_CHECK(false, "not supported for complex numbers");
+  }
+
+  Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
+  Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
+};
+
+template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
+  return _mm256_add_pd(a, b);
+}
+
+template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
+  return _mm256_sub_pd(a, b);
+}
+
+template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
+  //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
+  const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
+  auto ac_bd = _mm256_mul_pd(a, b);         //ac       bd
+
+  auto d_c = _mm256_permute_pd(b, 0x05);    //d        c
+  d_c = _mm256_xor_pd(sign_mask, d_c);      //d       -c
+  auto ad_bc = _mm256_mul_pd(a, d_c);       //ad      -bc
+
+  auto ret = _mm256_hsub_pd(ac_bd, ad_bc);  //ac - bd  ad + bc
+  return ret;
+}
+
+template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
+  //re + im*i = (a + bi) / (c + di)
+  auto mask = _mm256_set1_pd(-0.f);
+  auto fabs_cd = _mm256_andnot_pd(mask, b);           // |c|    |d|
+  auto fabs_dc = _mm256_permute_pd(fabs_cd, 0x05);    // |d|    |c|
+  auto scale = _mm256_div_pd(_mm256_set1_pd(1.0f), _mm256_max_pd(fabs_cd, fabs_dc));  // 1/sc     1/sc
+  auto a2 = _mm256_mul_pd(a, scale);        // a/sc     b/sc
+  auto b2 = _mm256_mul_pd(b, scale);        // c/sc     d/sc
+  auto acbd2 = _mm256_mul_pd(a2, b2);
+
+  const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0);
+  auto dc2 = _mm256_permute_pd(b2, 0x05);   // d/sc         c/sc
+  dc2 = _mm256_xor_pd(sign_mask, dc2);      // -d/|c,d|     c/sc
+  auto adbc2 = _mm256_mul_pd(a2, dc2);      //-ad/sc^2      bc/sc^2
+  auto res2 = _mm256_hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2  (bc-ad)/sc^2
+
+  // get the denominator
+  auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_();  // (c^2+d^2)/sc^2   (c^2+d^2)/sc^2
+  res2 = _mm256_div_pd(res2, denom2);
+  return res2;
+}
+
+// reciprocal. Implement this here so we can use multiplication.
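+// For z = c + di the reciprocal is conj(z) / |z|^2, i.e.
+//   1/z = (c - di) / (c*c + d*d),
+// which is what the intrinsics below compute lane-wise: the xor with the
+// sign mask forms (c, -d) and abs_2_() supplies c*c + d*d in both slots.
+// Scalar sketch of the same computation (illustrative only):
+//   c10::complex<double> reciprocal(const c10::complex<double>& z) {
+//     double d2 = z.real() * z.real() + z.imag() * z.imag();
+//     return {z.real() / d2, -z.imag() / d2};
+//   }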
+inline Vectorized> Vectorized>::reciprocal() const{ + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = d/abs_2() + const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0); + auto c_d = _mm256_xor_pd(sign_mask, values); //c -d + return _mm256_div_pd(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m256d i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm256_add_pd(i, values)); // a 1+b + auto sub = Vectorized(_mm256_sub_pd(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm256_blendv_pd(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_pd(max, isnan); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm256_blendv_pd(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_pd(min, isnan); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, const Vectorized>& b) { + return _mm256_and_pd(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, const Vectorized>& b) { + return _mm256_or_pd(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, const Vectorized>& b) { + return _mm256_xor_pd(a, b); +} + +inline Vectorized> Vectorized>::eq(const Vectorized>& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & Vectorized>(_mm256_set1_pd(1.0)); +} + +inline Vectorized> Vectorized>::ne(const Vectorized>& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & Vectorized>(_mm256_set1_pd(1.0)); +} + +#endif + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h new file mode 100644 index 0000000000000000000000000000000000000000..c72d4d49274a0e8ca97f23c0157744913057d6eb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h @@ -0,0 +1,469 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) + +template <> class Vectorized> { +private: + __m256 values; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m256 v) : values(v) {} + Vectorized(c10::complex val) { + float real_value = val.real(); + float imag_value = val.imag(); + values = _mm256_setr_ps(real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value + ); + } + Vectorized(c10::complex val1, c10::complex val2, c10::complex val3, c10::complex val4) { + values = _mm256_setr_ps(val1.real(), val1.imag(), + val2.real(), val2.imag(), + val3.real(), val3.imag(), + val4.real(), val4.imag() + ); + } + operator __m256() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + static_assert(mask > -1 && mask < 16, "Unexpected mask range"); + switch (mask) { + case 0: + return a; + case 1: + return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011 + case 2: + return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100 + case 3: + return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111 + case 4: + return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000 + case 5: + return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011 + case 6: + return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100 + case 7: + return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111 + case 8: + return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000 + case 9: + return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011 + case 10: + return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100 + case 11: + return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111 + case 12: + return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000 + case 13: + return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011 + case 14: + return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100 + default: break; + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values); + return _mm256_blendv_ps(a.values, b.values, mask_); + + } + template + static Vectorized> arange(c10::complex base = 0., step_t step = static_cast(1)) { + return Vectorized>(base, + base + step, + base + c10::complex(2)*step, + base + c10::complex(3)*step); + } + static Vectorized> set(const Vectorized>& a, const Vectorized>& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_ps(reinterpret_cast(ptr)); + + __at_align__ float tmp_values[2*size()]; + // Ensure 
uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm256_load_ps(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + float tmp_values[2*size()]; + _mm256_storeu_ps(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + __m256 abs_2_() const { + auto val_2 = _mm256_mul_ps(values, values); // a*a b*b + auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b + return _mm256_permute_ps(ret, 0xD8); + } + __m256 abs_() const { + auto real = _mm256_moveldup_ps(values); // real real + auto imag = _mm256_movehdup_ps(values); // imag imag + return Sleef_hypotf8_u05(real, imag); // abs abs + } + Vectorized> abs() const { + const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm256_and_ps(abs_(), real_mask); // abs 0 + } + __m256 angle_() const { + //angle = atan2(b/a) + auto b_a = _mm256_permute_ps(values, 0xB1); // b a + return Sleef_atan2f8_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle + return _mm256_and_ps(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm256_setzero_ps(); + auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ); + auto div = _mm256_div_ps(values, abs); + return _mm256_blendv_ps(div, zero, mask); + } + __m256 real_() const { + const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm256_and_ps(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m256 imag_() const { + const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF)); + return _mm256_and_ps(values, imag_mask); + } + Vectorized> imag() const { + return _mm256_permute_ps(imag_(), 0xB1); //b a + } + __m256 conj_() const { + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + return _mm256_xor_ps(values, sign_mask); // a -b + } + Vectorized> conj() const { + return conj_(); + } + Vectorized> log() const { + // Most trigonomic ops use the log() op to improve complex number performance. 
+ return map(std::log); + } + Vectorized> log2() const { + const __m256 log2_ = _mm256_set1_ps(std::log(2)); + return _mm256_div_ps(log(), log2_); + } + Vectorized> log10() const { + const __m256 log10_ = _mm256_set1_ps(std::log(10)); + return _mm256_div_ps(log(), log10_); + } + Vectorized> log1p() const { + return map(std::log1p); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m256 one = _mm256_set1_ps(1); + + auto conj = conj_(); + auto b_a = _mm256_permute_ps(conj, 0xB1); //-b a + auto ab = _mm256_mul_ps(conj, b_a); //-ab -ab + auto im = _mm256_add_ps(ab, ab); //-2ab -2ab + + auto val_2 = _mm256_mul_ps(values, values); // a*a b*b + auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a + re = _mm256_permute_ps(re, 0xD8); + re = _mm256_sub_ps(one, re); + + auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm256_add_ps(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj(); //-i*ln() + } + Vectorized> acos() const { + return map(std::acos); + } + Vectorized> atan() const; + Vectorized> atanh() const { + return map(std::atanh); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expf8_u10(values); //exp(a) exp(b) + exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosf8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1), + sin_cos.x, 0xAA); //cos(b) sin(b) + return _mm256_mul_ps(exp, cos_sin); + } + Vectorized> exp2() const { + // Use identity 2**x = exp(log(2) * x) + const __m256 ln_2 = _mm256_set1_ps(c10::ln_2); + Vectorized> scaled_values = _mm256_mul_ps(values, ln_2); + return scaled_values.exp(); + } + Vectorized> expm1() const { + return map(std::expm1); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm256_ceil_ps(values); + } + Vectorized> floor() const { + return _mm256_floor_ps(values); + } + Vectorized> neg() const { + auto zero = _mm256_setzero_ps(); + return _mm256_sub_ps(zero, values); + } + Vectorized> round() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ); + } + Vectorized> operator!=(const Vectorized>& other) const { + return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ); + } + Vectorized> operator<(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>& /*other*/) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, const Vectorized> &b) { + return _mm256_add_ps(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, const Vectorized> &b) { + return _mm256_sub_ps(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm256_mul_ps(a, b); //ac bd + + auto d_c = _mm256_permute_ps(b, 0xB1); //d c + d_c = _mm256_xor_ps(sign_mask, d_c); //d -c + auto ad_bc = _mm256_mul_ps(a, d_c); //ad -bc + + auto ret = _mm256_hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc + ret = _mm256_permute_ps(ret, 0xD8); + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + auto mask = _mm256_set1_ps(-0.f); + auto fabs_cd = _mm256_andnot_ps(mask, b); // |c| |d| + auto fabs_dc = _mm256_permute_ps(fabs_cd, 0xB1); // |d| |c| + auto scale = _mm256_rcp_ps(_mm256_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc + auto a2 = _mm256_mul_ps(a, scale); // a/sc b/sc + auto b2 = _mm256_mul_ps(b, scale); // c/sc d/sc + auto acbd2 = _mm256_mul_ps(a2, b2); + + const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0); + auto dc2 = _mm256_permute_ps(b2, 0xB1); // d/sc c/sc + dc2 = _mm256_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc + auto adbc2 = _mm256_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2 + auto res2 = _mm256_hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2 + res2 = _mm256_permute_ps(res2, 0xD8); + + // get the denominator + auto denom2 = Vectorized>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + res2 = _mm256_div_ps(res2, denom2); + return res2; +} + +// reciprocal. Implement this here so we can use multiplication. 
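+// A note on operator/ above: both operands are pre-scaled by
+// 1/max(|c|, |d|) before any products are formed. The same scale ends up
+// multiplying the numerator a*conj(b) and the denominator |b|^2, so it
+// cancels in the final quotient; its only purpose is to keep the
+// intermediate products away from float overflow/underflow, which is also
+// why the low-precision _mm256_rcp_ps approximation is accurate enough to
+// compute it. For example, in (1e30f + 0i) / (2e30f + 0i) the unscaled c*c
+// would overflow float, while the scaled intermediates stay near 1 and the
+// quotient still comes out as 0.5.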
+inline Vectorized> Vectorized>::reciprocal() const { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = d/abs_2() + const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto c_d = _mm256_xor_ps(sign_mask, values); //c -d + return _mm256_div_ps(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm256_add_ps(i, values)); // a 1+b + auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm256_blendv_ps(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_ps(max, isnan); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, const Vectorized>& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm256_blendv_ps(a, b, mask); + // Exploit the fact that all-ones is a NaN. + auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + return _mm256_or_ps(min, isnan); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, const Vectorized>& b) { + return _mm256_and_ps(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, const Vectorized>& b) { + return _mm256_or_ps(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, const Vectorized>& b) { + return _mm256_xor_ps(a, b); +} + +inline Vectorized> Vectorized>::eq( + const Vectorized>& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & Vectorized>(_mm256_set1_ps(1.0f)); +} + +inline Vectorized> Vectorized>::ne( + const Vectorized>& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & Vectorized>(_mm256_set1_ps(1.0f)); +} + +#endif + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h new file mode 100644 index 0000000000000000000000000000000000000000..bed6da627af2dc9dfc91f2b2f800585ea0d6be14 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h @@ -0,0 +1,443 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX2) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + + +#if defined(CPU_CAPABILITY_AVX2) + +template <> class Vectorized { +private: + __m256d values; +public: + using value_type = double; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m256d v) : values(v) {} + Vectorized(double val) { + values = _mm256_set1_pd(val); + } + Vectorized(double val1, double val2, double val3, double val4) { + values = _mm256_setr_pd(val1, val2, val3, val4); + } + operator __m256d() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm256_blend_pd(a.values, b.values, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_pd(a.values, b.values, mask.values); + } + template + static Vectorized arange(double base = 0., step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm256_loadu_pd(reinterpret_cast(ptr)); + + + __at_align__ double tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
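+    // Example of a partial load: only `count` elements are read from ptr and
+    // the remaining lanes keep the 0.0 written by the loop below, e.g.
+    //   double src[3] = {1.0, 2.0, 3.0};
+    //   auto v = Vectorized<double>::loadu(src, 3);   // lanes: 1.0 2.0 3.0 0.0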
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(double)); + return _mm256_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm256_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[size()]; + _mm256_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(double)); + } + } + const double& operator[](int idx) const = delete; + double& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ); + return _mm256_movemask_pd(cmp); + } + Vectorized isnan() const { + return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q); + } + bool has_inf_nan() const { + __m256d self_sub = _mm256_sub_pd(values, values); + return (_mm256_movemask_epi8(_mm256_castpd_si256(self_sub)) & 0x77777777) != 0; + } + Vectorized map(double (*const f)(double)) const { + __at_align__ double tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm256_set1_pd(-0.f); + return _mm256_andnot_pd(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm256_set1_pd(0.f); + const auto nan_vec = _mm256_set1_pd(NAN); + const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ); + const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ); + const auto pi = _mm256_set1_pd(c10::pi); + + const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask); + angle = _mm256_blendv_pd(angle, nan_vec, nan_mask); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_pd(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosd4_u10(values)); + } + Vectorized acosh() const { + return Vectorized(Sleef_acoshd4_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asind4_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atand4_u10(values)); + } + Vectorized atanh() const { + return Vectorized(Sleef_atanhd4_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2d4_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignd4(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erfd4_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcd4_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expd4_u10(values)); + } + Vectorized exp2() const { + return Vectorized(Sleef_exp2d4_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1d4_u10(values)); + } + Vectorized exp_u20() const { + return exp(); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodd4(values, q)); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotd4_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return 
map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized log() const { + return Vectorized(Sleef_logd4_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2d4_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10d4_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pd4_u10(values)); + } + Vectorized sin() const { + return Vectorized(Sleef_sind4_u10(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhd4_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosd4_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshd4_u10(values)); + } + Vectorized ceil() const { + return _mm256_ceil_pd(values); + } + Vectorized floor() const { + return _mm256_floor_pd(values); + } + Vectorized frac() const; + Vectorized neg() const { + return _mm256_xor_pd(_mm256_set1_pd(-0.), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterd4(values, b)); + } + Vectorized round() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tand4_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhd4_u10(values)); + } + Vectorized trunc() const { + return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammad4_u10(values)); + } + Vectorized sqrt() const { + return _mm256_sqrt_pd(values); + } + Vectorized reciprocal() const { + return _mm256_div_pd(_mm256_set1_pd(1), values); + } + Vectorized rsqrt() const { + return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powd4_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ); + } + + Vectorized operator!=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ); + } + + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ); + } + + Vectorized operator<=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ); + } + + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ); + } + + Vectorized operator>=(const Vectorized& other) const { + return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_pd(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_pd(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mul_pd(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm256_div_pd(a, b); +} + +// frac. Implement this here so we can use subtraction. +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + Vectorized max = _mm256_max_pd(a, b); + Vectorized isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. + return _mm256_or_pd(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + Vectorized min = _mm256_min_pd(a, b); + Vectorized isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); + // Exploit the fact that all-ones is a NaN. 
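+  // _CMP_UNORD_Q sets a lane to all ones exactly when a or b is NaN there;
+  // OR-ing that mask into `min` forces such lanes to the bit pattern
+  // 0xFFFFFFFFFFFFFFFF, which decodes as a quiet NaN. For example,
+  //   minimum(Vectorized<double>(NAN), Vectorized<double>(1.0))
+  // yields NaN in every lane rather than 1.0, as required for the
+  // NaN-propagating IEEE 754 minimum.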
+ return _mm256_or_pd(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm256_min_pd(max, _mm256_max_pd(min, a)); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm256_max_pd(min, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm256_min_pd(max, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_pd(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_pd(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_pd(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0); +} + +template <> +inline void convert(const double* src, double* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i)); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +#ifdef CPU_CAPABILITY_AVX2 +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm256_fmadd_pd(a, b, c); +} + +template <> +Vectorized inline fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm256_fmsub_pd(a, b, c); +} +#endif + +#endif + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..b150106c43597895f8da99c52cf08061a4b18703 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_half_neon.h @@ -0,0 +1,818 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#include + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +// Right now contains only aarch64 implementation. +// Due to follow two reasons aarch32 is not currently supported. +// 1. Due to difference in ISA been aarch32 and aarch64, intrinsics +// that work for aarch64 dont work for aarch32. +// 2. Android NDK r21 has problems with compiling aarch32. +// Clang seg faults. +// https://github.com/android/ndk/issues/1248 +// https://bugs.llvm.org/show_bug.cgi?id=45824 +// Most likely we will do aarch32 support with inline asm. +#if !defined(C10_MOBILE) && defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + +#ifdef __BIG_ENDIAN__ +#error "Big endian is not supported." 
+#endif + +template +struct BlendHalfRegs { + static float16x8_t impl( + const float16x8_t& a, + const float16x8_t& b, + float16x8_t& res); +}; + +template +struct BlendHalfRegs { + static float16x8_t impl( + const float16x8_t& a, + const float16x8_t& b, + float16x8_t& res) { + return vsetq_lane_f16(vgetq_lane_f16(b, index), res, index); + } +}; + +template +struct BlendHalfRegs { + static float16x8_t impl( + const float16x8_t& a, + const float16x8_t& b, + float16x8_t& res) { + return vsetq_lane_f16(vgetq_lane_f16(a, index), res, index); + } +}; + +// On ARM, Half type supports float16_t->Half constructor and Half->float16_t +// conversion +template <> +class Vectorized { + private: + float16x8x2_t values; + + public: + // value_type should be c10::Half to fit interface with vec_base.h + using value_type = c10::Half; + using size_type = int; + static constexpr size_type size() { + static_assert(sizeof(float16x8x2_t) == 16 * sizeof(value_type)); + return 16; + } + + private: + // We use these private map functions to implement various methods + Vectorized map2( + const Vectorized& second, + c10::Half (*const f)(c10::Half, c10::Half)) const { + __at_align__ c10::Half tmp_first[size()]; + __at_align__ c10::Half tmp_second[size()]; + store(tmp_first); // store this to tmp_first + second.store(tmp_second); + for (const auto i : c10::irange(size())) { + tmp_first[i] = f(tmp_first[i], tmp_second[i]); + } + return loadu(tmp_first); + } + + Vectorized map_with_vec_float_method( + Vectorized (Vectorized::*m)() const) const { + // Convert low float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values.val[0])); + float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values.val[0])); + Vectorized mv0 = (Vectorized(v00, v01).*m)(); + float16x4_t r00 = vcvt_f16_f32(mv0.get_low()); + float16x4_t r01 = vcvt_f16_f32(mv0.get_high()); + + // Convert high float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v10 = vcvt_f32_f16(vget_low_f16(values.val[1])); + float32x4_t v11 = vcvt_f32_f16(vget_high_f16(values.val[1])); + Vectorized mv1 = (Vectorized(v10, v11).*m)(); + float16x4_t r10 = vcvt_f16_f32(mv1.get_low()); + float16x4_t r11 = vcvt_f16_f32(mv1.get_high()); + + // Pack result into Vectorized + return Vectorized( + vcombine_f16(r00, r01), vcombine_f16(r10, r11)); + } + + Vectorized map2_with_vec_float_method( + const Vectorized& second, + Vectorized (Vectorized::*m)(const Vectorized&) + const) const { + // Convert low float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v00 = vcvt_f32_f16(vget_low_f16(values.val[0])); + float32x4_t v01 = vcvt_f32_f16(vget_high_f16(values.val[0])); + float32x4_t second_v00 = vcvt_f32_f16(vget_low_f16(second.get_low())); + float32x4_t second_v01 = vcvt_f32_f16(vget_high_f16(second.get_low())); + Vectorized mv0 = (Vectorized(v00, v01).*m)( + Vectorized(second_v00, second_v01)); + float16x4_t r00 = vcvt_f16_f32(mv0.get_low()); + float16x4_t r01 = vcvt_f16_f32(mv0.get_high()); + + // Convert high float16x8_t to 2 float32x4_t variables, apply m, and convert + // back + float32x4_t v10 = vcvt_f32_f16(vget_low_f16(values.val[1])); + float32x4_t v11 = vcvt_f32_f16(vget_high_f16(values.val[1])); + float32x4_t second_v10 = vcvt_f32_f16(vget_low_f16(second.get_high())); + float32x4_t second_v11 = vcvt_f32_f16(vget_high_f16(second.get_high())); + Vectorized mv1 = (Vectorized(v10, v11).*m)( + Vectorized(second_v10, second_v11)); + float16x4_t r10 = 
vcvt_f16_f32(mv1.get_low()); + float16x4_t r11 = vcvt_f16_f32(mv1.get_high()); + + // Pack result into Vectorized + return Vectorized( + vcombine_f16(r00, r01), vcombine_f16(r10, r11)); + } + + public: + // constructor + Vectorized() {} + Vectorized(float16x8x2_t v) : values(v) {} + + // A ctor that accepts c10::Half is needed to fit interface with vec_base.h + // A second constructor that takes float16_t is also included + Vectorized(c10::Half val) + : values{vdupq_n_f16((float16_t)val), vdupq_n_f16((float16_t)val)} { + } + Vectorized(float16_t val) : values{vdupq_n_f16(val), vdupq_n_f16(val)} {} + Vectorized( + float16_t val0, + float16_t val1, + float16_t val2, + float16_t val3, + float16_t val4, + float16_t val5, + float16_t val6, + float16_t val7, + float16_t val8, + float16_t val9, + float16_t val10, + float16_t val11, + float16_t val12, + float16_t val13, + float16_t val14, + float16_t val15) + : values{ + val0, + val1, + val2, + val3, + val4, + val5, + val6, + val7, + val8, + val9, + val10, + val11, + val12, + val13, + val14, + val15} {} + Vectorized(float16x8_t val0, float16x8_t val1) : values{val0, val1} {} + operator float16x8x2_t() const { + return values; + } + template + static Vectorized blend( + const Vectorized& a, + const Vectorized& b) { + Vectorized vec; + // 0. + vec.values.val[0] = BlendHalfRegs<0, (mask & 0x01) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<1, (mask & 0x02) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<2, (mask & 0x04) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<3, (mask & 0x08) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + + vec.values.val[0] = BlendHalfRegs<4, (mask & 0x10) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<5, (mask & 0x20) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<6, (mask & 0x40) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + vec.values.val[0] = BlendHalfRegs<7, (mask & 0x80) != 0>::impl( + a.values.val[0], b.values.val[0], vec.values.val[0]); + + // 1. 
+ vec.values.val[1] = BlendHalfRegs<0, (mask & 0x10) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + vec.values.val[1] = BlendHalfRegs<1, (mask & 0x20) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + vec.values.val[1] = BlendHalfRegs<2, (mask & 0x40) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + vec.values.val[1] = BlendHalfRegs<3, (mask & 0x80) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + + vec.values.val[1] = BlendHalfRegs<4, (mask & 0x10) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + vec.values.val[1] = BlendHalfRegs<5, (mask & 0x20) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + vec.values.val[1] = BlendHalfRegs<6, (mask & 0x40) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + vec.values.val[1] = BlendHalfRegs<7, (mask & 0x80) != 0>::impl( + a.values.val[1], b.values.val[1], vec.values.val[1]); + + return vec; + } + static Vectorized blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + // Note: using blendv is very awkward because 0xFFFF is one of many NaN's in + // FP16 It's unfortunate that the mask has type Half (required from + // vec_base) + + // TODO + // NB: This requires that each value, i.e., each uint value, + // of the mask either all be zeros or all be 1s. + // We perhaps need some kind of an assert? + // But that will affect performance. + Vectorized vec(mask.values); + vec.values.val[0] = vbslq_f16( + vreinterpretq_u16_f16(vec.values.val[0]), + b.values.val[0], + a.values.val[0]); + vec.values.val[1] = vbslq_f16( + vreinterpretq_u16_f16(vec.values.val[1]), + b.values.val[1], + a.values.val[1]); + return vec; + } + template + static Vectorized arange( + c10::Half base = 0.0, + step_t step = static_cast(1)) { + const Vectorized base_vec(base); + const Vectorized step_vec(step); + const Vectorized step_sizes( + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + return fmadd(step_sizes, step_vec, base_vec); + } + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + int64_t count = size()) { + uint16_t pre_mask[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + for (int i = 0; i < count; i++) { + pre_mask[i] = 0xFFFF; + } + uint16x8x2_t mask = vld1q_u16_x2(pre_mask); + + // Using blendv is awkward because 0xFFFF is one of many NaN's in FP16 + // so we directly use vbslq_f16 instead + Vectorized vec( + vbslq_f16( + // Low bits + mask.val[0], + b.values.val[0], + a.values.val[0]), + // High bits + vbslq_f16(mask.val[1], b.values.val[1], a.values.val[1])); + + return vec; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) { + return vld1q_f16_x2(reinterpret_cast(ptr)); + } else if (count == (size() >> 1)) { + Vectorized res; + res.values.val[0] = vld1q_f16(reinterpret_cast(ptr)); + std::memset(&res.values.val[1], 0, sizeof(res.values.val[1])); + return res; + } + __at_align__ float16_t tmp_values[size()]; + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(float16_t)); + return vld1q_f16_x2(reinterpret_cast(tmp_values)); + } + void store(void* ptr, int64_t count = size()) const { + if (count == size()) { + vst1q_f16_x2(reinterpret_cast(ptr), values); + return; + } else if (count == (size() >> 1)) { + vst1q_f16(reinterpret_cast(ptr), values.val[0]); + } else { + float16_t tmp_values[size()]; + 
vst1q_f16_x2(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(float16_t)); + } + } + inline const float16x8_t& get_low() const { + return values.val[0]; + } + inline float16x8_t& get_low() { + return values.val[0]; + } + inline const float16x8_t& get_high() const { + return values.val[1]; + } + inline float16x8_t& get_high() { + return values.val[1]; + } + // Very slow implementation of indexing. + // Only required because vec256_qint refers to this. + // Once we specialize that implementation for ARM + // this should be removed. TODO (kimishpatel) + c10::Half operator[](int idx) const { + __at_align__ c10::Half tmp[size()]; + store(tmp); + return tmp[idx]; + } + c10::Half operator[](int idx) { + __at_align__ c10::Half tmp[size()]; + store(tmp); + return tmp[idx]; + } + // For boolean version where we want to if any 1/all zero + // etc. can be done faster in a different way. + int zero_mask() const { + __at_align__ c10::Half tmp[size()]; + store(tmp); + int mask = 0; + for (int i = 0; i < size(); ++i) { + if (tmp[i] == 0) { + mask |= (1 << i); + } + } + return mask; + } + Vectorized isnan() const { + __at_align__ c10::Half tmp[size()]; + __at_align__ c10::Half res[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + if (_isnan(tmp[i])) { + std::memset(static_cast(&res[i]), 0xFF, sizeof(c10::Half)); + } else { + std::memset(static_cast(&res[i]), 0, sizeof(c10::Half)); + } + } + return loadu(res); + }; + bool has_inf_nan() const { + __at_align__ c10::Half tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + if (_isnan(tmp[i]) || _isinf(tmp[i])) { + return true; + } + } + return false; + } + Vectorized map(c10::Half (*const f)(c10::Half)) const { + __at_align__ c10::Half tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + return Vectorized( + vabsq_f16(values.val[0]), vabsq_f16(values.val[1])); + } + Vectorized angle() const { + auto zero = Vectorized(0); + auto pi = Vectorized(c10::pi); + auto tmp = blendv(zero, pi, *this < zero); + return blendv(tmp, *this, isnan()); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return Vectorized(0); + } + Vectorized conj() const { + return *this; + } + + // Sleef does not support FP16, so many math functions are applied by + // converting to FP32, applying the math function, and then converting back to + // FP16. 
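+  // Each lane therefore takes a widen-compute-narrow round trip: the math is
+  // done in single precision and only the final narrowing rounds back to half
+  // precision. One lane of acos(), for example, behaves like (illustrative):
+  //   float16_t x = 0.5;
+  //   float16_t y = (float16_t)std::acos((float)x);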
+ Vectorized acos() const { + return map_with_vec_float_method(&Vectorized::acos); + } + Vectorized acosh() const { + return map_with_vec_float_method(&Vectorized::acosh); + } + Vectorized asin() const { + return map_with_vec_float_method(&Vectorized::asin); + } + Vectorized atan() const { + return map_with_vec_float_method(&Vectorized::atan); + } + Vectorized atanh() const { + return map_with_vec_float_method(&Vectorized::atanh); + } + Vectorized atan2(const Vectorized& exp) const { + return map2_with_vec_float_method(exp, &Vectorized::atan2); + } + Vectorized copysign(const Vectorized& sign) const { + return map2_with_vec_float_method(sign, &Vectorized::copysign); + } + Vectorized erf() const { + return map_with_vec_float_method(&Vectorized::erf); + } + Vectorized erfc() const { + return map_with_vec_float_method(&Vectorized::erfc); + } + Vectorized erfinv() const { + return map_with_vec_float_method(&Vectorized::erfinv); + } + Vectorized exp() const { + return map_with_vec_float_method(&Vectorized::exp); + } + Vectorized exp2() const { + return map_with_vec_float_method(&Vectorized::exp2); + } + Vectorized expm1() const { + return map_with_vec_float_method(&Vectorized::expm1); + } + Vectorized exp_u20() const { + return map_with_vec_float_method(&Vectorized::exp_u20); + } + Vectorized fmod(const Vectorized& q) const { + // This function is questionable with a conversion, so we use map2 + return map2(q, std::fmod); + } + Vectorized hypot(const Vectorized& b) const { + return map2_with_vec_float_method(b, &Vectorized::hypot); + } + Vectorized i0() const { + return map_with_vec_float_method(&Vectorized::i0); + } + Vectorized i0e() const { + return map_with_vec_float_method(&Vectorized::i0e); + } + Vectorized digamma() const { + return map_with_vec_float_method(&Vectorized::digamma); + } + Vectorized igamma(const Vectorized& x) const { + return map2_with_vec_float_method(x, &Vectorized::igamma); + } + Vectorized igammac(const Vectorized& x) const { + return map2_with_vec_float_method(x, &Vectorized::igammac); + } + Vectorized log() const { + return map_with_vec_float_method(&Vectorized::log); + } + Vectorized log10() const { + return map_with_vec_float_method(&Vectorized::log10); + } + Vectorized log1p() const { + return map_with_vec_float_method(&Vectorized::log1p); + } + Vectorized log2() const { + return map_with_vec_float_method(&Vectorized::log2); + } + Vectorized nextafter(const Vectorized& b) const { + // This function does not make sense with conversion, so we use map2 + return map2(b, std::nextafter); + } + Vectorized frac() const; + Vectorized sin() const { + return map_with_vec_float_method(&Vectorized::sin); + } + Vectorized sinh() const { + return map_with_vec_float_method(&Vectorized::sinh); + } + Vectorized cos() const { + return map_with_vec_float_method(&Vectorized::cos); + } + Vectorized cosh() const { + return map_with_vec_float_method(&Vectorized::cosh); + } + Vectorized ceil() const { + // This function is questionable with a conversion, so we use map + return map(at::native::ceil_impl); + } + Vectorized floor() const { + // This function is questionable with a conversion, so we use map + return map(at::native::floor_impl); + } + Vectorized neg() const { + return Vectorized( + vnegq_f16(values.val[0]), vnegq_f16(values.val[1])); + } + inline Vectorized round() const { + // This function is questionable with a conversion, so we use map + return map(at::native::round_impl); + } + inline Vectorized tan() const { + return map_with_vec_float_method(&Vectorized::tan); + } + 
inline Vectorized tanh() const { + return map_with_vec_float_method(&Vectorized::tanh); + } + Vectorized trunc() const { + float16x8_t r0 = vrndq_f16(values.val[0]); + float16x8_t r1 = vrndq_f16(values.val[1]); + return Vectorized(r0, r1); + } + Vectorized lgamma() const { + return map_with_vec_float_method(&Vectorized::lgamma); + } + Vectorized sqrt() const { + return Vectorized( + vsqrtq_f16(values.val[0]), vsqrtq_f16(values.val[1])); + } + Vectorized reciprocal() const { + auto ones = vdupq_n_f16(1.0f); + auto r0 = vdivq_f16(ones, values.val[0]); + auto r1 = vdivq_f16(ones, values.val[1]); + return Vectorized(r0, r1); + } + Vectorized rsqrt() const { + return this->sqrt().reciprocal(); + } + Vectorized pow(const Vectorized& exp) const { + return map2_with_vec_float_method(exp, &Vectorized::pow); + } + Vectorized operator==(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vceqq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vceqq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator!=(const Vectorized& other) const { + float16x8_t r0 = vreinterpretq_f16_u16( + vmvnq_u16(vceqq_f16(values.val[0], other.values.val[0]))); + float16x8_t r1 = vreinterpretq_f16_u16( + vmvnq_u16(vceqq_f16(values.val[1], other.values.val[1]))); + return Vectorized(r0, r1); + } + + Vectorized operator<(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcltq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcltq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator<=(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcleq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcleq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator>(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcgtq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcgtq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized operator>=(const Vectorized& other) const { + float16x8_t r0 = + vreinterpretq_f16_u16(vcgeq_f16(values.val[0], other.values.val[0])); + float16x8_t r1 = + vreinterpretq_f16_u16(vcgeq_f16(values.val[1], other.values.val[1])); + return Vectorized(r0, r1); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; // Vectorized + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vaddq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vaddq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator-( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vsubq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vsubq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vmulq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vmulq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator/( + 
const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vdivq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vdivq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vmaxq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vmaxq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vminq_f16(a.get_low(), b.get_low()); + float16x8_t r1 = vminq_f16(a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline clamp( + const Vectorized& a, + const Vectorized& min, + const Vectorized& max) { + return minimum(max, maximum(min, a)); +} + +template <> +Vectorized inline clamp_max( + const Vectorized& a, + const Vectorized& max) { + return minimum(max, a); +} + +template <> +Vectorized inline clamp_min( + const Vectorized& a, + const Vectorized& min) { + return maximum(min, a); +} + +template <> +Vectorized inline operator&( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vreinterpretq_f16_u16(vandq_u16( + vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low()))); + float16x8_t r1 = vreinterpretq_f16_u16(vandq_u16( + vreinterpretq_u16_f16(a.get_high()), + vreinterpretq_u16_f16(b.get_high()))); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator|( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vreinterpretq_f16_u16(vorrq_u16( + vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low()))); + float16x8_t r1 = vreinterpretq_f16_u16(vorrq_u16( + vreinterpretq_u16_f16(a.get_high()), + vreinterpretq_u16_f16(b.get_high()))); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline operator^( + const Vectorized& a, + const Vectorized& b) { + float16x8_t r0 = vreinterpretq_f16_u16(veorq_u16( + vreinterpretq_u16_f16(a.get_low()), vreinterpretq_u16_f16(b.get_low()))); + float16x8_t r1 = vreinterpretq_f16_u16(veorq_u16( + vreinterpretq_u16_f16(a.get_high()), + vreinterpretq_u16_f16(b.get_high()))); + return Vectorized(r0, r1); +} + +inline Vectorized Vectorized::eq( + const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne( + const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt( + const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge( + const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt( + const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le( + const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +template <> +inline void convert(const float16_t* src, int16_t* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); + i += Vectorized::size()) { + vst1q_s16(dst + i, vcvtq_s16_f16(vld1q_f16(src + i))); + vst1q_s16(dst + i + 8, 
vcvtq_s16_f16(vld1q_f16(src + i + 8))); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int16_t* src, float16_t* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); + i += Vectorized::size()) { + vst1q_f16(dst + i, vcvtq_f16_s16(vld1q_s16(src + i))); + vst1q_f16(dst + i + 8, vcvtq_f16_s16(vld1q_s16(src + i + 8))); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +Vectorized inline fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + float16x8_t r0 = vfmaq_f16(c.get_low(), a.get_low(), b.get_low()); + float16x8_t r1 = vfmaq_f16(c.get_high(), a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +template <> +Vectorized inline fmsub( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + float16x8_t r0 = vfmsq_f16(c.get_low(), a.get_low(), b.get_low()); + float16x8_t r1 = vfmsq_f16(c.get_high(), a.get_high(), b.get_high()); + return Vectorized(r0, r1); +} + +#endif /* defined(aarch64) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(C10_MOBILE) */ + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h new file mode 100644 index 0000000000000000000000000000000000000000..6263efd2039ce650c5dd480257e5757aff20540f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h @@ -0,0 +1,1586 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +#ifdef CPU_CAPABILITY_AVX2 + +struct Vectorizedi { +protected: + __m256i values; + + static inline __m256i invert(const __m256i& v) { + const auto ones = _mm256_set1_epi64x(-1); + return _mm256_xor_si256(ones, v); + } +public: + Vectorizedi() {} + Vectorizedi(__m256i v) : values(v) {} + operator __m256i() const { + return values; + } +}; + +#else + +struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined + +#endif // CPU_CAPABILITY_AVX2 + +#ifdef CPU_CAPABILITY_AVX2 + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int64_t; + using size_type = int; + static constexpr size_type size() { + return 4; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int64_t v) { values = _mm256_set1_epi64x(v); } + Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4) { + values = _mm256_setr_epi64x(val1, val2, val3, val4); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ int64_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi64(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi64(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi64(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi64(b.values, 3); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int64_t base = 0, step_t step = static_cast(1)) { + return 
Vectorized(base, base + step, base + 2 * step, base + 3 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ int64_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int64_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int64_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int64_t)); + } + } + const int64_t& operator[](int idx) const = delete; + int64_t& operator[](int idx) = delete; + Vectorized abs() const { + auto zero = _mm256_set1_epi64x(0); + auto is_larger = _mm256_cmpgt_epi64(zero, values); + auto inverse = _mm256_xor_si256(values, is_larger); + return _mm256_sub_epi64(inverse, is_larger); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi64x(0); + } + Vectorized conj() const { + return *this; + } + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi64(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi64(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi64(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi64(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi64(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi64(other.values, values)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int32_t; + static constexpr int size() { + return 8; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int32_t v) { values = _mm256_set1_epi32(v); } + Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4, + int32_t val5, int32_t val6, int32_t val7, int32_t val8) { + values = 
_mm256_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + return _mm256_blend_epi32(a, b, mask); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int32_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int32_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int32_t count) { + __at_align__ int32_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int32_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int32_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int32_t)); + } + } + const int32_t& operator[](int idx) const = delete; + int32_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm256_abs_epi32(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi32(0); + } + Vectorized conj() const { + return *this; + } + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi32(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi32(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi32(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi32(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi32(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi32(other.values, values)); + } + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& 
other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +inline void convert(const int32_t *src, float *dst, int64_t n) { + int64_t i; + // int32_t and float have same size +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_vec = _mm256_loadu_si256(reinterpret_cast(src + i)); + auto output_vec = _mm256_cvtepi32_ps(input_vec); + _mm256_storeu_ps(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int32_t *src, double *dst, int64_t n) { + int64_t i; + // int32_t has half the size of double +#ifndef _MSC_VER +# pragma unroll +#endif + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto input_128_vec = _mm_loadu_si128(reinterpret_cast(src + i)); + auto output_vec = _mm256_cvtepi32_pd(input_128_vec); + _mm256_storeu_pd(reinterpret_cast(dst + i), output_vec); + } +#ifndef _MSC_VER +# pragma unroll +#endif + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +class Vectorized : public Vectorizedi { +private: + static const Vectorized ones; +public: + using value_type = int16_t; + static constexpr int size() { + return 16; + } + using Vectorizedi::Vectorizedi; + Vectorized() {} + Vectorized(int16_t v) { values = _mm256_set1_epi16(v); } + Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4, + int16_t val5, int16_t val6, int16_t val7, int16_t val8, + int16_t val9, int16_t val10, int16_t val11, int16_t val12, + int16_t val13, int16_t val14, int16_t val15, int16_t val16) { + values = _mm256_setr_epi16(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi16(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi16(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi16(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi16(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi16(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi16(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi16(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi16(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi16(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi16(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi16(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi16(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi16(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi16(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi16(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi16(b.values, 15); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(int16_t base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 
7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, int16_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu(const void* ptr, int16_t count) { + __at_align__ int16_t tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(int16_t)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + __at_align__ int16_t tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(int16_t)); + } + } + const int16_t& operator[](int idx) const = delete; + int16_t& operator[](int idx) = delete; + Vectorized abs() const { + return _mm256_abs_epi16(values); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized neg() const; + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi16(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi16(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi16(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi16(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return _mm256_cmpgt_epi16(values, other.values); + } + Vectorized operator>=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi16(other.values, values)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template +class Vectorized8 : public 
Vectorizedi { + static_assert( + std::is_same_v || std::is_same_v, + "Only int8_t/uint8_t are supported"); +protected: + static const Vectorized ones; +public: + using value_type = T; + static constexpr int size() { + return 32; + } + using Vectorizedi::Vectorizedi; + Vectorized8() {} + Vectorized8(T v) { values = _mm256_set1_epi8(v); } + Vectorized8(T val1, T val2, T val3, T val4, + T val5, T val6, T val7, T val8, + T val9, T val10, T val11, T val12, + T val13, T val14, T val15, T val16, + T val17, T val18, T val19, T val20, + T val21, T val22, T val23, T val24, + T val25, T val26, T val27, T val28, + T val29, T val30, T val31, T val32) { + values = _mm256_setr_epi8(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16, + val17, val18, val19, val20, val21, val22, val23, val24, + val25, val26, val27, val28, val29, val30, val31, val32); + } + template + static Vectorized blend(Vectorized a, Vectorized b) { + __at_align__ T tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = _mm256_extract_epi8(b.values, 0); + if (mask & 0x02) + tmp_values[1] = _mm256_extract_epi8(b.values, 1); + if (mask & 0x04) + tmp_values[2] = _mm256_extract_epi8(b.values, 2); + if (mask & 0x08) + tmp_values[3] = _mm256_extract_epi8(b.values, 3); + if (mask & 0x10) + tmp_values[4] = _mm256_extract_epi8(b.values, 4); + if (mask & 0x20) + tmp_values[5] = _mm256_extract_epi8(b.values, 5); + if (mask & 0x40) + tmp_values[6] = _mm256_extract_epi8(b.values, 6); + if (mask & 0x80) + tmp_values[7] = _mm256_extract_epi8(b.values, 7); + if (mask & 0x100) + tmp_values[8] = _mm256_extract_epi8(b.values, 8); + if (mask & 0x200) + tmp_values[9] = _mm256_extract_epi8(b.values, 9); + if (mask & 0x400) + tmp_values[10] = _mm256_extract_epi8(b.values, 10); + if (mask & 0x800) + tmp_values[11] = _mm256_extract_epi8(b.values, 11); + if (mask & 0x1000) + tmp_values[12] = _mm256_extract_epi8(b.values, 12); + if (mask & 0x2000) + tmp_values[13] = _mm256_extract_epi8(b.values, 13); + if (mask & 0x4000) + tmp_values[14] = _mm256_extract_epi8(b.values, 14); + if (mask & 0x8000) + tmp_values[15] = _mm256_extract_epi8(b.values, 15); + if (mask & 0x010000) + tmp_values[16] = _mm256_extract_epi8(b.values, 16); + if (mask & 0x020000) + tmp_values[17] = _mm256_extract_epi8(b.values, 17); + if (mask & 0x040000) + tmp_values[18] = _mm256_extract_epi8(b.values, 18); + if (mask & 0x080000) + tmp_values[19] = _mm256_extract_epi8(b.values, 19); + if (mask & 0x100000) + tmp_values[20] = _mm256_extract_epi8(b.values, 20); + if (mask & 0x200000) + tmp_values[21] = _mm256_extract_epi8(b.values, 21); + if (mask & 0x400000) + tmp_values[22] = _mm256_extract_epi8(b.values, 22); + if (mask & 0x800000) + tmp_values[23] = _mm256_extract_epi8(b.values, 23); + if (mask & 0x1000000) + tmp_values[24] = _mm256_extract_epi8(b.values, 24); + if (mask & 0x2000000) + tmp_values[25] = _mm256_extract_epi8(b.values, 25); + if (mask & 0x4000000) + tmp_values[26] = _mm256_extract_epi8(b.values, 26); + if (mask & 0x8000000) + tmp_values[27] = _mm256_extract_epi8(b.values, 27); + if (mask & 0x10000000) + tmp_values[28] = _mm256_extract_epi8(b.values, 28); + if (mask & 0x20000000) + tmp_values[29] = _mm256_extract_epi8(b.values, 29); + if (mask & 0x40000000) + tmp_values[30] = _mm256_extract_epi8(b.values, 30); + if (mask & 0x80000000) + tmp_values[31] = _mm256_extract_epi8(b.values, 31); + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& 
mask) { + return _mm256_blendv_epi8(a.values, b.values, mask.values); + } + template + static Vectorized arange(T base = 0, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step, + base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step, + base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step, + base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step, + base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step); + } + static Vectorized + set(Vectorized a, Vectorized b, T count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<0x1>(a, b); + case 2: + return blend<0x3>(a, b); + case 3: + return blend<0x7>(a, b); + case 4: + return blend<0xF>(a, b); + case 5: + return blend<0x1F>(a, b); + case 6: + return blend<0x3F>(a, b); + case 7: + return blend<0x7F>(a, b); + case 8: + return blend<0xFF>(a, b); + case 9: + return blend<0x1FF>(a, b); + case 10: + return blend<0x3FF>(a, b); + case 11: + return blend<0x7FF>(a, b); + case 12: + return blend<0xFFF>(a, b); + case 13: + return blend<0x1FFF>(a, b); + case 14: + return blend<0x3FFF>(a, b); + case 15: + return blend<0x7FFF>(a, b); + case 16: + return blend<0xFFFF>(a, b); + case 17: + return blend<0x1FFFF>(a, b); + case 18: + return blend<0x3FFFF>(a, b); + case 19: + return blend<0x7FFFF>(a, b); + case 20: + return blend<0xFFFFF>(a, b); + case 21: + return blend<0x1FFFFF>(a, b); + case 22: + return blend<0x3FFFFF>(a, b); + case 23: + return blend<0x7FFFFF>(a, b); + case 24: + return blend<0xFFFFFF>(a, b); + case 25: + return blend<0x1FFFFFF>(a, b); + case 26: + return blend<0x3FFFFFF>(a, b); + case 27: + return blend<0x7FFFFFF>(a, b); + case 28: + return blend<0xFFFFFFF>(a, b); + case 29: + return blend<0x1FFFFFFF>(a, b); + case 30: + return blend<0x3FFFFFFF>(a, b); + case 31: + return blend<0x7FFFFFFF>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr) { + return _mm256_loadu_si256(reinterpret_cast(ptr)); + } + static Vectorized loadu_one_fourth(const void* ptr) { + // Fast path if only load element number of 8. + // Note: We didn't merge it as fast path of loadu(const void* ptr, T count), + // Because loadu(const void* ptr, T count) requires zero initialization for upper 128 bits. + // However, by using _mm256_castsi128_si256, the upper 128 bits of the result are undefined. + // TODO We can use _mm256_zextsi128_si256 in the furture, + // since gcc 9.3 doesn't support it now. + __m128i input_128 = _mm_loadl_epi64(reinterpret_cast(ptr)); + return _mm256_castsi128_si256(input_128); + } + static Vectorized loadu(const void* ptr, T count) { + __at_align__ T tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, ptr, count * sizeof(T)); + return loadu(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + // ptr need not to be aligned here. 
See + // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html + _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values); + } else if (count > 0) { + if (count == 8) { + // Fast path if only store element number of 8 + _mm_storel_epi64(reinterpret_cast<__m128i*>(ptr), _mm256_castsi256_si128(values)); + } else { + __at_align__ T tmp_values[size()]; + _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(T)); + } + } + } + const T& operator[](int idx) const = delete; + T& operator[](int idx) = delete; + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm256_set1_epi8(0); + } + Vectorized conj() const { + return *this; + } +}; + +template<> +class Vectorized: public Vectorized8 { +public: + using Vectorized8::Vectorized8; + + Vectorized neg() const; + + Vectorized abs() const { + return _mm256_abs_epi8(values); + } + + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi8(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi8(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + return _mm256_cmpgt_epi8(other.values, values); + } + Vectorized operator<=(const Vectorized& other) const { + return invert(_mm256_cmpgt_epi8(values, other.values)); + } + Vectorized operator>(const Vectorized& other) const { + return other < *this; + } + Vectorized operator>=(const Vectorized& other) const { + return other <= *this; + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template<> +class Vectorized: public Vectorized8 { +public: + using Vectorized8::Vectorized8; + + Vectorized neg() const; + + Vectorized abs() const { + return *this; + } + + Vectorized operator==(const Vectorized& other) const { + return _mm256_cmpeq_epi8(values, other.values); + } + Vectorized operator!=(const Vectorized& other) const { + return invert(_mm256_cmpeq_epi8(values, other.values)); + } + Vectorized operator<(const Vectorized& other) const { + __m256i max = _mm256_max_epu8(values, other.values); + return invert(_mm256_cmpeq_epi8(max, values)); + } + Vectorized operator<=(const Vectorized& other) const { + __m256i max = _mm256_max_epu8(values, other.values); + return _mm256_cmpeq_epi8(max, other.values); + } + Vectorized operator>(const Vectorized& other) const { + return other < *this; + } + Vectorized operator>=(const Vectorized& other) const { + return other <= *this; + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi64(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi32(a, b); +} + +template <> +Vectorized inline operator+(const 
Vectorized& a, const Vectorized& b) { + return _mm256_add_epi16(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi8(a, b); +} + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm256_add_epi8(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi64(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi32(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi16(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi8(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm256_sub_epi8(a, b); +} + +// Negation. Defined here so we can utilize operator- +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +inline Vectorized Vectorized::neg() const { + return Vectorized(0) - *this; +} + +// Emulate operations with no native 64-bit support in avx, +// by extracting each element, performing the operation pointwise, +// then combining the results into a vector. +template +Vectorized inline emulate(const Vectorized& a, const Vectorized& b, const op_t& op) { + int64_t a0 = _mm256_extract_epi64(a, 0); + int64_t a1 = _mm256_extract_epi64(a, 1); + int64_t a2 = _mm256_extract_epi64(a, 2); + int64_t a3 = _mm256_extract_epi64(a, 3); + + int64_t b0 = _mm256_extract_epi64(b, 0); + int64_t b1 = _mm256_extract_epi64(b, 1); + int64_t b2 = _mm256_extract_epi64(b, 2); + int64_t b3 = _mm256_extract_epi64(b, 3); + + int64_t c0 = op(a0, b0); + int64_t c1 = op(a1, b1); + int64_t c2 = op(a2, b2); + int64_t c3 = op(a3, b3); + + return _mm256_set_epi64x(c3, c2, c1, c0); +} + +template +Vectorized inline emulate(const Vectorized& a, const Vectorized& b, const Vectorized& c, const op_t& op) { + int64_t a0 = _mm256_extract_epi64(a, 0); + int64_t a1 = _mm256_extract_epi64(a, 1); + int64_t a2 = _mm256_extract_epi64(a, 2); + int64_t a3 = _mm256_extract_epi64(a, 3); + + int64_t b0 = _mm256_extract_epi64(b, 0); + int64_t b1 = _mm256_extract_epi64(b, 1); + int64_t b2 = _mm256_extract_epi64(b, 2); + int64_t b3 = _mm256_extract_epi64(b, 3); + + int64_t c0 = _mm256_extract_epi64(c, 0); + int64_t c1 = _mm256_extract_epi64(c, 1); + int64_t c2 = _mm256_extract_epi64(c, 2); + int64_t c3 = _mm256_extract_epi64(c, 3); + + int64_t d0 = op(a0, b0, c0); + int64_t d1 = op(a1, b1, c1); + int64_t d2 = op(a2, b2, c2); + int64_t d3 = op(a3, b3, c3); + + return _mm256_set_epi64x(d3, d2, d1, d0); +} + +// AVX2 has no intrinsic for int64_t multiply so it needs to be emulated +// This could be implemented more efficiently using epi32 instructions +// This is also technically avx compatible, but then we'll need AVX +// code for add as well. +// Note: intentionally ignores undefined behavior like (-lowest * -1). 
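// --- Added illustrative sketch, not part of the upstream header ---
// Any other pointwise int64_t operation that AVX2 has no instruction for can
// be routed through the same emulate() helper defined above; the function name
// here is hypothetical and only demonstrates the usage pattern. The int64_t
// multiply specialization that the note above refers to follows immediately
// below.
inline Vectorized<int64_t> example_absdiff_s64(
    const Vectorized<int64_t>& a,
    const Vectorized<int64_t>& b) {
  // emulate() extracts the four lanes, applies the scalar lambda per lane,
  // and repacks the results into a vector.
  return emulate(a, b, [](int64_t x, int64_t y) { return x > y ? x - y : y - x; });
}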
+template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return emulate(a, b, [](int64_t a_point, int64_t b_point) __ubsan_ignore_undefined__ {return a_point * b_point;}); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm256_mullo_epi16(a, b); +} + +template +Vectorized inline int_elementwise_binary_256(const Vectorized& a, const Vectorized& b, Op op) { + T values_a[Vectorized::size()]; + T values_b[Vectorized::size()]; + a.store(values_a); + b.store(values_b); + for (int i = 0; i != Vectorized::size(); i++) { + values_a[i] = op(values_a[i], values_b[i]); + } + return Vectorized::loadu(values_a); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + // We don't have an instruction for multiplying int8_t +#ifndef CPU_CAPABILITY_AVX2 + return int_elementwise_binary_256(a, b, std::multiplies()); +#else + __m256i mask00FF = _mm256_set1_epi16(0x00FF); + __m256i a_lo = _mm256_srai_epi16(_mm256_slli_epi16(a, 8), 8); + __m256i b_lo = _mm256_srai_epi16(_mm256_slli_epi16(b, 8), 8); + __m256i a_hi = _mm256_srai_epi16(a, 8); + __m256i b_hi = _mm256_srai_epi16(b, 8); + __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF); + __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8); + __m256i res = _mm256_or_si256(res_hi, res_lo); + return res; +#endif +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + // We don't have an instruction for multiplying uint8_t +#ifndef CPU_CAPABILITY_AVX2 + return int_elementwise_binary_256(a, b, std::multiplies()); +#else + __m256i mask00FF = _mm256_set1_epi16(0x00FF); + __m256i a_lo = _mm256_and_si256 (a, mask00FF); + __m256i b_lo = _mm256_and_si256 (b, mask00FF); + __m256i a_hi = _mm256_srli_epi16(a, 8); + __m256i b_hi = _mm256_srli_epi16(b, 8); + __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF); + __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8); + __m256i res = _mm256_or_si256(res_hi, res_lo); + return res; +#endif +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::min(a_point, b_point);}); +#else + __m256i cmp = _mm256_cmpgt_epi64(a, b); + return _mm256_blendv_epi8(a, b, cmp); +#endif +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi32(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi16(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epi8(a, b); +} + +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + return _mm256_min_epu8(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::max(a_point, b_point);}); +#else + __m256i cmp = _mm256_cmpgt_epi64(a, b); + return _mm256_blendv_epi8(b, a, cmp); +#endif +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epi32(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const 
Vectorized& b) { + return _mm256_max_epi16(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epi8(a, b); +} + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return _mm256_max_epu8(a, b); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {return std::min(max_point, std::max(a_point, min_point));}); +#else + return minimum(maximum(a, min_val), max_val); +#endif +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val)); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min_val, const Vectorized& max_val) { + return _mm256_min_epu8(max_val, _mm256_max_epu8(a, min_val)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, max_val, [](int64_t a_point, int64_t max_point) {return std::min(max_point, a_point);}); +#else + return minimum(max_val, a); +#endif +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi32(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi16(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epi8(max_val, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max_val) { + return _mm256_min_epu8(max_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { +#ifndef CPU_CAPABILITY_AVX2 + return emulate(a, min_val, [](int64_t a_point, int64_t min_point) {return std::max(min_point, a_point);}); +#else + return maximum(min_val, a); +#endif +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi32(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi16(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epi8(min_val, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min_val) { + return _mm256_max_epu8(min_val, a); +} + +template +Vectorized inline convert_to_int32(const T* ptr) { + return Vectorized::loadu(ptr); +} + +template<> +Vectorized inline convert_to_int32(const int8_t* ptr) { + return _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast(ptr))); +} + +template<> +Vectorized inline convert_to_int32(const uint8_t* ptr) { + return _mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast(ptr))); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return 
int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return int_elementwise_binary_256(a, b, std::divides()); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + return _mm256_and_si256(a, b); +} +template>::value, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + return _mm256_or_si256(a, b); +} +template>::value, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + return _mm256_xor_si256(a, b); +} +template>::value, int> = 0> +inline Vectorized operator~(const Vectorized& a) { + return _mm256_xor_si256(a, _mm256_set1_epi32(-1)); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) 
const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1); +} + +template +Vectorized inline shift_256_16(const Vectorized& a, const Vectorized& b) { + // No vector instruction for shifting int16_t, so emulating it instead. + + // Control masks for shuffle operation, treating 256 bits as an + // array of 16-bit elements, and considering pairs of neighboring + // elements. Specifially, a mask named "ctl_M_N" (M,N in [0,1], and + // M!=N) is set so that shuffle will move element with index M from + // input pair into element with index N in output pair, and element + // with index M in output pair will be set to all 0s. + __m256i ctl_0_1 = _mm256_set_epi8(29, 28, 0x80, 0x80, 25, 24, 0x80, 0x80, + 21, 20, 0x80, 0x80, 17, 16, 0x80, 0x80, + 13, 12, 0x80, 0x80, 9, 8, 0x80, 0x80, + 5, 4, 0x80, 0x80, 1, 0, 0x80, 0x80); + __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 31, 30, 0x80, 0x80, 27, 26, + 0x80, 0x80, 23, 22, 0x80, 0x80, 19, 18, + 0x80, 0x80, 15, 14, 0x80, 0x80, 11, 10, + 0x80, 0x80, 7, 6, 0x80, 0x80, 3, 2); + + // Masks for bitwise and operation, treating 256 bits as an array of + // 16-bit elements, and considering them in pairs of neighboring + // elements. A mask named "keep_M" (M in [0,1]) is set so that + // bitwise and will copy element with index M from input pair into + // element with the same index in output pair, while the other + // element in output pair will be set to all 0s. + __m256i keep_0 = _mm256_set1_epi32(0xFFFF); + __m256i keep_1 = _mm256_set1_epi32(0xFFFF0000); + + // Take each 16-bit element with idx%2==0 from input array to be + // shifted and extend it to 32 bits so that 0s are added to the + // right. Then, perform shifting on this 32-bit number. Upper 16 + // bits will be proper result of shifting original 16-bit number, so + // write them to result array, into the same position from which + // corresponding input element is taken. Also, make sure that + // result array elements with idx%2!=0 are set to all 0s. + // + // Note that number of bits to shift for is extended to 32 bits by + // adding 0s to the left. That means this number is not properly + // sign-extended for negative values. However, number of bits to + // shift is treated as an unsigned integer by respective shift + // intrinsics anyway so if negative then either with or without + // proper sign extension, it will be interpreted as a number greater + // than 32, and the shifting result will be the same. 
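  // Added explanatory comment, not in upstream: concretely, for one 32-bit
  // group holding the int16 pair {x0, x1} and the shift counts {s0, s1},
  // ctl_0_1 builds the 32-bit value (x0 << 16), keep_0 isolates s0, the
  // 32-bit variable shift then leaves x0's shifted value in the upper 16
  // bits, and ctl_1_0 moves those bits back down into lane 0. The keep_1
  // path further below handles x1 in place, so the two partial results can
  // simply be OR-ed together at the end.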
+ __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_1); + __m256i b0 = _mm256_and_si256(b, keep_0); + __m256i c0; + if (left_shift) + c0 = _mm256_sllv_epi32(a0, b0); + else + c0 = _mm256_srav_epi32(a0, b0); + c0 = _mm256_shuffle_epi8(c0, ctl_1_0); + + // Peform shifting the same way for input array elements with + // idx%2==1. + __m256i a1 = _mm256_and_si256(a, keep_1); + __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0); + __m256i c1; + if (left_shift) + c1 = _mm256_sllv_epi32(a1, b1); + else + c1 = _mm256_srav_epi32(a1, b1); + c1 = _mm256_and_si256(c1, keep_1); + + // Merge partial results into the final result. + __m256i c = _mm256_or_si256(c0, c1); + + return c; +} + +template || std::is_same_v, int> = 0> +Vectorized inline shift_256_8(const Vectorized& a, const Vectorized& b) { + // No vector instruction for shifting int8_t/uint8_t, so emulating + // it instead. + + // Control masks for shuffle operation, treating 256 bits as an + // array of 8-bit elements, and considering quadruples of + // neighboring elements. Specifially, a mask named "ctl_M_N" (M,N + // in [0,1,2,3], and M!=N) is set so that shuffle will move element + // with index M from input quadruple into element with index N in + // output quadruple, and other elements in output quadruple will be + // set to all 0s. + __m256i ctl_0_3 = _mm256_set_epi8(28, 0x80, 0x80, 0x80, 24, 0x80, 0x80, 0x80, + 20, 0x80, 0x80, 0x80, 16, 0x80, 0x80, 0x80, + 12, 0x80, 0x80, 0x80, 8, 0x80, 0x80, 0x80, + 4, 0x80, 0x80, 0x80, 0, 0x80, 0x80, 0x80); + __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 29, 0x80, 0x80, 0x80, 25, + 0x80, 0x80, 0x80, 21, 0x80, 0x80, 0x80, 17, + 0x80, 0x80, 0x80, 13, 0x80, 0x80, 0x80, 9, + 0x80, 0x80, 0x80, 5, 0x80, 0x80, 0x80, 1); + __m256i ctl_1_3 = _mm256_set_epi8(29, 0x80, 0x80, 0x80, 25, 0x80, 0x80, 0x80, + 21, 0x80, 0x80, 0x80, 17, 0x80, 0x80, 0x80, + 13, 0x80, 0x80, 0x80, 9, 0x80, 0x80, 0x80, + 5, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80); + __m256i ctl_2_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 30, 0x80, 0x80, 0x80, 26, + 0x80, 0x80, 0x80, 22, 0x80, 0x80, 0x80, 18, + 0x80, 0x80, 0x80, 14, 0x80, 0x80, 0x80, 10, + 0x80, 0x80, 0x80, 6, 0x80, 0x80, 0x80, 2); + __m256i ctl_2_3 = _mm256_set_epi8(30, 0x80, 0x80, 0x80, 26, 0x80, 0x80, 0x80, + 22, 0x80, 0x80, 0x80, 18, 0x80, 0x80, 0x80, + 14, 0x80, 0x80, 0x80, 10, 0x80, 0x80, 0x80, + 6, 0x80, 0x80, 0x80, 2, 0x80, 0x80, 0x80); + __m256i ctl_3_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 31, 0x80, 0x80, 0x80, 27, + 0x80, 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19, + 0x80, 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11, + 0x80, 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3); + __m256i ctl_3_1 = _mm256_set_epi8(0x80, 0x80, 31, 0x80, 0x80, 0x80, 27, 0x80, + 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80, + 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80, + 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80); + __m256i ctl_3_2 = _mm256_set_epi8(0x80, 31, 0x80, 0x80, 0x80, 27, 0x80, 0x80, + 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80, 0x80, + 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80, 0x80, + 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80, 0x80); + + // Masks for bitwise and operation, treating 256 bits as an array of + // 8-bit elements, and considering them in quadruples of neighboring + // elements. A mask named "keep_M" (M in [0,1,2,3]) is set so that + // bitwise and will copy element with index M from input quadruple + // into element with the same index in output quadruple, while the + // other elements in output quadruple will be set to all 0s. 
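  // Added explanatory comment, not in upstream: this is the same trick as in
  // shift_256_16 above, except that each 32-bit group now holds four 8-bit
  // lanes, so four shuffle/shift/shuffle passes (idx%4 == 0..3) are needed
  // instead of two, and the uint8_t variant uses a logical rather than an
  // arithmetic 32-bit right shift.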
+ __m256i keep_0 = _mm256_set1_epi32(0xFF); + __m256i keep_3 = _mm256_set1_epi32(0xFF000000); + + // Take each 8-bit element with idx%4==0 from input array to be + // shifted and extend it to 32 bits so that 0s are added to the + // right. Then, perform shifting on this 32-bit number. Upper 8 + // bits will be proper result of shifting original 8-bit number, so + // write them to result array, into the same position from which + // corresponding input element is taken. Also, make sure that + // result array elements with idx%4!=0 are set to all 0s. + // + // Note that number of bits to shift for is extended to 32 bits by + // adding 0s to the left. That means this number is not properly + // sign-extended for negative values. However, number of bits to + // shift is treated as an unsigned integer by respective shift + // intrinsics anyway so if negative then either with or without + // proper sign extension, it will be interpreted as a number greater + // than 32, and the shifting result will be the same. + __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_3); + __m256i b0 = _mm256_and_si256(b, keep_0); + __m256i c0; + if (left_shift) + c0 = _mm256_sllv_epi32(a0, b0); + else + if constexpr (std::is_same_v) + c0 = _mm256_srav_epi32(a0, b0); + else + c0 = _mm256_srlv_epi32(a0, b0); + c0 = _mm256_shuffle_epi8(c0, ctl_3_0); + + // Peform shifting the same way for input array elements with + // idx%4==1. + __m256i a1 = _mm256_shuffle_epi8(a, ctl_1_3); + __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0); + __m256i c1; + if (left_shift) + c1 = _mm256_sllv_epi32(a1, b1); + else + if constexpr (std::is_same_v) + c1 = _mm256_srav_epi32(a1, b1); + else + c1 = _mm256_srlv_epi32(a1, b1); + c1 = _mm256_shuffle_epi8(c1, ctl_3_1); + + // Peform shifting the same way for input array elements with + // idx%4==2. + __m256i a2 = _mm256_shuffle_epi8(a, ctl_2_3); + __m256i b2 = _mm256_shuffle_epi8(b, ctl_2_0); + __m256i c2; + if (left_shift) + c2 = _mm256_sllv_epi32(a2, b2); + else + if constexpr (std::is_same_v) + c2 = _mm256_srav_epi32(a2, b2); + else + c2 = _mm256_srlv_epi32(a2, b2); + c2 = _mm256_shuffle_epi8(c2, ctl_3_2); + + // Peform shifting the same way for input array elements with + // idx%4==3. + __m256i a3 = _mm256_and_si256(a, keep_3); + __m256i b3 = _mm256_shuffle_epi8(b, ctl_3_0); + __m256i c3; + if (left_shift) + c3 = _mm256_sllv_epi32(a3, b3); + else + if constexpr (std::is_same_v) + c3 = _mm256_srav_epi32(a3, b3); + else + c3 = _mm256_srlv_epi32(a3, b3); + c3 = _mm256_and_si256(c3, keep_3); + + // Merge partial results into the final result. + __m256i c01 = _mm256_or_si256(c0, c1); + __m256i c23 = _mm256_or_si256(c2, c3); + __m256i c = _mm256_or_si256(c01, c23); + + return c; +} + +template <> +Vectorized inline operator<<(const Vectorized& a, const Vectorized& b) { + return _mm256_sllv_epi64(a, b); +} + +template <> +Vectorized inline operator<<(const Vectorized& a, const Vectorized& b) { + return _mm256_sllv_epi32(a, b); +} + +template <> +Vectorized inline operator<<(const Vectorized& a, const Vectorized& b) { + return shift_256_16(a, b); +} + +template <> +Vectorized inline operator<<(const Vectorized& a, const Vectorized& b) { + return shift_256_8(a, b); +} + +template <> +Vectorized inline operator<<(const Vectorized& a, const Vectorized& b) { + return shift_256_8(a, b); +} + +template <> +Vectorized inline operator>>(const Vectorized& a, const Vectorized& b) { + // No vector instruction for right arithmetic shifting int64_t, so emulating it + // instead. 
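  // Added explanatory note, not in upstream: the emulation below computes
  //   a >> s (arithmetic)  ==  (a >> s logical) | sign_ext
  // where sign_ext is 0 for non-negative a and otherwise has the top s bits
  // set (all-ones shifted left by 64 - s); out-of-range shift counts are
  // clamped to 64 first, which makes them behave as "shift in nothing but
  // sign bits", i.e. -1 for negative and 0 for non-negative input.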
+ + // Clamp the shift values such that shift values < 0 and > 64 are changed to 64 + // which results in -1 for negative input and 0 for non-negative input. + __m256i zero = _mm256_set1_epi64x(0); + __m256i max_shift = _mm256_set1_epi64x(64); + __m256i mask = _mm256_or_si256(_mm256_cmpgt_epi64(zero, b), _mm256_cmpgt_epi64(b, max_shift)); + __m256i shift = _mm256_blendv_epi8(b, max_shift, mask); + // Shift the number logically to the right, thus filling the most + // significant bits with 0s. Then, replace these bits with the sign + // bit. + __m256i sign_bits = _mm256_cmpgt_epi64(zero, a); + __m256i sign_shift = _mm256_sub_epi64(max_shift, shift); + __m256i sign_ext = _mm256_sllv_epi64(sign_bits, sign_shift); + __m256i c = _mm256_srlv_epi64(a, shift); + c = _mm256_or_si256(c, sign_ext); + + return c; +} + +template <> +Vectorized inline operator>>(const Vectorized& a, const Vectorized& b) { + return _mm256_srav_epi32(a, b); +} + +template <> +Vectorized inline operator>>(const Vectorized& a, const Vectorized& b) { + return shift_256_16(a, b); +} + +template <> +Vectorized inline operator>>(const Vectorized& a, const Vectorized& b) { + return shift_256_8(a, b); +} + +template <> +Vectorized inline operator>>(const Vectorized& a, const Vectorized& b) { + return shift_256_8(a, b); +} + +#endif + +}} // namespace at::vec::CPU_CAPABILITY diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..dd6a8c52d8265528be378d5ec2c26650e195bfeb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_mask.h @@ -0,0 +1,93 @@ +#pragma once + +#include +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + +template +struct VecMaskLoad< + T, + 1, + mask_t, + 1, + typename std::enable_if_t< + std::is_same_v || std::is_same_v || + std::is_same_v, + void>> { + static inline VectorizedN apply( + const T* ptr, + const VecMask& vec_mask) { + auto int_mask = vec_mask.template cast()[0]; + if constexpr (std::is_same_v) { + return Vectorized(_mm256_maskload_ps(ptr, int_mask)); + } else { + return Vectorized(_mm256_maskload_epi32(ptr, int_mask)); + } + } +}; + +// TODO: add specialization of VecMaskLoad for bfloat16/half and int8/uint8 + +template <> +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + return Vectorized(_mm256_castsi256_ps(vec_mask[0])); + } +}; + +template <> +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + return Vectorized(_mm256_castps_si256(vec_mask[0])); + } +}; + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + auto int_vec = convert(VectorizedN(vec_mask)); + return VecMask(int_vec).cast(); + } +}; + +template <> +inline bool VecMask::all_zero() const { + return _mm256_testz_si256(mask_[0], mask_[0]); +} + +template <> +inline bool VecMask::is_masked(int i) const { + return _mm256_movemask_ps(_mm256_castsi256_ps(mask_[0])) & (1 << i); +} + +template <> +inline bool VecMask::all_masked() const { + int mask = _mm256_movemask_ps(_mm256_castsi256_ps(mask_[0])); + return mask == 0xff; +} + +#define VEC_MASK_METHOD_WITH_CAST_TO_INT( \ + T, N, return_type, method, args_def, args) \ + template <> \ + inline return_type VecMask::method args_def const { \ + return cast().method args; \ + } + 
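// Added illustrative note, not part of the upstream header: each invocation
// of the macro below expands to a small forwarding specialization; for
// example the first one becomes, roughly,
//
//   template <>
//   inline bool VecMask<float, 1>::all_zero() const {
//     return cast<...>().all_zero();   // bit-cast to the integer mask above
//   }
//
// i.e. the float and int64_t masks reuse the integer-mask implementations
// defined above rather than re-implementing the movemask logic.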
+VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_zero, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_zero, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, is_masked, (int i), (i)) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, is_masked, (int i), (i)) +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_masked, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_masked, (), ()) + +#undef VEC_MASK_DEFINE_METHOD_WITH_CAST_TO_INT + +#endif + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h new file mode 100644 index 0000000000000000000000000000000000000000..c1defcdfd5189a80c7702025254099fded6326ec --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h @@ -0,0 +1,1341 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +// This file defines Vectorized<> for the quantized types. +// +// +// Currently, we simply use these classes as efficient converters between +// the quantized types and Vectorized, usually in bandwidth-bound cases +// where doing the arithmetic in full-precision is acceptable (e.g. +// elementwise operators). +// +// +// Conversions are as follows: +// Vectorized -> 4x Vectorized +// Vectorized -> 4x Vectorized +// Vectorized -> 1x Vectorized +// +// The size of the returned float vector is specified by the special +// constexpr function float_num_vecs. The type of the value returned +// from dequantize (and expected as an argument to quantize) is +// specified by float_vec_return_type. +// +// When writing kernels with these vectors, it is expected that floating- +// point operations will be carried out in a loop over Vectorized::float_num_vecs +// iterations. 
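// Added illustrative sketch, not part of the upstream header: a typical
// bandwidth-bound elementwise kernel written against this interface follows
// the pattern sketched below. Argument lists are abbreviated and the names
// scale, zero_point, scale_zp_premul and inverse_scale are placeholders; see
// the member declarations further down for the exact signatures.
//
//   auto qx = Vectorized<c10::quint8>::loadu(src);
//   auto fx = qx.dequantize(scale, zero_point, scale_zp_premul);
//   for (auto& fv : fx) {            // float_num_vecs() Vectorized<float>s
//     fv = fv * fv;                  // do the arithmetic in full precision
//   }
//   auto qy = Vectorized<c10::quint8>::quantize(fx, scale_f, zp, inverse_scale);
//   qy.store(dst);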
+
+namespace at::vec {
+inline namespace CPU_CAPABILITY {
+
+#if defined(CPU_CAPABILITY_AVX2)
+
+#ifdef _MSC_VER
+__declspec(align(64)) struct Vectorizedqi {
+ protected:
+  __m256i vals;
+#else
+struct Vectorizedqi {
+ protected:
+  __m256i vals __attribute__((aligned(64)));
+#endif
+
+ public:
+  Vectorizedqi() {}
+  Vectorizedqi(__m256i v) : vals(v) {}
+  operator __m256i() const {
+    return vals;
+  }
+};
+
+template <typename T>
+__m256i pack_saturate_and_clamp(
+    __m256i first,
+    __m256i second,
+    T min_val,
+    T max_val);
+
+template <>
+inline __m256i pack_saturate_and_clamp<int32_t>(
+    __m256i /*first*/,
+    __m256i /*second*/,
+    int32_t /*min_val*/,
+    int32_t /*max_val*/) {
+  // This function is for linkage only, will not be used
+  AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
+}
+
+template <>
+inline __m256i pack_saturate_and_clamp<int8_t>(
+    __m256i first,
+    __m256i second,
+    int8_t min_val,
+    int8_t max_val) {
+  __m256i packed_and_sat = _mm256_packs_epi16(first, second);
+  return _mm256_max_epi8(
+      _mm256_set1_epi8(min_val),
+      _mm256_min_epi8(packed_and_sat, _mm256_set1_epi8(max_val)));
+}
+
+template <>
+inline __m256i pack_saturate_and_clamp<uint8_t>(
+    __m256i first,
+    __m256i second,
+    uint8_t min_val,
+    uint8_t max_val) {
+  __m256i packed_and_sat = _mm256_packus_epi16(first, second);
+  return _mm256_max_epu8(
+      _mm256_set1_epi8(min_val),
+      _mm256_min_epu8(packed_and_sat, _mm256_set1_epi8(max_val)));
+}
+
+template <typename T>
+typename std::enable_if_t<std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>, at::vec::Vectorized<float>>
+inline convert_int8_to_float(at::vec::Vectorized<T> src) {
+  // Note: this function only converts as many input elements as fit in one
+  // at::vec::Vectorized<float>, i.e. only the first 8*8 bits are handled.
+  __m128i input_128 = _mm256_castsi256_si128(src);
+  // Convert from 8*uint8/int8 to 8*int32
+  __m256i input_256_int32;
+  if constexpr (std::is_same_v<T, uint8_t>)
+    input_256_int32 = _mm256_cvtepu8_epi32(input_128);
+  else
+    input_256_int32 = _mm256_cvtepi8_epi32(input_128);
+  // Convert from 8*int32 to 8*float
+  return _mm256_cvtepi32_ps(input_256_int32);
+}
+
+template <typename T>
+typename std::enable_if_t<std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>, at::vec::Vectorized<T>>
+inline convert_float_to_int8(at::vec::Vectorized<float> src) {
+  // Convert from float32 to int32 with truncation
+  __m256i x_values_int32 = _mm256_cvttps_epi32(src);
+
+  // Convert from int32 to int16 using signed saturation
+  __m256i xy_packed_v = _mm256_packs_epi32(x_values_int32, x_values_int32);
+
+  constexpr auto min_val = std::numeric_limits<T>::min();
+  constexpr auto max_val = std::numeric_limits<T>::max();
+
+  // Convert from int16 to uint8/int8 using unsigned saturation
+  __m256i xyzw_clamped_v = pack_saturate_and_clamp<T>(
+      xy_packed_v, xy_packed_v, min_val, max_val);
+  __m256i permute_mask_v =
+      _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
+  return _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
+}
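A short usage sketch of the two helpers above (hypothetical buffer, AVX2 build assumed). Only the low 8 lanes of the 32-lane source vector participate, matching Vectorized<float>::size():

int8_t buf[32] = {};  // hypothetical input; only buf[0..7] are converted
auto v8 = at::vec::Vectorized<int8_t>::loadu(buf);
auto f = convert_int8_to_float<int8_t>(v8);  // 8 floats
f = f + at::vec::Vectorized<float>(1.0f);    // work in full precision
auto r = convert_float_to_int8<int8_t>(f);   // truncated, then saturated to [-128, 127]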
+
+template <typename T>
+__FORCE_INLINE void QuantizeAvx2(
+    const float* src,
+    T* dst,
+    int len,
+    float inverse_scale,
+    int64_t zero_point) {
+  constexpr int VLEN = 8;
+  constexpr auto min_val = std::numeric_limits<T>::min();
+  constexpr auto max_val = std::numeric_limits<T>::max();
+  const __m256i min_v = _mm256_set1_epi32(min_val);
+  const __m256i max_v = _mm256_set1_epi32(max_val);
+  // This is the largest int32 value < int32_max exactly representable in float
+  constexpr int32_t int32_float_max_val =
+      std::numeric_limits<int32_t>::max() - 127;
+  int i = 0;
+  __m256 inverse_scale_v = _mm256_set1_ps(inverse_scale);
+  // clang-format off
+  static const __m256i shuffle_mask_v = _mm256_set_epi8(
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0xff, 0xff, 0xff, 0xff,
+      0x0c, 0x08, 0x04, 0x00);
+  // clang-format on
+  __m256i permute_mask_v =
+      _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
+  __m256i permute_mask_l8_v =
+      _mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
+  int len_aligned = len / (VLEN * 4) * (VLEN * 4);
+  for (; i < len_aligned; i += 4 * VLEN) {
+    // x
+    __m256 x_vals = _mm256_load_ps(src + i);
+    __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
+    // If the floating point value is greater than int32_max,
+    // _mm256_cvtps_epi32 converts it to a negative value. Clip at
+    // int32_float_max_val to avoid this.
+    x_transformed_v =
+        _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // y
+    __m256 y_vals = _mm256_load_ps(src + i + VLEN);
+    __m256 y_transformed_v = _mm256_mul_ps(y_vals, inverse_scale_v);
+    y_transformed_v =
+        _mm256_min_ps(y_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // z
+    __m256 z_vals = _mm256_load_ps(src + i + 2 * VLEN);
+    __m256 z_transformed_v = _mm256_mul_ps(z_vals, inverse_scale_v);
+    z_transformed_v =
+        _mm256_min_ps(z_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    // w
+    __m256 w_vals = _mm256_load_ps(src + i + 3 * VLEN);
+    __m256 w_transformed_v = _mm256_mul_ps(w_vals, inverse_scale_v);
+    w_transformed_v =
+        _mm256_min_ps(w_transformed_v, _mm256_set1_ps(int32_float_max_val));
+
+    __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
+    __m256i y_rounded_v = _mm256_cvtps_epi32(y_transformed_v);
+    __m256i z_rounded_v = _mm256_cvtps_epi32(z_transformed_v);
+    __m256i w_rounded_v = _mm256_cvtps_epi32(w_transformed_v);
+
+    // add zero point
+    x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
+    y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point));
+    z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point));
+    w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point));
+
+    __m256i xy_packed_v = _mm256_packs_epi32(x_rounded_v, y_rounded_v);
+    __m256i zw_packed_v = _mm256_packs_epi32(z_rounded_v, w_rounded_v);
+    __m256i xyzw_clamped_v =
+        pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
+
+    xyzw_clamped_v =
+        _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
+    _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst + i), xyzw_clamped_v);
+  }
+
+  // An additional 8-lane AVX2 pass to handle the case when len is not a
+  // multiple of 32, based on fbgemm::QuantizeAvx2
+  // (https://github.com/pytorch/FBGEMM)
+  for (; i < len / VLEN * VLEN; i += VLEN) {
+    __m256 x_vals = _mm256_load_ps(src + i);
+    __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
+    x_transformed_v =
+        _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
+    __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
+    x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
+    __m256i x_clipped_v =
+        _mm256_max_epi32(min_v, _mm256_min_epi32(max_v, x_rounded_v));
+
+    x_clipped_v = _mm256_shuffle_epi8(x_clipped_v, shuffle_mask_v);
+    x_clipped_v = _mm256_permutevar8x32_epi32(x_clipped_v, permute_mask_l8_v);
+    _mm_storel_epi64(
+        reinterpret_cast<__m128i*>(dst + i),
+        _mm256_castsi256_si128(x_clipped_v));
+  }
+
+  for (; i < len; ++i) {
+    float transformed = src[i] * inverse_scale;
+
+    // Not exactly the same behavior as the vectorized code.
+ // The vectorized code above always rounds to even in halfway cases + // (https://software.intel.com/en-us/node/523819), but std::nearbyint + // does the same only when the current rounding mode is FE_TONEAREST. + // However, in practice, this should not be a problem because most cases + // use the default rounding mode FE_TONEAREST. + // Note that we cannot implement the same behavior as the vectorized code + // using std::round because it does rounding away from zero in halfway + // cases. + transformed = zero_point + std::nearbyint(transformed); + float clipped = + std::min(std::max(transformed, float(min_val)), float(max_val)); + dst[i] = clipped; + } +} + +template<> +struct Vectorized : public Vectorizedqi { + using size_type = int; + static constexpr size_type size() { + return 8; + } + + static constexpr int float_num_vecs() { + return 1; + } + + static constexpr int int_num_vecs() { + return 1; + } + + using float_vec_return_type = std::array, 1>; + using int_vec_return_type = std::array, 1>; + using value_type = c10::qint32::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint32& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi32(uw); + } + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
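+    // For example, loadu(ptr, 3) fills lanes 0..2 from ptr and leaves the
+    // remaining lanes zero, so a tail shorter than size() elements never
+    // reads past the end of the source buffer.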
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_zp_premul) const { + __m256 float_vals = _mm256_cvtepi32_ps(vals); + return {vec::fmadd(scale, Vectorized(float_vals), scale_zp_premul)}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + __m256 float_vals = _mm256_cvtepi32_ps(vals); + return {(Vectorized(float_vals) - zero_point) * scale}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + Vectorized retval; + auto rhs_data = (__m256)rhs[0]; + at::native::quantize_vec( + scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 8); + return retval; + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epi32(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epi32(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epi32( + _mm256_max_epi32(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + return {_mm256_sub_epi32(vals, b)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + + __m256 scaled = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier_v); + __m256i rounded = _mm256_cvtps_epi32(scaled); + return _mm256_add_epi32(rounded, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + return _mm256_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + return _mm256_add_epi32(a, b); +} + +/* + * Convert values from int32 back to int8/uint8 + */ +template +__m256i RequantizeAvx2( + const std::array, 4>& inp, + __m256 multiplier, + __m256i zp) { + static_assert( + std::is_same_v || std::is_same_v, + "Only int8_t/uint8_t are supported"); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + __m256i permute_mask_v = + _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00); + __m256 x_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier); + __m256 y_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[1]), multiplier); + __m256 z_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[2]), multiplier); + __m256 w_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[3]), multiplier); + + __m256i x_rounded_v = _mm256_cvtps_epi32(x_scaled_v); + __m256i y_rounded_v = _mm256_cvtps_epi32(y_scaled_v); + __m256i z_rounded_v = _mm256_cvtps_epi32(z_scaled_v); + __m256i w_rounded_v = _mm256_cvtps_epi32(w_scaled_v); + + /* Add zero point */ + __m256i x_v = _mm256_add_epi32(x_rounded_v, zp); + __m256i y_v = _mm256_add_epi32(y_rounded_v, zp); + __m256i z_v = 
_mm256_add_epi32(z_rounded_v, zp); + __m256i w_v = _mm256_add_epi32(w_rounded_v, zp); + + /* Pack to int16_t and saturate */ + __m256i xy_packed_v = _mm256_packs_epi32(x_v, y_v); + __m256i zw_packed_v = _mm256_packs_epi32(z_v, w_v); + + __m256i xyzw_clamped_v = + pack_saturate_and_clamp(xy_packed_v, zw_packed_v, min_val, max_val); + + /* + * xyzw_clamped_v has results in the following layout so we need to + * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 + */ + xyzw_clamped_v = _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v); + return xyzw_clamped_v; +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 32; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::qint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + + Vectorized() {} + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint8& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi8(uw); + } + + // This is needed because the compiler emits awful code for the default + // constructor for moving the enum + // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy) + C10_CLANG_DIAGNOSTIC_PUSH() + #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy") + C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy") + #endif + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + C10_CLANG_DIAGNOSTIC_POP() + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
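+    // Note that count is measured in elements of value_type (one byte each
+    // for qint8), so a partial load copies count bytes into the zeroed
+    // temporary before the full-width vector load below.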
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + private: + __m256i cvtepi8_epi32(__m128i epi8_vals) const { + return _mm256_cvtepi8_epi32(epi8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_neg_zp_premul) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_neg_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_neg_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_neg_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_neg_zp_premul); + return {val0, val1, val2, val3}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = (Vectorized(float_val0) - zero_point) * scale; + auto val1 = (Vectorized(float_val1) - zero_point) * scale; + auto val2 = (Vectorized(float_val2) - zero_point) * scale; + auto val3 = (Vectorized(float_val3) - zero_point) * scale; + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float /*scale*/, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + int8_t quantized_values[32]; + QuantizeAvx2( + rhs_data, quantized_values, 32, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epi8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epi8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epi8( + _mm256_max_epi8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256i int32_val0 = cvtepi8_epi32(int_val0); + __m256i int32_val1 = cvtepi8_epi32(int_val1); + __m256i int32_val2 = cvtepi8_epi32(int_val2); + __m256i int32_val3 = 
cvtepi8_epi32(int_val3); + + __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0)); + __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1)); + __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2)); + __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3)); + + __m256i int32_b0 = cvtepi8_epi32(int_b0); + __m256i int32_b1 = cvtepi8_epi32(int_b1); + __m256i int32_b2 = cvtepi8_epi32(int_b2); + __m256i int32_b3 = cvtepi8_epi32(int_b3); + + __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0); + __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1); + __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2); + __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3); + + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + return RequantizeAvx2(inp, multiplier_v, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 32; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::quint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m256i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::quint8& val) { + value_type uw = val.val_; + vals = _mm256_set1_epi8(uw); + } + + // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy) + C10_CLANG_DIAGNOSTIC_PUSH() + #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy") + C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy") + #endif + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + C10_CLANG_DIAGNOSTIC_POP() + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm256_storeu_si256((__m256i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return _mm256_loadu_si256((const __m256i*)tmp_values); + } + + private: + __m256i cvtepu8_epi32(__m128i epu8_vals) const { + return _mm256_cvtepu8_epi32(epu8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized /*zero_point*/, + Vectorized scale_zp_premul) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_zp_premul); + return {val0, val1, val2, val3}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = (Vectorized(float_val0) - zero_point) * scale; + auto val1 = (Vectorized(float_val1) - zero_point) * scale; + auto val2 = (Vectorized(float_val2) - zero_point) * scale; + auto val3 = (Vectorized(float_val3) - zero_point) * scale; + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float /*scale*/, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + uint8_t quantized_values[32]; + QuantizeAvx2( + rhs_data, quantized_values, 32, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm256_max_epu8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm256_min_epu8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm256_min_epu8( + _mm256_max_epu8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0)); + __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1)); + __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2)); + __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3)); + + __m256i int32_val0 = cvtepu8_epi32(int_val0); + __m256i int32_val1 = cvtepu8_epi32(int_val1); + __m256i int32_val2 = cvtepu8_epi32(int_val2); + __m256i int32_val3 = cvtepu8_epi32(int_val3); 
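+    // All 32 uint8 lanes of *this are now widened to int32 across four
+    // vectors, so the lane-wise subtraction below cannot wrap; b receives
+    // the same widening treatment next.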
+ + __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0)); + __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1)); + __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2)); + __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3)); + + __m256i int32_b0 = cvtepu8_epi32(int_b0); + __m256i int32_b1 = cvtepu8_epi32(int_b1); + __m256i int32_b2 = cvtepu8_epi32(int_b2); + __m256i int32_b3 = cvtepu8_epi32(int_b3); + + __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0); + __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1); + __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2); + __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3); + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m256 multiplier_v = _mm256_set1_ps(multiplier); + __m256i zero_point_v = _mm256_set1_epi32(zero_point); + return RequantizeAvx2(inp, multiplier_v, zero_point_v); + } + + private: + + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm256_loadu_si256((const __m256i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#else + +// NOTE: These are low-performance implementations that we fall back on +// if we are not building with AVX2. This may not be an issue, because +// currently for quantization we assume the user has at least AVX512 +// installed, so these can simply act as a reference implementation. +// +// If in the future we relax this requirement (AVX2+), we should probably +// revisit these implementations + +template < + typename T, + typename float_vec_return_type_, + typename int_vec_return_type_, + int size_> +struct VectorizedQuantizedConverter { + static constexpr int size() { + return size_; + } + + static constexpr int float_num_vecs() { + return size() / 8; + } + + static constexpr int int_num_vecs() { + return size() / 8; + } + + using float_vec_return_type = float_vec_return_type_; + using int_vec_return_type = int_vec_return_type_; + + using value_type = typename T::underlying; + std::array vals; + + VectorizedQuantizedConverter(T val) { + for (const auto i : c10::irange(size())) { + vals[i] = val.val_; + } + } + + VectorizedQuantizedConverter(const void* ptr) { + memcpy(vals.data(), ptr, sizeof(value_type) * size()); + } + + void store(void* ptr, int count = size()) const { + memcpy(ptr, vals.data(), count * sizeof(value_type)); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized /*scale_zp_premul*/) const { + float_vec_return_type rv; + for (const auto i : c10::irange(float_num_vecs())) { + float tmp_vals[8]; + for (const auto j : c10::irange(8)) { + tmp_vals[j] = at::native::dequantize_val( + scale[j], zero_point[j], T(vals[8 * i + j])); + } + rv[i] = Vectorized(tmp_vals[0], + tmp_vals[1], + tmp_vals[2], + tmp_vals[3], + tmp_vals[4], + tmp_vals[5], + tmp_vals[6], + tmp_vals[7]); + } + return rv; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + Vectorized scale_zp_premul; + return dequantize(scale, zero_point, scale_zp_premul); + } + + protected: + VectorizedQuantizedConverter() {} +}; + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + 
std::array, 1>, + 8>() {} + Vectorized(c10::qint32 val) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 8>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint32*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + for (const auto i : c10::irange(size())) { + retval[0].vals[i] = vals[i] - b.vals[i]; + } + return retval; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = + std::nearbyint(static_cast(inp[0].vals[i]) * multiplier) + + zero_point; + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] * b.vals[i]; + } + return retval; +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] + b.vals[i]; + } + return retval; +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32>() {} + Vectorized(c10::qint8 val) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + 
std::array, 4>, + 32>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 32>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint8*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + std::nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32> { + Vectorized() + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>() {} + Vectorized(c10::quint8 val) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>(val) {} + Vectorized(const void* ptr) + : 
VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 32>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy( + tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return Vectorized(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float /*inverse_scale*/) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 8], 8); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::quint8*)qvals.data(), + 8 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + std::nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#endif // if defined(CPU_CAPABILITY_AVX2) +}} // namespace at::vec::CPU_CAPABILITY diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..98ac8396317943c3e98d8a0be207493dc66e3155 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h @@ -0,0 +1,246 @@ +#pragma once + +#include +#include +#include + +// Note: header order is important here +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace at { +namespace vec { + +inline namespace CPU_CAPABILITY { + +DEFINE_CLAMP_FUNCS(c10::quint8) +DEFINE_CLAMP_FUNCS(c10::qint8) +DEFINE_CLAMP_FUNCS(c10::qint32) +DEFINE_CLAMP_FUNCS(int16_t) +DEFINE_CLAMP_FUNCS(int32_t) +DEFINE_CLAMP_FUNCS(int64_t) +DEFINE_CLAMP_FUNCS(float) +DEFINE_CLAMP_FUNCS(double) + +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + vec_madd(a.vec0(), b.vec0(), c.vec0()), + vec_madd(a.vec1(), b.vec1(), c.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; +} +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; +} +template <> +Vectorized C10_ALWAYS_INLINE fmadd( + const Vectorized& a, + const Vectorized& b, + const Vectorized& c) { + return Vectorized{ + a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; +} + +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t) +DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t) + +template <> +Vectorized C10_ALWAYS_INLINE +convert_to_int_of_same_size(const Vectorized& src) { + return Vectorized{vec_signed(src.vec0()), vec_signed(src.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE +convert_to_int_of_same_size( + const Vectorized& src) { + return Vectorized{vec_signed(src.vec0()), vec_signed(src.vec1())}; +} + +template <> +inline void convert(const int32_t* src, float* dst, int64_t n) { + // int32_t and float have same size + int64_t i; + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + const int32_t* src_a = src + i; + float* dst_a = dst + i; + vint32 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast(src_a)); + vint32 input_vec1 = + vec_vsx_ld(offset16, reinterpret_cast(src_a)); + vfloat32 c0 = vec_float(input_vec0); + vfloat32 c1 = vec_float(input_vec1); + vec_vsx_st(c0, offset0, dst_a); + vec_vsx_st(c1, offset16, dst_a); + } + + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} + +template <> +inline void convert(const int64_t* src, double* dst, int64_t n) { + int64_t i; + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + const int64_t* src_a = src + i; + double* dst_a = dst + i; + vint64 input_vec0 = + vec_vsx_ld(offset0, reinterpret_cast(src_a)); + vint64 input_vec1 = + vec_vsx_ld(offset16, reinterpret_cast(src_a)); + vfloat64 c0 = vec_double(input_vec0); + vfloat64 c1 = vec_double(input_vec1); + vec_vsx_st(c0, offset0, reinterpret_cast(dst_a)); + vec_vsx_st(c1, offset16, reinterpret_cast(dst_a)); + } + for (; i < n; i++) { + dst[i] = static_cast(src[i]); + } +} +//Generic implementation to fix compiler error +//TO-DO : Add optimized version for ppc64 +inline std::tuple, Vectorized> convert_half_float( + const Vectorized& a) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float 
arr[K]; + __at_align__ Half arr2[K]; + a.store(arr2); + convert(arr2, arr, K); + return std::make_tuple( + Vectorized::loadu(arr), + Vectorized::loadu(arr + Vectorized::size())); +} + +inline Vectorized convert_float_half( + const Vectorized& a, const Vectorized& b) { + constexpr int64_t K = Vectorized::size(); + __at_align__ float arr[K]; + __at_align__ Half arr2[K]; + a.store(arr); + b.store(arr + Vectorized::size()); + convert(arr, arr2, K); + return Vectorized::loadu(arr2); +}; + +template <> +std::pair, Vectorized> inline interleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, a1, a2, a3} + // b = {b0, b1, b2, b3} + + vfloat64 ab00 = vec_xxpermdi(a.vec0(), b.vec0(), 0); + vfloat64 ab11 = vec_xxpermdi(a.vec0(), b.vec0(), 3); + vfloat64 ab2_00 = vec_xxpermdi(a.vec1(), b.vec1(), 0); + vfloat64 ab2_11 = vec_xxpermdi(a.vec1(), b.vec1(), 3); + // return {a0, b0, a1, b1} + // {a2, b2, a3, b3} + return std::make_pair( + Vectorized{ab00, ab11}, Vectorized{ab2_00, ab2_11}); +} + +template <> +std::pair, Vectorized> inline deinterleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1} + // b = {a2, b2, a3, b3} + vfloat64 aa01 = vec_xxpermdi(a.vec0(), a.vec1(), 0); + vfloat64 aa23 = vec_xxpermdi(b.vec0(), b.vec1(), 0); + + vfloat64 bb_01 = vec_xxpermdi(a.vec0(), a.vec1(), 3); + vfloat64 bb_23 = vec_xxpermdi(b.vec0(), b.vec1(), 3); + + // swap lanes: + // return {a0, a1, a2, a3} + // {b0, b1, b2, b3} + return std::make_pair( + Vectorized{aa01, aa23}, Vectorized{bb_01, bb_23}); +} + +template <> +std::pair, Vectorized> inline interleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, a1, a2, a3,, a4, a5, a6, a7} + // b = {b0, b1, b2, b3,, b4, b5, b6, b7} + + vfloat32 ab0011 = vec_mergeh(a.vec0(), b.vec0()); + vfloat32 ab2233 = vec_mergel(a.vec0(), b.vec0()); + + vfloat32 ab2_0011 = vec_mergeh(a.vec1(), b.vec1()); + vfloat32 ab2_2233 = vec_mergel(a.vec1(), b.vec1()); + // group cols crossing lanes: + // return {a0, b0, a1, b1,, a2, b2, a3, b3} + // {a4, b4, a5, b5,, a6, b6, a7, b7} + + return std::make_pair( + Vectorized{ab0011, ab2233}, Vectorized{ab2_0011, ab2_2233}); +} + +template <> +std::pair, Vectorized> inline deinterleave2( + const Vectorized& a, + const Vectorized& b) { + // inputs: + // a = {a0, b0, a1, b1,, a2, b2, a3, b3} + // b = {a4, b4, a5, b5,, a6, b6, a7, b7} + + // {a0,a2,b0,b2} {a1,a3,b1,b3} + vfloat32 a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1()); + vfloat32 a1a3b1b3 = vec_mergel(a.vec0(), a.vec1()); + + vfloat32 aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3); + vfloat32 bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3); + + vfloat32 a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1()); + vfloat32 a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1()); + + vfloat32 aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2); + vfloat32 bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2); + + // it could be done with vec_perm ,too + // swap lanes: + // return {a0, a1, a2, a3,, a4, a5, a6, a7} + // {b0, b1, b2, b3,, b4, b5, b6, b7} + + return std::make_pair( + Vectorized{aa0123, aa0123_2}, Vectorized{bb0123, bb0123_2}); +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..2c74847758d84e866df7e1c3cc802a6e61cea8d1 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h @@ -0,0 +1,584 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { +using ComplexDbl = c10::complex; + +template <> +class Vectorized { + union { + struct { + vfloat64 _vec0; + vfloat64 _vec1; + }; + struct { + vbool64 _vecb0; + vbool64 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + using value_type = ComplexDbl; + using vec_internal_type = vfloat64; + using vec_internal_mask_type = vbool64; + using size_type = int; + static constexpr size_type size() { + return 2; + } + Vectorized() {} + C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {} + + Vectorized(ComplexDbl val) { + double real_value = val.real(); + double imag_value = val.imag(); + _vec0 = vfloat64{real_value, imag_value}; + _vec1 = vfloat64{real_value, imag_value}; + } + Vectorized(ComplexDbl val1, ComplexDbl val2) { + _vec0 = vfloat64{val1.real(), val1.imag()}; + _vec1 = vfloat64{val2.real(), val2.imag()}; + } + + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return a; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return b; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {b._vec0, a._vec1}; + } + + template + static std::enable_if_t> + C10_ALWAYS_INLINE + blend(const Vectorized& a, const Vectorized& b) { + return {a._vec0, b._vec1}; + } + + template + static Vectorized C10_ALWAYS_INLINE + el_blend(const Vectorized& a, const Vectorized& b) { + const vbool64 mask_1st = VsxDblMask1(mask); + const vbool64 mask_2nd = VsxDblMask2(mask); + return { + (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), + (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd)}; + } + + static Vectorized blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + // convert std::complex index mask to V index mask: xy -> xxyy + auto mask_complex = + Vectorized(vec_splat(mask._vec0, 0), vec_splat(mask._vec1, 0)); + return { + vec_sel(a._vec0, b._vec0, mask_complex._vecb0), + vec_sel(a._vec1, b._vec1, mask_complex._vecb1)}; + } + + static Vectorized C10_ALWAYS_INLINE elwise_blendv( + const Vectorized& a, + const Vectorized& b, + const Vectorized& mask) { + return { + vec_sel(a._vec0, b._vec0, mask._vecb0), + vec_sel(a._vec1, b._vec1, mask._vecb1)}; + } + template + static Vectorized arange( + ComplexDbl base = 0., + step_t step = static_cast(1)) { + return Vectorized(base, base + step); + } + static Vectorized set( + const Vectorized& a, + const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + } + return b; + } + + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, 
reinterpret_cast(ptr))}; + } + + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return { + vec_vsx_ld(offset0, reinterpret_cast(tmp_values)), + vec_vsx_ld(offset16, reinterpret_cast(tmp_values))}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, reinterpret_cast(tmp_values)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(tmp_values)); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + + const ComplexDbl& operator[](int idx) const = delete; + ComplexDbl& operator[](int idx) = delete; + + Vectorized map(ComplexDbl (*const f)(ComplexDbl)) const { + __at_align__ ComplexDbl tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + + Vectorized map(ComplexDbl (*const f)(const ComplexDbl&)) const { + __at_align__ ComplexDbl tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + + Vectorized el_swapped() const { + vfloat64 v0 = vec_xxpermdi(_vec0, _vec0, 2); + vfloat64 v1 = vec_xxpermdi(_vec1, _vec1, 2); + return {v0, v1}; + } + + Vectorized el_madd( + const Vectorized& multiplier, + const Vectorized& val) const { + return { + vec_madd(_vec0, multiplier._vec0, val._vec0), + vec_madd(_vec1, multiplier._vec1, val._vec1)}; + } + + Vectorized el_mergeo() const { + vfloat64 v0 = vec_splat(_vec0, 1); + vfloat64 v1 = vec_splat(_vec1, 1); + return {v0, v1}; + } + + Vectorized el_mergee() const { + vfloat64 v0 = vec_splat(_vec0, 0); + vfloat64 v1 = vec_splat(_vec1, 0); + return {v0, v1}; + } + + static Vectorized el_mergee( + Vectorized& first, + Vectorized& second) { + return { + vec_mergeh(first._vec0, second._vec0), + vec_mergeh(first._vec1, second._vec1)}; + } + + static Vectorized el_mergeo( + Vectorized& first, + Vectorized& second) { + return { + vec_mergel(first._vec0, second._vec0), + vec_mergel(first._vec1, second._vec1)}; + } + + Vectorized abs_2_() const { + auto a = (*this).elwise_mult(*this); + auto permuted = a.el_swapped(); + a = a + permuted; + return a; + } + + Vectorized abs_() const { + auto vi = el_mergeo(); + auto vr = el_mergee(); + return {Sleef_hypotd2_u05vsx(vr._vec0, vi._vec0), Sleef_hypotd2_u05vsx(vr._vec1, vi._vec1)}; + } + + Vectorized abs() const { + return abs_() & vd_real_mask; + } + + Vectorized angle_() const { + // angle = atan2(b/a) + // auto b_a = _mm256_permute_pd(values, 0x05); // b a + // return Sleef_atan2d4_u10(values, b_a); // 90-angle angle + Vectorized ret; + ret._vec0[0] = std::atan2(_vec0[1], _vec0[0]); + ret._vec1[0] = std::atan2(_vec1[1], _vec1[0]); + return ret; + } + + Vectorized angle() const { + return angle_() & vd_real_mask; + } + + Vectorized real_() const { + return *this & vd_real_mask; + } + Vectorized real() const { + return *this & vd_real_mask; + } + Vectorized imag_() const { + return *this & vd_imag_mask; + } + Vectorized imag() const { + return imag_().el_swapped(); + } + + Vectorized conj_() const { + return *this ^ vd_isign_mask; + } + Vectorized conj() const { + return *this ^ vd_isign_mask; + } + + Vectorized log() const { + // Most trigonomic ops use the log() op to improve complex number + // performance. 
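+    // For reference: log(a + bi) = log|a + bi| + i*atan2(b, a); map() routes
+    // each element through std::log via a store/load round trip.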
+ return map(std::log); + } + + Vectorized log2() const { + // log2eB_inv + auto ret = log(); + return ret.elwise_mult(vd_log2e_inv); + } + Vectorized log10() const { + auto ret = log(); + return ret.elwise_mult(vd_log10e_inv); + } + + Vectorized log1p() const { + return map(std::log1p); + } + + Vectorized asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + auto conj = conj_(); + auto b_a = conj.el_swapped(); + auto ab = conj.elwise_mult(b_a); + auto im = ab + ab; + auto val_2 = (*this).elwise_mult(*this); + auto val_2_swapped = val_2.el_swapped(); + auto re = horizontal_sub(val_2, val_2_swapped); + re = Vectorized(vd_one) - re; + auto root = el_blend<0x0A>(re, im).sqrt(); + auto ln = (b_a + root).log(); + return ln.el_swapped().conj(); + } + + Vectorized acos() const { + // acos(x) = pi/2 - asin(x) + return Vectorized(vd_pi_2) - asin(); + } + + Vectorized atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + auto ione = Vectorized(vd_imag_one); + auto sum = ione + *this; + auto sub = ione - *this; + auto ln = (sum / sub).log(); // ln((i + z)/(i - z)) + return ln * vd_imag_half; // i/2*ln() + } + Vectorized atanh() const { + return map(std::atanh); + } + + Vectorized sin() const { + return map(std::sin); + } + Vectorized sinh() const { + return map(std::sinh); + } + Vectorized cos() const { + return map(std::cos); + } + Vectorized cosh() const { + return map(std::cosh); + } + + Vectorized tan() const { + return map(std::tan); + } + Vectorized tanh() const { + return map(std::tanh); + } + Vectorized ceil() const { + return {vec_ceil(_vec0), vec_ceil(_vec1)}; + } + Vectorized floor() const { + return {vec_floor(_vec0), vec_floor(_vec1)}; + } + Vectorized neg() const { + auto z = Vectorized(vd_zero); + return z - *this; + } + Vectorized round() const { + return {vec_rint(_vec0), vec_rint(_vec1)}; + } + + Vectorized trunc() const { + return {vec_trunc(_vec0), vec_trunc(_vec1)}; + } + + Vectorized elwise_sqrt() const { + return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; + } + + Vectorized sqrt() const { + return map(std::sqrt); + } + + Vectorized reciprocal() const { + // re + im*i = (a + bi) / (c + di) + // re = (ac + bd)/abs_2() = c/abs_2() + // im = (bc - ad)/abs_2() = d/abs_2() + auto c_d = *this ^ vd_isign_mask; // c -d + auto abs = abs_2_(); + return c_d.elwise_div(abs); + } + + Vectorized rsqrt() const { + return sqrt().reciprocal(); + } + + static Vectorized horizontal_add( + Vectorized& first, + Vectorized& second) { + // Operates on individual floats, see _mm_hadd_ps + // {f0+f1, s0+s1, f2+f3, s2+s3, ...} + // i.e. 
it sums the re and im of each value and interleaves first and second: + // {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...} + return el_mergee(first, second) + el_mergeo(first, second); + } + + static Vectorized horizontal_sub( + Vectorized& first, + Vectorized& second) { + // we will simulate it differently with 6 instructions total + // lets permute second so that we can add it getting horizontal sums + auto first_perm = first.el_swapped(); // 2perm + auto second_perm = second.el_swapped(); // 2perm + // summ + auto first_ret = first - first_perm; // 2sub + auto second_ret = second - second_perm; // 2 sub + // now lets choose evens + return el_mergee(first_ret, second_ret); // 2 mergee's + } + + Vectorized inline operator*(const Vectorized& b) const { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i +#if 1 + // this is more vsx friendly than simulating horizontal from x86 + auto vi = b.el_mergeo(); + auto vr = b.el_mergee(); + vi = vi ^ vd_rsign_mask; + auto ret = elwise_mult(vr); + auto vx_swapped = el_swapped(); + ret = vx_swapped.el_madd(vi, ret); +#else + auto ac_bd = elwise_mult(b); + auto d_c = b.el_swapped(); + d_c = d_c ^ vd_isign_mask; + auto ad_bc = elwise_mult(d_c); + auto ret = horizontal_sub(ac_bd, ad_bc); +#endif + return ret; + } + + Vectorized inline operator/(const Vectorized& b) const { + // re + im*i = (a + bi) / (c + di) + // re = (ac + bd)/abs_2() + // im = (bc - ad)/abs_2() + auto fabs_cd = Vectorized{ + vec_andc(b._vec0, vd_sign_mask), + vec_andc(b._vec1, vd_sign_mask)}; // |c| |d| + auto fabs_dc = fabs_cd.el_swapped(); // |d| |c| + auto scale = fabs_cd.elwise_max(fabs_dc); // sc = max(|c|, |d|) + auto a2 = elwise_div(scale); // a/sc b/sc + auto b2 = b.elwise_div(scale); // c/sc d/sc + auto acbd2 = a2.elwise_mult(b2); // ac/sc^2 bd/sc^2 + auto dc2 = b2.el_swapped(); // d/sc c/sc + dc2 = dc2 ^ vd_rsign_mask; // -d/sc c/sc + auto adbc2 = a2.elwise_mult(dc2); // -ad/sc^2 bc/sc^2 + auto ret = horizontal_add(acbd2, adbc2); // (ac+bd)/sc^2 (bc-ad)/sc^2 + auto denom2 = b2.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + ret = ret.elwise_div(denom2); + return ret; + } + + Vectorized exp() const { + return map(std::exp); + } + Vectorized exp2() const { + return map(exp2_impl); + } + Vectorized expm1() const { + return map(std::expm1); + } + + Vectorized pow(const Vectorized& exp) const { + __at_align__ ComplexDbl x_tmp[size()]; + __at_align__ ComplexDbl y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + + Vectorized sgn() const { + return map(at::native::sgn_impl); + } + + Vectorized operator<(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized operator<=(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized operator>(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized operator>=(const Vectorized& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized eq(const Vectorized& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & vd_one; + } + Vectorized ne(const Vectorized& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag 
numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & vd_one; + } + + DEFINE_MEMBER_OP(operator==, ComplexDbl, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, ComplexDbl, vec_cmpne) + + DEFINE_MEMBER_OP(operator+, ComplexDbl, vec_add) + DEFINE_MEMBER_OP(operator-, ComplexDbl, vec_sub) + DEFINE_MEMBER_OP(operator&, ComplexDbl, vec_and) + DEFINE_MEMBER_OP(operator|, ComplexDbl, vec_or) + DEFINE_MEMBER_OP(operator^, ComplexDbl, vec_xor) + // elementwise helpers + DEFINE_MEMBER_OP(elwise_mult, ComplexDbl, vec_mul) + DEFINE_MEMBER_OP(elwise_div, ComplexDbl, vec_div) + DEFINE_MEMBER_OP(elwise_gt, ComplexDbl, vec_cmpgt) + DEFINE_MEMBER_OP(elwise_ge, ComplexDbl, vec_cmpge) + DEFINE_MEMBER_OP(elwise_lt, ComplexDbl, vec_cmplt) + DEFINE_MEMBER_OP(elwise_le, ComplexDbl, vec_cmple) + DEFINE_MEMBER_OP(elwise_max, ComplexDbl, vec_max) +}; + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); + // auto max = _mm256_blendv_ps(a, b, mask); + auto mask = abs_a.elwise_lt(abs_b); + auto max = Vectorized::elwise_blendv(a, b, mask); + + return max; + // Exploit the fact that all-ones is a NaN. + // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + // return _mm256_or_ps(max, isnan); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); + // auto min = _mm256_blendv_ps(a, b, mask); + auto mask = abs_a.elwise_gt(abs_b); + auto min = Vectorized::elwise_blendv(a, b, mask); + return min; + // Exploit the fact that all-ones is a NaN. + // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); + // return _mm256_or_ps(min, isnan); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h new file mode 100644 index 0000000000000000000000000000000000000000..f8217930fa4989586ed134eb42dd2afe9ce7746c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h @@ -0,0 +1,286 @@ +#pragma once + +#include +#include +#include +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +template <> +class Vectorized { + private: + union { + struct { + vint64 _vec0; + vint64 _vec1; + }; + struct { + 
vbool64 _vecb0;
+      vbool64 _vecb1;
+    };
+
+  } __attribute__((__may_alias__));
+
+ public:
+  using value_type = int64_t;
+  using vec_internal_type = vint64;
+  using vec_internal_mask_type = vbool64;
+  using size_type = int;
+  using ElementType = signed long long;
+  static constexpr size_type size() {
+    return 4;
+  }
+  Vectorized() {}
+  C10_ALWAYS_INLINE Vectorized(vint64 v) : _vec0{v}, _vec1{v} {}
+  C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
+  C10_ALWAYS_INLINE Vectorized(vint64 v1, vint64 v2) : _vec0{v1}, _vec1{v2} {}
+  C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
+  C10_ALWAYS_INLINE Vectorized(int64_t scalar)
+      : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
+  C10_ALWAYS_INLINE Vectorized(
+      int64_t scalar1,
+      int64_t scalar2,
+      int64_t scalar3,
+      int64_t scalar4)
+      : _vec0{vint64{scalar1, scalar2}}, _vec1{vint64{scalar3, scalar4}} {}
+
+  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
+    return _vec0;
+  }
+  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
+    return _vec1;
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<mask == 0, Vectorized<int64_t>> C10_ALWAYS_INLINE
+  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+    return a;
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<(mask & 15) == 3, Vectorized<int64_t>> C10_ALWAYS_INLINE
+  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+    return {b._vec0, a._vec1};
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<(mask & 15) == 15, Vectorized<int64_t>> C10_ALWAYS_INLINE
+  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+    return b;
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<(mask > 0 && mask < 3), Vectorized<int64_t>> C10_ALWAYS_INLINE
+  blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+    constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
+    constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
+    const vbool64 mask_1st = (vbool64){g0, g1};
+    return {(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), a._vec1};
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<(mask > 3) && (mask & 3) == 0, Vectorized<int64_t>>
+  C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+    constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
+    constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
+
+    const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
+    return {a._vec0, (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
+  }
+
+  template <uint64_t mask>
+  static std::enable_if_t<
+      (mask > 3) && (mask & 3) != 0 && (mask & 15) != 15,
+      Vectorized<int64_t>>
+  C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+    constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
+    constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
+    constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
+    constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
+
+    const vbool64 mask_1st = (vbool64){g0, g1};
+    const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
+    return {
+        (vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st),
+        (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
+  }
+
+  static Vectorized<int64_t> C10_ALWAYS_INLINE blendv(
+      const Vectorized<int64_t>& a,
+      const Vectorized<int64_t>& b,
+      const Vectorized<int64_t>& mask) {
+    // the mask used here is returned by a vec256 comparison
+
+    return {
+        vec_sel(a._vec0, b._vec0, mask._vecb0),
+        vec_sel(a._vec1, b._vec1, mask._vecb1)};
+  }
+  template <typename step_t>
+  static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
+    return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
+  }
+
+  static Vectorized<int64_t> C10_ALWAYS_INLINE
+  set(const Vectorized<int64_t>& a,
+      const Vectorized<int64_t>& b,
+      size_t count =
size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + + return b; + } + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + static_assert(sizeof(double) == sizeof(value_type)); + const double* dptr = reinterpret_cast(ptr); + return {// treat it as double load + (vint64)vec_vsx_ld(offset0, dptr), + (vint64)vec_vsx_ld(offset16, dptr)}; + } + + __at_align__ double tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return { + (vint64)vec_vsx_ld(offset0, tmp_values), + (vint64)vec_vsx_ld(offset16, tmp_values)}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + double* dptr = reinterpret_cast(ptr); + vec_vsx_st((vfloat64)_vec0, offset0, dptr); + vec_vsx_st((vfloat64)_vec1, offset16, dptr); + } else if (count > 0) { + __at_align__ double tmp_values[size()]; + vec_vsx_st((vfloat64)_vec0, offset0, tmp_values); + vec_vsx_st((vfloat64)_vec1, offset16, tmp_values); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + const int64_t& operator[](int idx) const = delete; + int64_t& operator[](int idx) = delete; + + Vectorized angle() const { + return blendv( + Vectorized(0), Vectorized(c10::pi), *this < Vectorized(0)); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return Vectorized{0}; + } + Vectorized conj() const { + return *this; + } + + Vectorized C10_ALWAYS_INLINE abs() const { + return {vec_abs(_vec0), vec_abs(_vec1)}; + } + + Vectorized C10_ALWAYS_INLINE neg() const { + return {vec_neg(_vec0), vec_neg(_vec1)}; + } + + DEFINE_MEMBER_UNARY_OP(operator~, int64_t, vec_not) + DEFINE_MEMBER_OP(operator==, int64_t, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, int64_t, vec_cmpne) + DEFINE_MEMBER_OP(operator<, int64_t, vec_cmplt) + DEFINE_MEMBER_OP(operator<=, int64_t, vec_cmple) + DEFINE_MEMBER_OP(operator>, int64_t, vec_cmpgt) + DEFINE_MEMBER_OP(operator>=, int64_t, vec_cmpge) + DEFINE_MEMBER_OP_AND_ONE(eq, int64_t, vec_cmpeq) + DEFINE_MEMBER_OP_AND_ONE(ne, int64_t, vec_cmpne) + DEFINE_MEMBER_OP_AND_ONE(lt, int64_t, vec_cmplt) + DEFINE_MEMBER_OP_AND_ONE(le, int64_t, vec_cmple) + DEFINE_MEMBER_OP_AND_ONE(gt, int64_t, vec_cmpgt) + DEFINE_MEMBER_OP_AND_ONE(ge, int64_t, vec_cmpge) + DEFINE_MEMBER_OP(operator+, int64_t, vec_add) + DEFINE_MEMBER_OP(operator-, int64_t, vec_sub) + DEFINE_MEMBER_OP(operator*, int64_t, vec_mul) + DEFINE_MEMBER_OP(operator/, int64_t, vec_div) + DEFINE_MEMBER_OP(maximum, int64_t, vec_max) + DEFINE_MEMBER_OP(minimum, int64_t, vec_min) + DEFINE_MEMBER_OP(operator&, int64_t, vec_and) + DEFINE_MEMBER_OP(operator|, int64_t, vec_or) + DEFINE_MEMBER_OP(operator^, int64_t, vec_xor) +}; + +template <> +Vectorized inline operator<<(const Vectorized& a, const Vectorized& b) { + vuint64 shift_vec0 = reinterpret_cast(b.vec0()); + vuint64 shift_vec1 = reinterpret_cast(b.vec1()) ; + return Vectorized{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)}; +} + +template <> +Vectorized inline operator>>(const Vectorized& a, const Vectorized& b) { + vuint64 shift_vec0 = reinterpret_cast(b.vec0()); + vuint64 shift_vec1 = reinterpret_cast(b.vec1()) ; + return Vectorized{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)}; +} + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + return a.maximum(b); +} + +template 
<>
+Vectorized<int64_t> inline minimum(
+    const Vectorized<int64_t>& a,
+    const Vectorized<int64_t>& b) {
+  return a.minimum(b);
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_div(a.vec0(), b.vec0()), vec_div(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator&(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator|(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())};
+}
+
+template <>
+Vectorized<int64_t> C10_ALWAYS_INLINE operator^(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
+  return Vectorized<int64_t>{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())};
+}
+
+} // namespace
+} // namespace vec
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h
new file mode 100644
index 0000000000000000000000000000000000000000..8068d6102f4a125bd5a5cc55ee93d1ff74f7361d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h
@@ -0,0 +1,281 @@
+#pragma once
+
+#include <ATen/cpu/vec/intrinsics.h>
+#include <ATen/cpu/vec/vec_base.h>
+#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
+#include <c10/util/qint32.h>
+#include <array>
+
+// This file defines Vectorized<> for the quantized types.
+//
+//
+// Currently, we simply use these classes as efficient converters between
+// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
+// where doing the arithmetic in full precision is acceptable (e.g.
+// elementwise operators).
+//
+//
+// Conversions are as follows:
+//  Vectorized<c10::qint32> -> 1x Vectorized<float>
+//
+// The size of the returned float vector is specified by the special
+// constexpr function float_num_vecs. The type of the value returned
+// from dequantize (and expected as an argument to quantize) is
+// specified by float_vec_return_type.
+//
+// When writing kernels with these vectors, it is expected that floating-
+// point operations will be carried out in a loop over Vectorized<c10::qint32>::float_num_vecs
+// iterations.
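+//
+// A rough usage sketch of that pattern (illustrative only: `src`, `dst`, the
+// scale/zero-point values, and the squaring that stands in for any
+// float-domain computation are assumptions, not part of this header):
+//
+//   auto qv = Vectorized<c10::qint32>::loadu(src);
+//   auto fvecs = qv.dequantize(scale_vec, zp_vec, scale_zp_premul);
+//   for (size_t i = 0; i < Vectorized<c10::qint32>::float_num_vecs(); ++i) {
+//     fvecs[i] = fvecs[i] * fvecs[i];  // arithmetic happens in fp32
+//   }
+//   Vectorized<c10::qint32>::quantize(fvecs, scale, zero_point, inv_scale)
+//       .store(dst);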
+ +namespace at { +namespace vec { +inline namespace CPU_CAPABILITY { + +template <> +struct Vectorized { + private: + union { + struct { + vint32 _vec0; + vint32 _vec1; + }; + struct { + vbool32 _vecb0; + vbool32 _vecb1; + }; + + } __attribute__((__may_alias__)); + + public: + Vectorized() {} + + using size_type = int; + static constexpr size_type size() { + return 8; + } + + static constexpr size_t float_num_vecs() { + return 1; + } + static constexpr int int_num_vecs() { + return 1; + } + using float_vec_return_type = std::array, 1>; + using int_vec_return_type = std::array, 1>; + using value_type = c10::qint32::underlying; + using vec_internal_type = vint32; + using vec_internal_mask_type = vbool32; + C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {} + C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {} + C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {} + C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {} + + Vectorized(const c10::qint32& val) + : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {} + + static Vectorized C10_ALWAYS_INLINE + loadu(const void* ptr, int count = size()) { + if (count == size()) { + return { + vec_vsx_ld(offset0, reinterpret_cast(ptr)), + vec_vsx_ld(offset16, reinterpret_cast(ptr))}; + } + + __at_align__ value_type tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); + + return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; + } + void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + if (count == size()) { + vec_vsx_st(_vec0, offset0, reinterpret_cast(ptr)); + vec_vsx_st(_vec1, offset16, reinterpret_cast(ptr)); + } else if (count > 0) { + __at_align__ value_type tmp_values[size()]; + vec_vsx_st(_vec0, offset0, tmp_values); + vec_vsx_st(_vec1, offset16, tmp_values); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); + } + } + + C10_ALWAYS_INLINE const vec_internal_type& vec0() const { + return _vec0; + } + C10_ALWAYS_INLINE const vec_internal_type& vec1() const { + return _vec1; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + vfloat32 float_vals0 = vec_float(_vec0); + vfloat32 float_vals1 = vec_float(_vec1); + vfloat32 scale_vec0 = scale.vec0(); + vfloat32 scale_vec1 = scale.vec1(); + vfloat32 scale_zp_premul0 = scale_zp_premul.vec0(); + vfloat32 scale_zp_premul1 = scale_zp_premul.vec1(); + return {Vectorized{ + vec_madd(scale_vec0, float_vals0, scale_zp_premul0), + vec_madd(scale_vec1, float_vals1, scale_zp_premul1)}}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + vfloat32 float_vals0 = vec_float(_vec0); + vfloat32 float_vals1 = vec_float(_vec1); + vfloat32 scale_vec0 = scale.vec0(); + vfloat32 scale_vec1 = scale.vec1(); + vfloat32 zero_point0 = zero_point.vec0(); + vfloat32 zero_point1 = zero_point.vec1(); + return {Vectorized{ + (float_vals0 - zero_point0) * scale_vec0, + (float_vals1 - zero_point1) * scale_vec1}}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + Vectorized retval; + + const vint32 vmin = vec_splats(std::numeric_limits::min()); + const vint32 vmax = vec_splats(std::numeric_limits::max()); + vfloat32 inverse_scale_v = vec_splats(inverse_scale); + vfloat32 vec_zero_point = vec_splats((float)(zero_point)); + 
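+    // The affine quantization step below is, per lane and in scalar form,
+    // roughly q = clamp(rint(x * inverse_scale) + zero_point): vec_rint
+    // rounds to nearest, vec_signed converts float -> int32, and the
+    // vec_max/vec_min pair clamps to the int32 limits splatted above.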
Vectorized vf0 = rhs[0]; + + vfloat32 vecf0 = vf0.vec0(); + vfloat32 vecf1 = vf0.vec1(); + vecf0 = vec_mul(vecf0, inverse_scale_v); + vecf1 = vec_mul(vecf1, inverse_scale_v); + vecf0 = vec_add(vec_rint(vecf0), vec_zero_point); + vecf1 = vec_add(vec_rint(vecf1), vec_zero_point); + vint32 veci0 = vec_signed(vecf0); + vint32 veci1 = vec_signed(vecf1); + + veci0 = vec_max(veci0, vmin); + veci1 = vec_max(veci1, vmin); + veci0 = vec_min(veci0, vmax); + veci1 = vec_min(veci1, vmax); + + return {veci0, veci1}; + } + + Vectorized relu(Vectorized zero_point) const { + return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)}; + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) const { + vint32 max0 = vec_max(_vec0, zero_point._vec0); + vint32 max1 = vec_max(_vec1, zero_point._vec1); + return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)}; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + return {*this - b}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + const vint32 vmin = vec_splats(std::numeric_limits::min()); + const vint32 vmax = vec_splats(std::numeric_limits::max()); + vfloat32 vec_mult = vec_splats(multiplier); + vint32 vec_zero_point = vec_splats(zero_point); + Vectorized vi = inp[0]; + vfloat32 vecf0 = vec_float(vi.vec0()); + vfloat32 vecf1 = vec_float(vi.vec1()); + + vecf0 = vec_mul(vecf0, vec_mult); + vecf1 = vec_mul(vecf1, vec_mult); + + vecf0 = vec_rint(vecf0); + vecf1 = vec_rint(vecf1); + + vint32 veci0 = vec_add(vec_signed(vecf0),vec_zero_point); + vint32 veci1 = vec_add(vec_signed(vecf1),vec_zero_point); + + veci0 = vec_max(veci0, vmin); + veci1 = vec_max(veci1, vmin); + veci0 = vec_min(veci0, vmax); + veci1 = vec_min(veci1, vmax); + + return {veci0, veci1}; + } + + DEFINE_MEMBER_OP(operator==, c10::qint32, vec_cmpeq) + DEFINE_MEMBER_OP(operator!=, c10::qint32, vec_cmpne) + DEFINE_MEMBER_OP(operator<, c10::qint32, vec_cmplt) + DEFINE_MEMBER_OP(operator<=, c10::qint32, vec_cmple) + DEFINE_MEMBER_OP(operator>, c10::qint32, vec_cmpgt) + DEFINE_MEMBER_OP(operator>=, c10::qint32, vec_cmpge) + DEFINE_MEMBER_OP(operator+, c10::qint32, vec_add) + DEFINE_MEMBER_OP(operator-, c10::qint32, vec_sub) + DEFINE_MEMBER_OP(operator*, c10::qint32, vec_mul) + DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint32, /) + DEFINE_MEMBER_OP(maximum, c10::qint32, vec_max) + DEFINE_MEMBER_OP(minimum, c10::qint32, vec_min) + DEFINE_MEMBER_OP(operator&, c10::qint32, vec_and) + DEFINE_MEMBER_OP(operator|, c10::qint32, vec_or) + DEFINE_MEMBER_OP(operator^, c10::qint32, vec_xor) +}; + +template <> +Vectorized inline maximum( + const Vectorized& a, + const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline minimum( + const Vectorized& a, + const Vectorized& b) { + return a.minimum(b); +} + +template <> +Vectorized C10_ALWAYS_INLINE operator+(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_add(a.vec0(), b.vec0()), vec_add(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator-(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_sub(a.vec0(), b.vec0()), vec_sub(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator*(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_mul(a.vec0(), b.vec0()), vec_mul(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator/(const Vectorized& a, const Vectorized& b) { + return 
Vectorized{a.vec0()/b.vec0(), a.vec1()/b.vec1()}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator&(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_and(a.vec0(), b.vec0()), vec_and(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator|(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_or(a.vec0(), b.vec0()), vec_or(a.vec1(), b.vec1())}; +} + +template <> +Vectorized C10_ALWAYS_INLINE operator^(const Vectorized& a, const Vectorized& b) { + return Vectorized{vec_xor(a.vec0(), b.vec0()), vec_xor(a.vec1(), b.vec1())}; +} + +} // namespace +} // namespace vec +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h new file mode 100644 index 0000000000000000000000000000000000000000..c7fa23b23a6074e15c13d4bf9f7b7079251d2b6b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h @@ -0,0 +1,279 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace at { +namespace vec { + +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { + stream << val.val_; + return stream; +} +inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { + stream << static_cast(val.val_); + return stream; +} +inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { + stream << static_cast(val.val_); + return stream; +} + +template +std::ostream& operator<<(std::ostream& stream, const Vectorized& vec) { + T buf[Vectorized::size()]; + vec.store(buf); + stream << "vec["; + for (int i = 0; i != Vectorized::size(); i++) { + if (i != 0) { + stream << ", "; + } + stream << buf[i]; + } + stream << "]"; + return stream; +} + + +#if defined(CPU_CAPABILITY_AVX512) + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm512_castpd_ps(src); +} + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm512_castps_pd(src); +} + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm512_castsi512_ps(src); +} + +template<> +inline Vectorized cast(const Vectorized& src) { + return _mm512_castsi512_pd(src); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#ifndef _MSC_VER +// MSVC is not working well on complex function overload. +template +std::enable_if_t> +inline gather(const double* base_addr, const Vectorized& vindex) { + return _mm512_i64gather_pd(vindex, base_addr, scale); +} + +template +std::enable_if_t> +inline gather(const float* base_addr, const Vectorized& vindex) { + return _mm512_i32gather_ps(vindex, base_addr, scale); +} +#endif +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#ifndef _MSC_VER +// MSVC is not working well on complex function overload. 
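+// A sketch of the mask_gather contract defined below (names illustrative):
+// lane i of the result is base_addr[vindex[i]] where mask[i] is all-ones,
+// and src[i] otherwise, e.g.
+//   auto v = mask_gather<sizeof(double)>(src, ptr, vindex, mask);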
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
+inline mask_gather(const Vectorized<double>& src, const double* base_addr,
+                   const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
+  auto all_ones = _mm512_castsi512_pd(_mm512_set1_epi64(0xFFFFFFFFFFFFFFFF));
+  auto mask_ = _mm512_cmp_pd_mask(all_ones, mask.values, _CMP_EQ_OQ);
+  return _mm512_mask_i64gather_pd(src, mask_, vindex, base_addr, scale);
+}
+
+template<int64_t scale = 1>
+std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
+inline mask_gather(const Vectorized<float>& src, const float* base_addr,
+                   const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
+  auto all_ones = _mm512_castsi512_ps(_mm512_set1_epi32(0xFFFFFFFF));
+  auto mask_ = _mm512_cmp_ps_mask(all_ones, mask.values, _CMP_EQ_OQ);
+  return _mm512_mask_i32gather_ps(src, mask_, vindex, base_addr, scale);
+}
+#endif
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template<>
+Vectorized<int64_t>
+inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
+  return _mm512_cvtpd_epi64(src);
+}
+
+template<>
+Vectorized<int32_t>
+inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
+  return _mm512_cvttps_epi32(src);
+}
+
+template<>
+Vectorized<double>
+inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
+  return _mm512_cvtepi64_pd(src);
+}
+
+template<>
+Vectorized<float>
+inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
+  return _mm512_cvtepi32_ps(src);
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template <>
+std::pair<Vectorized<double>, Vectorized<double>>
+inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
+  // inputs:
+  //   a = {a0, a1, a2, a3, a4, a5, a6, a7}
+  //   b = {b0, b1, b2, b3, b4, b5, b6, b7}
+  // group cols crossing lanes:
+  //   return {a0, b0, a1, b1, a2, b2, a3, b3}
+  //          {a4, b4, a5, b5, a6, b6, a7, b7}
+  __m512i idx1 = _mm512_set_epi64(11, 3, 10, 2, 9, 1, 8, 0);
+  __m512i idx2 = _mm512_set_epi64(15, 7, 14, 6, 13, 5, 12, 4);
+  return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
+                        _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
+}
+
+template <>
+std::pair<Vectorized<float>, Vectorized<float>>
+inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
+  // inputs:
+  //   a = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
+  //   b = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
+  //
+  // return:
+  //   {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
+  //   {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
+  __m512i idx1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4,
+                                  19, 3, 18, 2, 17, 1, 16, 0);
+  __m512i idx2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12,
+                                  27, 11, 26, 10, 25, 9, 24, 8);
+  return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
+                        _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
+}
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+template <>
+std::pair<Vectorized<double>, Vectorized<double>>
+inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
+  // inputs:
+  //   a = {a0, b0, a1, b1, a2, b2, a3, b3}
+  //   b = {a4, b4, a5, b5, a6, b6, a7, b7}
+  // output:
+  //   return {a0, a1, a2, a3, a4, a5, a6, a7}
+  //          {b0, b1, b2, b3, b4, b5, b6, b7}
+  // The even and odd source positions are spelled out explicitly in the two
+  // index vectors below.
+  __m512i idx1 = _mm512_set_epi64(14, 12, 10, 8, 6, 4, 2, 0);
+  __m512i idx2 = _mm512_set_epi64(15, 13, 11, 9, 7, 5, 3, 1);
+
+  return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
+                        _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
+}
+
+template <>
+std::pair<Vectorized<float>, Vectorized<float>>
+inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
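+  // As in the double overload above, this inverts interleave2<float>: idx1
+  // gathers the even (a-) positions and idx2 the odd (b-) positions across
+  // the two inputs.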
// inputs: + // a = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} + // b = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} + // output: + // return {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} + // {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} + __m512i idx1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, + 14, 12, 10, 8, 6, 4, 2, 0); + __m512i idx2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, + 15, 13, 11, 9, 7, 5, 3, 1); + + return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), + _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +template<> +inline Vectorized flip(const Vectorized & v) { + const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15); + return _mm512_permutexvar_ps(mask, v); +} + +template<> +inline Vectorized flip(const Vectorized & v) { + const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); + return _mm512_permutexvar_pd(mask, v); +} + +template<> +inline Vectorized flip(const Vectorized & v) { + const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); + return _mm512_permutexvar_epi64(mask, v); +} + +template<> +inline Vectorized flip(const Vectorized & v) { + const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15); + return _mm512_permutexvar_epi32(mask, v); +} + +template<> +inline Vectorized flip(const Vectorized & v) { + const __m512i mask = _mm512_set_epi16( + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 + ); + return _mm512_permutexvar_epi16(mask, v); +} + +inline __m512i flip8(const __m512i & v) { + const __m512i mask1 = _mm512_set_epi8( + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + ); + const __m512i mask2 = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6); + auto reversed_vec = _mm512_shuffle_epi8(v, mask1); + return _mm512_permutexvar_epi64(mask2, reversed_vec); +} + +template<> +inline Vectorized flip(const Vectorized & v) { + return flip8(v); +} + +template<> +inline Vectorized flip(const Vectorized & v) { + return flip8(v); +} + +#endif // defined(CPU_CAPABILITY_AVX512) + +}}} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h new file mode 100644 index 0000000000000000000000000000000000000000..c7132349418de02e95c7fedbd2a78eba5270c526 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h @@ -0,0 +1,1662 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#if defined(CPU_CAPABILITY_AVX512) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) + +#ifndef SLEEF_CONST +#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER) +#define SLEEF_CONST const +#else +#define SLEEF_CONST +#endif +#define SLEEF_CONST_OLD SLEEF_CONST +#else +#define SLEEF_CONST_OLD +#endif + +// bfloat16 conversion +static inline void cvtbf16_fp32(const __m256i& a, __m512& o) { + o = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16)); +} + +static inline void cvtbf16_fp32(const __m512i& a, __m512& o1, __m512& o2) { + __m256i lo = _mm512_extracti32x8_epi32(a, 0); + __m256i hi = _mm512_extracti32x8_epi32(a, 1); + cvtbf16_fp32(lo, o1); + cvtbf16_fp32(hi, o2); +} + +static inline __m256i cvtfp32_bf16(const __m512& src) { + __m512i value = _mm512_castps_si512(src); + __m512i nan = _mm512_set1_epi32(0xffff); + auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q); + __m512i ones = _mm512_set1_epi32(0x1); + __m512i vec_bias = _mm512_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_value = _mm512_add_epi32(t_value, vec_bias); + // input += rounding_bias; + t_value = _mm512_add_epi32(t_value, value); + // input = input >> 16; + t_value = _mm512_srli_epi32(t_value, 16); + // Check NaN before converting back to bf16 + t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value); + return _mm512_cvtusepi32_epi16(t_value); +} + +static inline __m512i cvtfp32_bf16(const __m512& a, const __m512& b) { + __m512i lo = _mm512_castps_si512(a); + __m512i hi = _mm512_castps_si512(b); + __m512i nan = _mm512_set1_epi32(0xffff); + auto mask_lo = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q); + auto mask_hi = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q); + __m512i ones = _mm512_set1_epi32(0x1); + __m512i vec_bias = _mm512_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_lo = _mm512_and_si512(_mm512_srli_epi32(lo, 16), ones); + auto t_hi = _mm512_and_si512(_mm512_srli_epi32(hi, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_lo = _mm512_add_epi32(t_lo, vec_bias); + t_hi = _mm512_add_epi32(t_hi, vec_bias); + // input += rounding_bias; + t_lo = _mm512_add_epi32(t_lo, lo); + t_hi = _mm512_add_epi32(t_hi, hi); + // input = input >> 16; + t_lo = _mm512_srli_epi32(t_lo, 16); + t_hi = _mm512_srli_epi32(t_hi, 16); + // Check NaN before converting back to bf16 + t_lo = _mm512_mask_blend_epi32(mask_lo, nan, t_lo); + t_hi = _mm512_mask_blend_epi32(mask_hi, nan, t_hi); + + t_lo = _mm512_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4] + __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0); + return _mm512_permutexvar_epi64(idx, t_lo); +} + +static inline __m512i merge_compare_result(const __m512& a, const __m512& b) { + __m512i lo = _mm512_castps_si512(a); + __m512i hi = _mm512_castps_si512(b); + lo = _mm512_srli_epi32(lo, 16); + hi = _mm512_srli_epi32(hi, 16); + auto out = _mm512_packus_epi32(lo, hi); + __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0); + return _mm512_permutexvar_epi64(idx, out); +} + +// float16 conversion +static inline void cvtfp16_fp32(const __m256i& a, __m512& o) { + o = _mm512_cvtph_ps(a); +} + +static inline void cvtfp16_fp32(const __m512i& a, __m512& o1, 
__m512& o2) { + __m256i lo = _mm512_extracti32x8_epi32(a, 0); + __m256i hi = _mm512_extracti32x8_epi32(a, 1); + cvtfp16_fp32(lo, o1); + cvtfp16_fp32(hi, o2); +} + +static inline __m256i cvtfp32_fp16(const __m512& src) { + return _mm512_cvtps_ph( + src, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); +} + +static inline __m512i cvtfp32_fp16(const __m512& a, const __m512& b) { + __m256i lo = _mm512_cvtps_ph( + a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + __m256i hi = _mm512_cvtps_ph( + b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + __m512 t_lo = _mm512_castsi512_ps(_mm512_castsi256_si512(lo)); + __m256 t_hi = _mm256_castsi256_ps(hi); + return _mm512_castps_si512(_mm512_insertf32x8(t_lo, t_hi, 1)); +} + +// dtype conversion between float16/bfloat16 and float32 +template , int> = 0> +inline void cvt_to_fp32(const __m256i& a, __m512& o); +template <> inline void cvt_to_fp32(const __m256i& a, __m512& o) { + cvtbf16_fp32(a, o); +} +template <> inline void cvt_to_fp32(const __m256i& a, __m512& o) { + cvtfp16_fp32(a, o); +} + +template , int> = 0> +inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2); +template <> inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2) { + cvtbf16_fp32(a, o1, o2); +} +template <> inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2) { + cvtfp16_fp32(a, o1, o2); +} + +template , int> = 0> +inline __m512i cvt_from_fp32(const __m512& a, const __m512& b); +template <> inline __m512i cvt_from_fp32(const __m512& a, const __m512& b) { + return cvtfp32_bf16(a, b); +} +template <> inline __m512i cvt_from_fp32(const __m512& a, const __m512& b) { + return merge_compare_result(a, b); +} +template <> inline __m512i cvt_from_fp32(const __m512& a, const __m512& b) { + return cvtfp32_fp16(a, b); +} +template <> inline __m512i cvt_from_fp32(const __m512& a, const __m512& b) { + return cvtfp32_fp16(a, b); +} + +template +class Vectorized16 { +static_assert( + is_reduced_floating_point_v, + "Support only float16 and bfloat16."); +private: + __m512i values; +public: + using value_type = uint16_t; + using size_type = int; + static constexpr size_type size() { + return 32; + } + Vectorized16() {} + Vectorized16(__m512i v) : values(v) {} + Vectorized16(T val) { + value_type uw = val.x; + values = _mm512_set1_epi16(uw); + } + Vectorized16(T val1, T val2, T val3, T val4, + T val5, T val6, T val7, T val8, + T val9, T val10, T val11, T val12, + T val13, T val14, T val15, T val16, + T val17, T val18, T val19, T val20, + T val21, T val22, T val23, T val24, + T val25, T val26, T val27, T val28, + T val29, T val30, T val31, T val32) { + values = _mm512_set_epi16( + val32.x, val31.x, val30.x, val29.x, val28.x, val27.x, val26.x, val25.x, + val24.x, val23.x, val22.x, val21.x, val20.x, val19.x, val18.x, val17.x, + val16.x, val15.x, val14.x, val13.x, val12.x, val11.x, val10.x, val9.x, + val8.x, val7.x, val6.x, val5.x, val4.x, val3.x, val2.x, val1.x); + } + operator __m512i() const { + return values; + } + T& operator[](int idx) = delete; + const T& operator[](int idx) const = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + return _mm512_cmpeq_epi16_mask(values, _mm512_set1_epi16(0)); + } + static Vectorized loadu(const void* ptr, int16_t count = size()) { + if (count == size()) + return _mm512_loadu_si512(reinterpret_cast(ptr)); + + __mmask32 mask = (1ULL << count) - 1; + return _mm512_maskz_loadu_epi16(mask, ptr); + } + void store(void* ptr, int 
count = size()) const { + if (count == size()) { + _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values); + } else if (count > 0) { + __mmask32 mask = (1ULL << count) - 1; + _mm512_mask_storeu_epi16(ptr, mask, values); + } + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + __at_align__ int16_t tmp_values[size()]; + a.store(tmp_values); + if (mask & 0x01) + tmp_values[0] = b.values[31]; + if (mask & 0x02) + tmp_values[1] = b.values[30]; + if (mask & 0x04) + tmp_values[2] = b.values[29]; + if (mask & 0x08) + tmp_values[3] = b.values[28]; + if (mask & 0x10) + tmp_values[4] = b.values[27]; + if (mask & 0x20) + tmp_values[5] = b.values[26]; + if (mask & 0x40) + tmp_values[6] = b.values[25]; + if (mask & 0x80) + tmp_values[7] = b.values[24]; + if (mask & 0x100) + tmp_values[8] = b.values[23]; + if (mask & 0x200) + tmp_values[9] = b.values[22]; + if (mask & 0x400) + tmp_values[10] = b.values[21]; + if (mask & 0x800) + tmp_values[11] = b.values[20]; + if (mask & 0x1000) + tmp_values[12] = b.values[19]; + if (mask & 0x2000) + tmp_values[13] = b.values[18]; + if (mask & 0x4000) + tmp_values[14] = b.values[17]; + if (mask & 0x8000) + tmp_values[15] = b.values[16]; + if (mask & 0x10000) + tmp_values[16] = b.values[15]; + if (mask & 0x20000) + tmp_values[17] = b.values[14]; + if (mask & 0x40000) + tmp_values[18] = b.values[13]; + if (mask & 0x80000) + tmp_values[19] = b.values[12]; + if (mask & 0x100000) + tmp_values[20] = b.values[11]; + if (mask & 0x200000) + tmp_values[21] = b.values[10]; + if (mask & 0x400000) + tmp_values[22] = b.values[9]; + if (mask & 0x800000) + tmp_values[23] = b.values[8]; + if (mask & 0x1000000) + tmp_values[24] = b.values[7]; + if (mask & 0x2000000) + tmp_values[25] = b.values[6]; + if (mask & 0x4000000) + tmp_values[26] = b.values[5]; + if (mask & 0x8000000) + tmp_values[27] = b.values[4]; + if (mask & 0x10000000) + tmp_values[28] = b.values[3]; + if (mask & 0x20000000) + tmp_values[29] = b.values[2]; + if (mask & 0x40000000) + tmp_values[30] = b.values[1]; + if (mask & 0x80000000) + tmp_values[31] = b.values[0]; + return loadu(tmp_values); + } + static Vectorized blendv(const Vectorized& a, + const Vectorized& b, const Vectorized& mask) { + auto all_ones = _mm512_set1_epi16(0xFFFF); + auto mask_ = _mm512_cmp_epi16_mask(mask, all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_epi16(mask_, a.values, b.values); + } + template + static Vectorized arange(T base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step, + base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step, + base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step, + base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step, + base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step); + } + static Vectorized set(const Vectorized& a, + const Vectorized& b, int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + 
case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + case 16: + return blend<65535>(a, b); + case 17: + return blend<131071>(a, b); + case 18: + return blend<262143>(a, b); + case 19: + return blend<524287>(a, b); + case 20: + return blend<1048575>(a, b); + case 21: + return blend<2097151>(a, b); + case 22: + return blend<4194303>(a, b); + case 23: + return blend<8388607>(a, b); + case 24: + return blend<16777215>(a, b); + case 25: + return blend<33554431>(a, b); + case 26: + return blend<67108863>(a, b); + case 27: + return blend<134217727>(a, b); + case 28: + return blend<268435455>(a, b); + case 29: + return blend<536870911>(a, b); + case 30: + return blend<1073741823>(a, b); + case 31: + return blend<2147483647>(a, b); + } + return b; + } + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wignored-qualifiers" + + Vectorized map(SLEEF_CONST __m512 (*SLEEF_CONST_OLD vop)(__m512)) const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + const auto o1 = vop(lo); + const auto o2 = vop(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized isnan() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + __mmask16 lo_mask, hi_mask; + __m512 zero = _mm512_set1_ps(0.0); + __m512i zeroi = _mm512_castps_si512(zero); + lo_mask = _mm512_cmp_ps_mask(lo, zero, _CMP_UNORD_Q); + lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, lo_mask, 0xFFFF'FFFF)); + hi_mask = _mm512_cmp_ps_mask(hi, zero, _CMP_UNORD_Q); + hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, hi_mask, 0xFFFF'FFFF)); + return merge_compare_result(lo, hi); + } + #pragma clang diagnostic pop + Vectorized abs() const { + return _mm512_andnot_si512(_mm512_set1_epi16(0x8000), values); + } + Vectorized angle() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto angle_lambda = [](__m512 values) { + const auto zero_vec = _mm512_set1_ps(0.f); + const auto nan_vec = _mm512_set1_ps(NAN); + const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ); + const auto non_nan_mask_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec), + not_nan_mask, 0xFFFFFFFF); + const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(non_nan_mask_vec), + zero_vec, _CMP_EQ_OQ); + const auto pi = _mm512_set1_ps(c10::pi); + + const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi); + angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec); + return angle; + }; + auto o1 = angle_lambda(lo); + auto o2 = angle_lambda(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_epi16(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return map(Sleef_acosf16_u10); + } + Vectorized acosh() const { + return map(Sleef_acoshf16_u10); + } + Vectorized asin() const { + return map(Sleef_asinf16_u10); + } + Vectorized atan() const { + return map(Sleef_atanf16_u10); + } + Vectorized atanh() const { + return map(Sleef_atanhf16_u10); + } + Vectorized atan2(const Vectorized &b) const { + __m512 lo, hi; + __m512 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_atan2f16_u10(lo, b1); + auto o2 = Sleef_atan2f16_u10(hi, b2); + return cvt_from_fp32(o1, o2); + } + Vectorized copysign(const Vectorized &sign) const { + // copy sign 
bit (0x8000) from sign and remaining bits from values
+    __m512i mask_value = _mm512_set1_epi32(~0x80008000);
+    __m512i mask_signbit = _mm512_set1_epi32(0x80008000);
+    return Vectorized<T>(
+      _mm512_or_si512(
+        _mm512_and_si512(values, mask_value),
+        _mm512_and_si512(sign, mask_signbit)));
+  }
+  Vectorized<T> erf() const {
+    return map(Sleef_erff16_u10);
+  }
+  Vectorized<T> erfc() const {
+    return map(Sleef_erfcf16_u15);
+  }
+  Vectorized<T> erfinv() const {
+    __m512 lo, hi;
+    cvt_to_fp32<T>(values, lo, hi);
+    __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
+    for (int64_t i = 0; i < size() / 2; i++) {
+      tmp1[i] = calc_erfinv(tmp1[i]);
+      tmp2[i] = calc_erfinv(tmp2[i]);
+    }
+    auto o1 = _mm512_loadu_ps(tmp1);
+    auto o2 = _mm512_loadu_ps(tmp2);
+    return cvt_from_fp32<T>(o1, o2);
+  }
+  Vectorized<T> exp() const {
+    return map(Sleef_expf16_u10);
+  }
+  Vectorized<T> exp2() const {
+    return map(Sleef_exp2f16_u10);
+  }
+  Vectorized<T> expm1() const {
+    return map(Sleef_expm1f16_u10);
+  }
+  Vectorized<T> exp_u20() const {
+    return exp();
+  }
+  Vectorized<T> fmod(const Vectorized<T> & q) const {
+    __m512 x_lo, x_hi;
+    cvt_to_fp32<T>(values, x_lo, x_hi);
+    __m512 q_lo, q_hi;
+    // use the generic converter here; the bf16-only cvtbf16_fp32 would
+    // misread Float16 bits
+    cvt_to_fp32<T>(q.values, q_lo, q_hi);
+    auto o1 = Sleef_fmodf16(x_lo, q_lo);
+    auto o2 = Sleef_fmodf16(x_hi, q_hi);
+    return cvt_from_fp32<T>(o1, o2);
+  }
+  Vectorized<T> hypot(const Vectorized<T> &b) const {
+    __m512 lo, hi;
+    __m512 b1, b2;
+    cvt_to_fp32<T>(values, lo, hi);
+    cvt_to_fp32<T>(b.values, b1, b2);
+    auto o1 = Sleef_hypotf16_u05(lo, b1);
+    auto o2 = Sleef_hypotf16_u05(hi, b2);
+    return cvt_from_fp32<T>(o1, o2);
+  }
+  Vectorized<T> i0() const {
+    __m512 lo, hi;
+    cvt_to_fp32<T>(values, lo, hi);
+    __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
+    for (int64_t i = 0; i < size() / 2; i++) {
+      tmp1[i] = calc_i0(tmp1[i]);
+      tmp2[i] = calc_i0(tmp2[i]);
+    }
+    auto o1 = _mm512_loadu_ps(tmp1);
+    auto o2 = _mm512_loadu_ps(tmp2);
+    return cvt_from_fp32<T>(o1, o2);
+  }
+  Vectorized<T> i0e() const {
+    __m512 lo, hi;
+    cvt_to_fp32<T>(values, lo, hi);
+    constexpr auto sz = size();
+    __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
+
+    for (auto i = decltype(sz){0}; i < sz / 2; i++) {
+      tmp1[i] = calc_i0e(tmp1[i]);
+      tmp2[i] = calc_i0e(tmp2[i]);
+    }
+    const auto o1 = _mm512_loadu_ps(tmp1);
+    const auto o2 = _mm512_loadu_ps(tmp2);
+    return cvt_from_fp32<T>(o1, o2);
+  }
+  Vectorized<T> digamma() const {
+    __m512 lo, hi;
+    cvt_to_fp32<T>(values, lo, hi);
+    constexpr auto sz = size();
+    __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
+
+    for (auto i = decltype(sz){0}; i < sz / 2; i++) {
+      tmp1[i] = calc_digamma(tmp1[i]);
+      tmp2[i] = calc_digamma(tmp2[i]);
+    }
+    const auto o1 = _mm512_loadu_ps(tmp1);
+    const auto o2 = _mm512_loadu_ps(tmp2);
+    return cvt_from_fp32<T>(o1, o2);
+  }
+  Vectorized<T> igamma(const Vectorized<T> &x) const {
+    __m512 lo, hi;
+    __m512 xlo, xhi;
+    cvt_to_fp32<T>(values, lo, hi);
+    cvt_to_fp32<T>(x.values, xlo, xhi);
+    __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
+    __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
+    _mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
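+    // There is no vectorized igamma here, so both halves are spilled to
+    // stack arrays and evaluated lane by lane with calc_igamma below.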
for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]); + } + auto o1 = _mm512_loadu_ps(tmp1); + auto o2 = _mm512_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + + Vectorized igammac(const Vectorized &x) const { + __m512 lo, hi; + __m512 xlo, xhi; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(x.values, xlo, xhi); + __at_align__ float tmp1[size() / 2], tmp2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmp1), lo); + _mm512_storeu_ps(reinterpret_cast(tmp2), hi); + __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2]; + _mm512_storeu_ps(reinterpret_cast(tmpx1), xlo); + _mm512_storeu_ps(reinterpret_cast(tmpx2), xhi); + for (int64_t i = 0; i < size() / 2; ++i) { + tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]); + tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]); + } + auto o1 = _mm512_loadu_ps(tmp1); + auto o2 = _mm512_loadu_ps(tmp2); + return cvt_from_fp32(o1, o2); + } + Vectorized log() const { + return map(Sleef_logf16_u10); + } + Vectorized log2() const { + return map(Sleef_log2f16_u10); + } + Vectorized log10() const { + return map(Sleef_log10f16_u10); + } + Vectorized log1p() const { + return map(Sleef_log1pf16_u10); + } + Vectorized sin() const { + return map(Sleef_sinf16_u10); + } + Vectorized sinh() const { + return map(Sleef_sinhf16_u10); + } + Vectorized cos() const { + return map(Sleef_cosf16_u10); + } + Vectorized cosh() const { + return map(Sleef_coshf16_u10); + } + Vectorized ceil() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm512_ceil_ps(lo); + auto o2 = _mm512_ceil_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized floor() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm512_floor_ps(lo); + auto o2 = _mm512_floor_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized neg() const { + return _mm512_xor_si512(values, _mm512_set1_epi16(0x8000)); + } + Vectorized round() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return cvt_from_fp32(o1, o2); + } + Vectorized tan() const { + return map(Sleef_tanf16_u10); + } + Vectorized tanh() const { + return map(Sleef_tanhf16_u10); + } + Vectorized trunc() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + return cvt_from_fp32(o1, o2); + } + Vectorized lgamma() const { + return map(Sleef_lgammaf16_u10); + } + Vectorized sqrt() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto o1 = _mm512_sqrt_ps(lo); + auto o2 = _mm512_sqrt_ps(hi); + return cvt_from_fp32(o1, o2); + } + Vectorized reciprocal() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto ones = _mm512_set1_ps(1); + auto o1 = _mm512_div_ps(ones, lo); + auto o2 = _mm512_div_ps(ones, hi); + return cvt_from_fp32(o1, o2); + } + Vectorized rsqrt() const { + __m512 lo, hi; + cvt_to_fp32(values, lo, hi); + auto ones = _mm512_set1_ps(1); + auto o1 = _mm512_div_ps(ones, _mm512_sqrt_ps(lo)); + auto o2 = _mm512_div_ps(ones, _mm512_sqrt_ps(hi)); + return cvt_from_fp32(o1, o2); + } + Vectorized pow(const Vectorized &b) const { + __m512 lo, hi; + __m512 b1, b2; + cvt_to_fp32(values, lo, hi); + cvt_to_fp32(b.values, b1, b2); + auto o1 = Sleef_powf16_u10(lo, b1); + auto o2 = Sleef_powf16_u10(hi, b2); + 
return cvt_from_fp32(o1, o2); + } +private: + template + Vectorized inline binary_compare(const Vectorized& b, Op op) const { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + cvt_to_fp32(values, a_lo, a_hi); + cvt_to_fp32(b.values, b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvt_from_fp32(o1, o2); + } + +public: + Vectorized inline operator>(const Vectorized& other) const { + return binary_compare(other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); + } + Vectorized inline operator<(const Vectorized& other) const { + return binary_compare(other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); + } + Vectorized inline operator>=(const Vectorized& other) const { + return binary_compare(other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); + } + Vectorized inline operator<=(const Vectorized& other) const { + return binary_compare(other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); + } + Vectorized inline operator==(const Vectorized& other) const { + return binary_compare(other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_EQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); + } + Vectorized inline operator!=(const Vectorized& other) const { + return binary_compare(other, [](__m512 x, __m512 y) { + auto zero_vec = _mm512_set1_epi32(0); + auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_NEQ_UQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF)); + }); + } +}; + +template +static inline Vectorized binary_op_as_fp32(const Vectorized& a, const Vectorized& b, Op op) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + cvt_to_fp32(__m512i(a), a_lo, a_hi); + cvt_to_fp32(__m512i(b), b_lo, b_hi); + auto o1 = op(a_lo, b_lo); + auto o2 = op(a_hi, b_hi); + return cvt_from_fp32(o1, o2); +} + +template <> +class Vectorized: public Vectorized16 { +public: + using Vectorized16::Vectorized16; + + Vectorized frac() const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); }); +} +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, 
[](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
+}
+Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  return _mm512_and_si512(a, b);
+}
+Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  return _mm512_or_si512(a, b);
+}
+Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  return _mm512_xor_si512(a, b);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
+  return (*this == other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
+  return (*this != other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
+  return (*this > other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
+  return (*this >= other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
+  return (*this < other) & Vectorized<BFloat16>(1.0f);
+}
+
+inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
+  return (*this <= other) & Vectorized<BFloat16>(1.0f);
+}
+
+// frac. Implement this here so we can use subtraction
+inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
+  return *this - this->trunc();
+}
+
+// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  __m512 a_lo, a_hi;
+  __m512 b_lo, b_hi;
+  __m512i zero_vec = _mm512_set1_epi32(0);
+  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
+  cvtbf16_fp32(__m512i(b), b_lo, b_hi);
+  auto max_lo = _mm512_max_ps(a_lo, b_lo);
+  auto max_hi = _mm512_max_ps(a_hi, b_hi);
+  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
+  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
+  // expand the unordered-compare masks lane-wise; broadcasting the raw mask
+  // value with _mm512_set1_epi32 would tag the wrong lanes
+  auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
+                                                           0xFFFFFFFF));
+  auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
+                                                           0xFFFFFFFF));
+  // Exploit the fact that all-ones is a NaN.
+  auto o1 = _mm512_or_ps(max_lo, nan_lo);
+  auto o2 = _mm512_or_ps(max_hi, nan_hi);
+  return cvtfp32_bf16(o1, o2);
+}
+
+// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
+// either input is a NaN.
+template <>
+Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
+  __m512 a_lo, a_hi;
+  __m512 b_lo, b_hi;
+  __m512i zero_vec = _mm512_set1_epi32(0);
+  cvtbf16_fp32(__m512i(a), a_lo, a_hi);
+  cvtbf16_fp32(__m512i(b), b_lo, b_hi);
+  auto min_lo = _mm512_min_ps(a_lo, b_lo);
+  auto min_hi = _mm512_min_ps(a_hi, b_hi);
+  auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
+  auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
+  auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
+                                                           0xFFFFFFFF));
+  auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
+                                                           0xFFFFFFFF));
+  // Exploit the fact that all-ones is a NaN.
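+  // (A 32-bit lane of all ones decodes as a quiet NaN, so OR-ing the result
+  // with the widened unordered-compare mask forces exactly the lanes where
+  // either input was NaN to NaN.)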
+ auto o1 = _mm512_or_ps(min_lo, nan_lo); + auto o2 = _mm512_or_ps(min_hi, nan_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m512 a_lo, a_hi; + __m512 min_lo, min_hi; + __m512 max_lo, max_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(min), min_lo, min_hi); + cvtbf16_fp32(__m512i(max), max_lo, max_hi); + auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo)); + auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi)); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m512 a_lo, a_hi; + __m512 max_lo, max_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(max), max_lo, max_hi); + auto o1 = _mm512_min_ps(max_lo, a_lo); + auto o2 = _mm512_min_ps(max_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m512 a_lo, a_hi; + __m512 min_lo, min_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(min), min_lo, min_hi); + auto o1 = _mm512_max_ps(min_lo, a_lo); + auto o2 = _mm512_max_ps(min_hi, a_hi); + return cvtfp32_bf16(o1, o2); +} + +template <> +inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i))); + _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, BFloat16* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m512 a = _mm512_loadu_ps(&src[i]); + __m512 b = _mm512_loadu_ps(&src[i + 16]); + + __m512i bf = cvtfp32_bf16(a, b); + _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, BFloat16* dst, int64_t n) { + auto load_float = [](const double *src) -> __m512 { + // Load one float vector from an array of doubles + __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src)); + __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8)); + return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m512 a = load_float(&src[i]); + __m512 b = load_float(&src[i + 16]); + + __m512i bf = cvtfp32_bf16(a, b); + _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const Vectorized& c) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + __m512 c_lo, c_hi; + cvtbf16_fp32(__m512i(a), a_lo, a_hi); + cvtbf16_fp32(__m512i(b), b_lo, b_hi); + cvtbf16_fp32(__m512i(c), c_lo, c_hi); + auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_bf16(o1, o2); +} + +static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) { + __m512i r[8]; + // a0a1 a2a3 a4a5 a6a7 a8a9 a10a11 a12a13 a14a15 e0e1 e2e3 e4e5 e6e7 e8e9 e10e11 e12e13 e14e15 + // b0-b15 f0-f15 + // c0-c15 g0-g15 + // d0-d15 h0-h15 + // i0-i15 m0-m15 + // j0-j15 n0-n15 + // k0-k15 o0-o15 + // l0-l15 p0-p15 +#pragma unroll(4) + for 
(int i = 0; i < 4; i++) { + r[i] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i]), t[i + 4], 0x01); + r[i + 4] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i + 8]), t[i + 12], 0x01); + } + + // u0: a0a1 b0b1 a2a3 b2b3 a8a9 b8b9 a10a11 b10b11 e0e1 f0f1 e2e3 f2f3 e8e9 f8f9 e10e11 f10f11 + // u1: a4a5 b4b5 a6a7 b6b7 a12a13 b12b13 a14a15 b14b15 e4e5 f4f5 e6e7 f6f7 e12e13 f12f13 e14e15 f14f15 + // u2: c0c1 d0d1 c2c3 d2d3 c8c9 d8d9 c10c11 d10d11 g0g1 h0h1 g2g3 h2h3 g8g9 h8h9 g10g11 h10h11 + // u3: c4c5 d4b5 c6c7 d6b7 c12c13 d12d13 c14c15 d14d15 g4g5 h4h5 g6g7 h6h7 g12g13 h12h13 g14g15 h14h15 + // i j m n + // k l o p +#pragma unroll(4) + for (int i = 0; i < 8; i += 2) { + u[i] = _mm512_unpacklo_epi32(r[i], r[i + 1]); + u[i + 1] = _mm512_unpackhi_epi32(r[i], r[i + 1]); + } + + // r0: a0a1 b0b1 c0c1 d0d1 a8a9 b8b9 c8c9 d8d9 e0e1 f0f1 g0g1 h0h1 e8e9 f8f9 g8g9 h8h9 + // r1: a2a3 b2b3 c2c3 d2d3 a10a11 b10b11 c10c11 d10d11 e2e3 f2f3 g2g3 h2h3 e10e11 f10f11 g10g11 h10h11 + // r2: a4a5 b4b5 c4c5 d4b5 a12a13 b12b13 c12c13 d12d13 + // r3: a6a7 b6b7 c6c7 d6b7 a14a15 b14b15 c14c15 d14d15 + // r4: i j k l m n o p + r[0] = _mm512_unpacklo_epi64(u[0], u[2]); + r[1] = _mm512_unpackhi_epi64(u[0], u[2]); + r[2] = _mm512_unpacklo_epi64(u[1], u[3]); + r[3] = _mm512_unpackhi_epi64(u[1], u[3]); + r[4] = _mm512_unpacklo_epi64(u[4], u[6]); + r[5] = _mm512_unpackhi_epi64(u[4], u[6]); + r[6] = _mm512_unpacklo_epi64(u[5], u[7]); + r[7] = _mm512_unpackhi_epi64(u[5], u[7]); + + __m512i const1 = _mm512_set_epi32( + 0x00370035, + 0x00330031, + 0x00270025, + 0x00230021, + 0x00170015, + 0x00130011, + 0x00070005, + 0x00030001, + 0x00360034, + 0x00320030, + 0x00260024, + 0x00220020, + 0x00160014, + 0x00120010, + 0x00060004, + 0x00020000); + __m512i const2 = _mm512_set_epi32( + 0x003f003d, + 0x003b0039, + 0x002f002d, + 0x002b0029, + 0x001f001d, + 0x001b0019, + 0x000f000d, + 0x000b0009, + 0x003e003c, + 0x003a0038, + 0x002e002c, + 0x002a0028, + 0x001e001c, + 0x001a0018, + 0x000e000c, + 0x000a0008); + // merge values from two regs + // 0-- 1-- + // 8-- 9-- + // 2-- 3-- + // 10-- 11-- + // 4-- 5-- + // 12-- 13-- + // 6-- 7-- + // 14-- 15-- +#pragma unroll(4) + for (int i = 0; i < 4; i++) { + u[i] = _mm512_permutex2var_epi16(r[i], const1, r[i + 4]); + u[i + 4] = _mm512_permutex2var_epi16(r[i], const2, r[i + 4]); + } +} + +// TODO(Leslie): Add the AVX2 Version of transpose_mxn for BFloat16 and Float16 +// Code referred to FBGEMM: +// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607 +template<> +inline void transpose_mxn( + const BFloat16* src, + int64_t ld_src, + BFloat16* dst, + int64_t ld_dst) { + __m256i t[16]; + // load from src to registers + // a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 + // b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 + // c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 + // d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + // e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15 + // f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 + // g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15 + // h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15 + // i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 + // j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15 + // k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15 + // l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15 + // m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15 + // n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 
n10 n11 n12 n13 n14 n15 + // o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15 + // p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 +#pragma unroll(16) + for (int i = 0; i < 16; i++) { + t[i] = _mm256_loadu_si256(reinterpret_cast(src + i * ld_src)); + } + + __m512i u[8]; + _transpose_mxn_half_16_16(t, u); + +#pragma unroll(8) + for (int i = 0; i < 8; i++) { + _mm256_storeu_si256( + reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst), + _mm512_extracti32x8_epi32(u[i], 0x0)); + _mm256_storeu_si256( + reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst), + _mm512_extracti32x8_epi32(u[i], 0x01)); + } +} + +// Code referred to FBGEMM: +// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607 +template<> +inline void transpose_mxn( + const Half* src, + int64_t ld_src, + Half* dst, + int64_t ld_dst) { + __m256i t[16]; + // load from src to registers + // Same matrix indices as above transpose_mxn +#pragma unroll(16) + for (int i = 0; i < 16; i++) { + t[i] = _mm256_loadu_si256(reinterpret_cast(src + i * ld_src)); + } + + __m512i u[8]; + _transpose_mxn_half_16_16(t, u); + +#pragma unroll(8) + for (int i = 0; i < 8; i++) { + _mm256_storeu_si256( + reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst), + _mm512_extracti32x8_epi32(u[i], 0x0)); + _mm256_storeu_si256( + reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst), + _mm512_extracti32x8_epi32(u[i], 0x01)); + } +} + +static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) { + // t[0]: 0 32 1 33 2 34 3 35 8 40 9 41 10 42 11 43 16 ... 59 + // t[1]: 4 36 5 37 6 38 7 39 12 44 13 45 14 46 15 47 20 ... 63 + // t[2]: 64 96 65 97 66 98 67 99 72 104 73 105 74 106 75 ... 123 + // t[3]: 68 100 69 101 70 102 71 103 76 108 77 109 78 110 79 111 84 ... 127 + // t[4]: 128 160 129 161 130 162 131 163 136 168 137 169 138 170 139 171 144 ... 187 + // t[5]: 132 164 133 165 134 166 135 167 140 172 141 173 142 174 143 175 148 ... 191 + // t[6]: 192 224 193 225 194 226 195 227 200 232 201 233 202 234 203 235 208 ... 251 + // t[7]: 196 228 197 229 198 230 199 231 204 236 205 237 206 238 207 239 212 ... 255 + // t[8]: 256 288 257 289 258 290 259 291 264 296 265 297 266 298 267 299 272 ... 315 + // t[9]: 260 292 261 293 262 294 263 295 268 300 269 301 270 302 271 303 276 ... 319 + // t[10]: 320 352 321 353 322 354 323 355 328 360 329 361 330 362 331 363 336 ... 379 + // t[11]: 324 356 325 357 326 358 327 359 332 364 333 365 334 366 335 367 340 ... 383 + // t[12]: 384 416 385 417 386 418 387 419 392 424 393 425 394 426 395 427 400 ... 443 + // t[13]: 388 420 389 421 390 422 391 423 396 428 397 429 398 430 399 431 404 ... 447 + // t[14]: 448 480 449 481 450 482 451 483 456 488 457 489 458 490 459 491 464 ... 507 + // t[15]: 452 484 453 485 454 486 455 487 460 492 461 493 462 494 463 495 468 ... 511 + // t[16]: 512 544 513 545 514 546 515 547 520 552 521 553 522 554 523 555 528 ... 571 + // ... + // t[31]: 964 996 965 997 966 998 967 999 972 1004 973 1005 974 1006 975 1007 980 ... 1023 +#pragma unroll(16) + for (int i = 0; i < 16; ++i) { + d[i * 2] = _mm512_unpacklo_epi16(r[i * 2], r[i * 2 + 1]); + d[i * 2 + 1] = _mm512_unpackhi_epi16(r[i * 2], r[i * 2 + 1]); + } + + // t[0]: 0 32 64 96 1 33 65 97 8 40 72 104 9 41 73 105 16 ... 121 + // t[1]: 2 34 66 98 3 35 67 99 10 42 74 106 11 43 75 107 18 ... 123 + // t[2]: 4 36 68 100 5 37 69 101 12 44 76 108 13 45 77 109 20 ... 125 + // t[3]: 6 38 70 102 7 39 71 103 14 46 78 110 15 47 79 111 22 ... 
127 + // t[4]: 128 160 192 224 129 161 193 225 136 168 200 232 137 169 201 233 144 ... 249 + // t[5]: 130 162 194 226 131 163 195 227 138 170 202 234 139 171 203 235 146 ... 251 + // t[6]: 132 164 196 228 133 165 197 229 140 172 204 236 141 173 205 237 148 ... 253 + // t[7]: 134 166 198 230 135 167 199 231 142 174 206 238 143 175 207 239 150 ... 255 + // t[8]: 256 288 320 352 257 289 321 353 264 296 328 360 265 297 329 361 272 ... 377 + // t[9]: 258 290 322 354 259 291 323 355 266 298 330 362 267 299 331 363 274 ... 379 + // t[10]: 260 292 324 356 261 293 325 357 268 300 332 364 269 301 333 365 276 ... 381 + // t[11]: 262 294 326 358 263 295 327 359 270 302 334 366 271 303 335 367 278 ... 383 + // t[12]: 384 416 448 480 385 417 449 481 392 424 456 488 393 425 457 489 400 ... 505 + // t[13]: 386 418 450 482 387 419 451 483 394 426 458 490 395 427 459 491 402 ... 507 + // t[14]: 388 420 452 484 389 421 453 485 396 428 460 492 397 429 461 493 404 ... 509 + // t[15]: 390 422 454 486 391 423 455 487 398 430 462 494 399 431 463 495 406 ... 511 + // t[16]: 512 544 576 608 513 545 577 609 520 552 584 616 521 553 585 617 528 ... 633 + // ... + // t[31]: 902 934 966 998 903 935 967 999 910 942 974 1006 911 943 975 1007 918 ... 1023 +#pragma unroll(8) + for (int i = 0; i < 8; ++i) { + r[i * 4] = _mm512_unpacklo_epi32(d[i * 4], d[i * 4 + 2]); + r[i * 4 + 1] = _mm512_unpackhi_epi32(d[i * 4], d[i * 4 + 2]); + r[i * 4 + 2] = _mm512_unpacklo_epi32(d[i * 4 + 1], d[i * 4 + 3]); + r[i * 4 + 3] = _mm512_unpackhi_epi32(d[i * 4 + 1], d[i * 4 + 3]); + } + + // t[0]: 0 32 64 96 128 160 192 224 8 40 72 104 136 168 200 232 16 ... 248 + // t[1]: 1 33 65 97 129 161 193 225 9 41 73 105 137 169 201 233 17 ... 249 + // t[2]: 2 34 66 98 130 162 194 226 10 42 74 106 138 170 202 234 18 ... 250 + // t[3]: 3 35 67 99 131 163 195 227 11 43 75 107 139 171 203 235 19 ... 251 + // t[4]: 4 36 68 100 132 164 196 228 12 44 76 108 140 172 204 236 20 ... 252 + // t[5]: 5 37 69 101 133 165 197 229 13 45 77 109 141 173 205 237 21 ... 253 + // t[6]: 6 38 70 102 134 166 198 230 14 46 78 110 142 174 206 238 22 ... 254 + // t[7]: 7 39 71 103 135 167 199 231 15 47 79 111 143 175 207 239 23 ... 255 + // t[8]: 256 288 320 352 384 416 448 480 264 296 328 360 392 424 456 488 272 ... 504 + // t[9]: 257 289 321 353 385 417 449 481 265 297 329 361 393 425 457 489 273 ... 505 + // t[10]: 258 290 322 354 386 418 450 482 266 298 330 362 394 426 458 490 274 ... 506 + // t[11]: 259 291 323 355 387 419 451 483 267 299 331 363 395 427 459 491 275 ... 507 + // t[12]: 260 292 324 356 388 420 452 484 268 300 332 364 396 428 460 492 276 ... 508 + // t[13]: 261 293 325 357 389 421 453 485 269 301 333 365 397 429 461 493 277 ... 509 + // t[14]: 262 294 326 358 390 422 454 486 270 302 334 366 398 430 462 494 278 ... 510 + // t[15]: 263 295 327 359 391 423 455 487 271 303 335 367 399 431 463 495 279 ... 511 + // t[16]: 512 544 576 608 640 672 704 736 520 552 584 616 648 680 712 744 528 ... 760 + // ... + // t[31]: 775 807 839 871 903 935 967 999 783 815 847 879 911 943 975 1007 791 ... 
1023 +#pragma unroll(4) + for (int i = 0; i < 4; ++i) { + d[i * 8] = _mm512_unpacklo_epi64(r[i * 8], r[i * 8 + 4]); + d[i * 8 + 1] = _mm512_unpackhi_epi64(r[i * 8], r[i * 8 + 4]); + d[i * 8 + 2] = _mm512_unpacklo_epi64(r[i * 8 + 1], r[i * 8 + 5]); + d[i * 8 + 3] = _mm512_unpackhi_epi64(r[i * 8 + 1], r[i * 8 + 5]); + d[i * 8 + 4] = _mm512_unpacklo_epi64(r[i * 8 + 2], r[i * 8 + 6]); + d[i * 8 + 5] = _mm512_unpackhi_epi64(r[i * 8 + 2], r[i * 8 + 6]); + d[i * 8 + 6] = _mm512_unpacklo_epi64(r[i * 8 + 3], r[i * 8 + 7]); + d[i * 8 + 7] = _mm512_unpackhi_epi64(r[i * 8 + 3], r[i * 8 + 7]); + } + + // t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 16 ... 496 + // t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 17 ... 497 + // t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 18 ... 498 + // t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 19 ... 499 + // t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 20 ... 500 + // t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 21 ... 501 + // t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 22 ... 502 + // t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 23 ... 503 + // t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 24 ... 504 + // t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 25 ... 505 + // t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 26 ... 506 + // t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 27 ... 507 + // t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 28 ... 508 + // t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 29 ... 509 + // t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 30 ... 510 + // t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 31 ... 511 + // t[16]: 512 544 576 608 640 672 704 736 768 800 832 864 896 928 960 992 528 ... 1008 + // ... + // t[31]: 527 559 591 623 655 687 719 751 783 815 847 879 911 943 975 1007 543 ... 1023 + __m512i const1 = _mm512_set_epi64( + 0x000000000000000d, + 0x000000000000000c, + 0x0000000000000005, + 0x0000000000000004, + 0x0000000000000009, + 0x0000000000000008, + 0x0000000000000001, + 0x0000000000000000); + __m512i const2 = _mm512_set_epi64( + 0x000000000000000f, + 0x000000000000000e, + 0x0000000000000007, + 0x0000000000000006, + 0x000000000000000b, + 0x000000000000000a, + 0x0000000000000003, + 0x0000000000000002); +#pragma unroll(8) + for (int i = 0; i < 8; ++i) { + r[i] = _mm512_permutex2var_epi64(d[i], /*idx*/const1, d[i + 8]); + r[i + 8] = _mm512_permutex2var_epi64(d[i], /*idx*/const2, d[i + 8]); + r[i + 16] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const1, d[i + 24]); + r[i + 24] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const2, d[i + 24]); + } + + // t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 512 544 ... 992 + // t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 513 545 ... 993 + // t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 514 546 ... 994 + // t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 515 547 ... 995 + // t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 516 548 ... 996 + // t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 517 549 ... 997 + // t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 518 550 ... 
998 + // t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 519 551 ... 999 + // t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 520 552 ... 1000 + // t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 521 553 ... 1001 + // t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 522 554 ... 1002 + // t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 523 555 ... 1003 + // t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 524 556 ... 1004 + // t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 525 557 ... 1005 + // t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 526 558 ... 1006 + // t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 527 559 ... 1007 + // t[16]: 16 48 80 112 144 176 208 240 272 304 336 368 400 432 464 496 528 560 ... 1008 + // ... + // t[31]: 31 63 95 127 159 191 223 255 287 319 351 383 415 447 479 511 543 575 ... 1023 + __m512i const3 = _mm512_set_epi64( + 0x000000000000000b, + 0x000000000000000a, + 0x0000000000000009, + 0x0000000000000008, + 0x0000000000000003, + 0x0000000000000002, + 0x0000000000000001, + 0x0000000000000000); + __m512i const4 = _mm512_set_epi64( + 0x000000000000000f, + 0x000000000000000e, + 0x000000000000000d, + 0x000000000000000c, + 0x0000000000000007, + 0x0000000000000006, + 0x0000000000000005, + 0x0000000000000004); +#pragma unroll(16) + for (int i = 0; i < 16; ++i) { + d[i] = _mm512_permutex2var_epi64(r[i], /*idx*/const3, r[i + 16]); + d[i + 16] = _mm512_permutex2var_epi64(r[i], /*idx*/const4, r[i + 16]); + } +} + +// Code referred to FBGEMM: +// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6 +template<> +inline void transpose_mxn( + const BFloat16* src, + int64_t ld_src, + BFloat16* dst, + int64_t ld_dst) { + // Load from memory + __m512i r[32]; +#pragma unroll(32) + for (int i = 0; i < 32; ++i) { + r[i] = _mm512_loadu_si512(reinterpret_cast(src + i* ld_src)); + } + + __m512i d[32]; + _transpose_mxn_half_32_32(r, d); + + // Store to dst +#pragma unroll(32) + for (int i = 0; i < 32; ++i) { + _mm512_storeu_si512(dst + i* ld_dst, d[i]); + } +} + +template<> +inline void transpose_mxn( + const Half* src, + int64_t ld_src, + Half* dst, + int64_t ld_dst) { + // Load from memory + __m512i r[32]; +#pragma unroll(32) + for (int i = 0; i < 32; ++i) { + r[i] = _mm512_loadu_si512(reinterpret_cast(src + i* ld_src)); + } + + __m512i d[32]; + _transpose_mxn_half_32_32(r, d); + + // Store to dst +#pragma unroll(32) + for (int i = 0; i < 32; ++i) { + _mm512_storeu_si512(dst + i* ld_dst, d[i]); + } +} + +template <> +class Vectorized: public Vectorized16 { +public: + using Vectorized16::Vectorized16; + + Vectorized frac() const; + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); }); +} +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); }); +} +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return 
binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); }); +} +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); }); +} + +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm512_and_si512(a, b); +} +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm512_or_si512(a, b); +} +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm512_xor_si512(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + cvtfp16_fp32(__m512i(a), a_lo, a_hi); + cvtfp16_fp32(__m512i(b), b_lo, b_hi); + auto max_lo = _mm512_max_ps(a_lo, b_lo); + auto max_hi = _mm512_max_ps(a_hi, b_hi); + auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q); + auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask)); + auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask)); + // Exploit the fact that all-ones is a NaN. + auto o1 = _mm512_or_ps(max_lo, nan_lo); + auto o2 = _mm512_or_ps(max_hi, nan_hi); + return cvtfp32_fp16(o1, o2); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + __m512i zero_vec = _mm512_set1_epi32(0); + cvtfp16_fp32(__m512i(a), a_lo, a_hi); + cvtfp16_fp32(__m512i(b), b_lo, b_hi); + auto min_lo = _mm512_min_ps(a_lo, b_lo); + auto min_hi = _mm512_min_ps(a_hi, b_hi); + auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q); + auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q); + auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask, + 0xFFFFFFFF)); + auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask, + 0xFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. 
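+  // _mm512_mask_set1_epi32(zero_vec, mask, 0xFFFFFFFF) expands the 16-bit compare
+  // mask into a full vector: lanes selected by the mask become 0xFFFFFFFF (a NaN bit
+  // pattern once reinterpreted as float), unselected lanes stay zero so the OR below
+  // leaves the min result untouched on ordered lanes.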
+ auto o1 = _mm512_or_ps(min_lo, nan_lo); + auto o2 = _mm512_or_ps(min_hi, nan_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp(const Vectorized& a, + const Vectorized& min, const Vectorized& max) { + __m512 a_lo, a_hi; + __m512 min_lo, min_hi; + __m512 max_lo, max_hi; + cvtfp16_fp32(__m512i(a), a_lo, a_hi); + cvtfp16_fp32(__m512i(min), min_lo, min_hi); + cvtfp16_fp32(__m512i(max), max_lo, max_hi); + auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo)); + auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi)); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + __m512 a_lo, a_hi; + __m512 max_lo, max_hi; + cvtfp16_fp32(__m512i(a), a_lo, a_hi); + cvtfp16_fp32(__m512i(max), max_lo, max_hi); + auto o1 = _mm512_min_ps(max_lo, a_lo); + auto o2 = _mm512_min_ps(max_hi, a_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + __m512 a_lo, a_hi; + __m512 min_lo, min_hi; + cvtfp16_fp32(__m512i(a), a_lo, a_hi); + cvtfp16_fp32(__m512i(min), min_lo, min_hi); + auto o1 = _mm512_max_ps(min_lo, a_lo); + auto o2 = _mm512_max_ps(min_hi, a_hi); + return cvtfp32_fp16(o1, o2); +} + +template <> +inline void convert(const Half* src, Half* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i))); + _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +inline void convert(const float* src, Half* dst, int64_t n) { + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m512 a = _mm512_loadu_ps(&src[i]); + __m512 b = _mm512_loadu_ps(&src[i + 16]); + + __m512i bf = cvtfp32_fp16(a, b); + _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +inline void convert(const double* src, Half* dst, int64_t n) { + auto load_float = [](const double *src) -> __m512 { + // Load one float vector from an array of doubles + __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src)); + __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8)); + return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1); + }; + + int64_t i; + for (i = 0; i + Vectorized::size() <= n; i += Vectorized::size()) { + __m512 a = load_float(&src[i]); + __m512 b = load_float(&src[i + 16]); + + __m512i bf = cvtfp32_fp16(a, b); + _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf); + } + for (; i < n; i++) { + dst[i] = c10::convert(src[i]); + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, + const Vectorized& b, const Vectorized& c) { + __m512 a_lo, a_hi; + __m512 b_lo, b_hi; + __m512 c_lo, c_hi; + cvtfp16_fp32(__m512i(a), a_lo, a_hi); + cvtfp16_fp32(__m512i(b), b_lo, b_hi); + cvtfp16_fp32(__m512i(c), c_lo, c_hi); + auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo); + auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi); + return cvtfp32_fp16(o1, o2); +} + +#define CONVERT_VECTORIZED_INIT(type, name) \ +inline std::tuple, Vectorized> convert_##name##_float(const Vectorized& a) { \ + __m512 o1, o2; \ + cvt_to_fp32(__m512i(a), o1, o2); \ + return std::make_tuple(o1, o2); \ +} \ +\ +inline Vectorized convert_float_##name(const Vectorized& a, const Vectorized& b) { \ + return cvt_from_fp32(__m512(a), __m512(b)); \ +} 
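+// For orientation, a sketch of what the macro above generates for the BFloat16
+// instantiation below (types reconstructed here; the real expansion is token-for-token
+// from the macro body):
+//   std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float(const Vectorized<BFloat16>& a);
+//   Vectorized<BFloat16> convert_float_bfloat16(const Vectorized<float>& a, const Vectorized<float>& b);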
+CONVERT_VECTORIZED_INIT(BFloat16, bfloat16); +CONVERT_VECTORIZED_INIT(Half, half); + +#else //defined(CPU_CAPABILITY_AVX512) + +#define CONVERT_NON_VECTORIZED_INIT(type, name) \ +inline std::tuple, Vectorized> convert_##name##_float(const Vectorized& a) { \ + constexpr int64_t K = Vectorized::size(); \ + __at_align__ float arr[K]; \ + __at_align__ type arr2[K]; \ + a.store(arr2); \ + for (const auto k : c10::irange(K)) { \ + arr[k] = c10::convert(arr2[k]); \ + } \ + return std::make_tuple( \ + Vectorized::loadu(arr), \ + Vectorized::loadu(arr + Vectorized::size())); \ +} \ +\ +inline Vectorized convert_float_##name(const Vectorized& a, const Vectorized& b) { \ + constexpr int64_t K = Vectorized::size(); \ + __at_align__ float arr[K]; \ + __at_align__ type arr2[K]; \ + a.store(arr); \ + b.store(arr + Vectorized::size()); \ + for (const auto k : c10::irange(K)) { \ + arr2[k] = c10::convert(arr[k]); \ + } \ + return Vectorized::loadu(arr2); \ +} +CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16); +CONVERT_NON_VECTORIZED_INIT(Half, half); + +#endif // defined(CPU_CAPABILITY_AVX512) + +#if defined(CPU_CAPABILITY_AVX512) +#define LOAD_FP32_VECTORIZED_INIT(type, name) \ +inline void load_fp32_from_##name(const type *data, Vectorized& out) { \ + auto values = _mm256_loadu_si256(reinterpret_cast(data)); \ + __m512 out_values; \ + cvt_to_fp32(values, out_values); \ + out = out_values; \ +} \ +\ +inline void load_fp32_from_##name(const type *data, Vectorized& out1, Vectorized& out2) { \ + auto vec = Vectorized::loadu(data); \ + __m512 out1_values, out2_values; \ + cvt_to_fp32(vec, out1_values, out2_values); \ + out1 = out1_values; \ + out2 = out2_values; \ +} +LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16); +LOAD_FP32_VECTORIZED_INIT(Half, fp16); + +#else // defined(CPU_CAPABILITY_AVX512) +#define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \ +inline void load_fp32_from_##name(const type *data, Vectorized& out) { \ + __at_align__ float values[Vectorized::size()]; \ + for (const auto k : c10::irange(Vectorized::size())) { \ + values[k] = data[k]; \ + } \ + out = Vectorized::loadu(values); \ +} \ +\ +inline void load_fp32_from_##name(const type *data, Vectorized& out1, Vectorized& out2) { \ + load_fp32_from_##name(data, out1); \ + data += Vectorized::size(); \ + load_fp32_from_##name(data, out2); \ +} +LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16); +LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16); + +#endif +}}} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h new file mode 100644 index 0000000000000000000000000000000000000000..c35204f9da23e68f545b2db65ae6a2e2542ae2a7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h @@ -0,0 +1,513 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX512) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) + +template <> class Vectorized> { +private: + __m512d values; + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 4; + } + Vectorized() {} + Vectorized(__m512d v) : values(v) {} + Vectorized(c10::complex val) { + double real_value = val.real(); + double imag_value = val.imag(); + values = _mm512_setr_pd(real_value, imag_value, real_value, imag_value, + real_value, imag_value, real_value, imag_value); + } + Vectorized(c10::complex val1, c10::complex val2, + c10::complex val3, c10::complex val4) { + values = _mm512_setr_pd(val1.real(), val1.imag(), + val2.real(), val2.imag(), + val3.real(), val3.imag(), + val4.real(), val4.imag()); + } + operator __m512d() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, + const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + // NOLINTNEXTLINE(clang-diagnostic-warning) + switch (mask) { + case 0: + return a; + case 1: + return _mm512_mask_blend_pd(0x03, a.values, b.values); //b0000 0001 = b0000 0011 + case 2: + return _mm512_mask_blend_pd(0x0C, a.values, b.values); //b0000 0010 = b0000 1100 + case 3: + return _mm512_mask_blend_pd(0x0F, a.values, b.values); //b0000 0011 = b0000 1111 + case 4: + return _mm512_mask_blend_pd(0x30, a.values, b.values); //b0000 0100 = b0011 0000 + case 5: + return _mm512_mask_blend_pd(0x33, a.values, b.values); //b0000 0101 = b0011 0011 + case 6: + return _mm512_mask_blend_pd(0x3C, a.values, b.values); //b0000 0110 = b0011 1100 + case 7: + return _mm512_mask_blend_pd(0x3F, a.values, b.values); //b0000 0111 = b0011 1111 + case 8: + return _mm512_mask_blend_pd(0xC0, a.values, b.values); //b0000 1000 = b1100 0000 + case 9: + return _mm512_mask_blend_pd(0xC3, a.values, b.values); //b0000 1001 = b1100 0011 + case 10: + return _mm512_mask_blend_pd(0xCC, a.values, b.values); //b0000 1010 = b1100 1100 + case 11: + return _mm512_mask_blend_pd(0xCF, a.values, b.values); //b0000 1011 = b1100 1111 + case 12: + return _mm512_mask_blend_pd(0xF0, a.values, b.values); //b0000 1100 = b1111 0000 + case 13: + return _mm512_mask_blend_pd(0xF3, a.values, b.values); //b0000 1101 = b1111 0011 + case 14: + return _mm512_mask_blend_pd(0xFC, a.values, b.values); //b0000 1110 = b1111 1100 + case 15: + return _mm512_mask_blend_pd(0xFF, a.values, b.values); //b0000 1111 = b1111 1111 + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, + const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm512_unpacklo_pd(mask.values, mask.values); + auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF); + auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask_), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_pd(mmask, a.values, b.values); + } + template + static Vectorized> arange(c10::complex base = 0., + step_t step = static_cast(1)) { + return Vectorized>(base, + base + c10::complex(1)*step, + base + c10::complex(2)*step, + base + c10::complex(3)*step); + } + static Vectorized> set(const Vectorized>& a, + const Vectorized>& b, + int64_t count = 
size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_pd(reinterpret_cast(ptr)); + + __at_align__ double tmp_values[2*size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm512_load_pd(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + double tmp_values[2*size()]; + _mm512_storeu_pd(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + // AVX512 doesn't have horizontal add & horizontal sub instructions. + // TODO: hadd_pd() & hsub_pd() may have scope for improvement. + static inline __m512d hadd_pd(__m512d a, __m512d b) { + __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0); + __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1); + return _mm512_add_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b), + _mm512_mask_permutex2var_pd(a, 0xff, idx2, b)); + } + static inline __m512d hsub_pd(__m512d a, __m512d b) { + __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0); + __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1); + return _mm512_sub_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b), + _mm512_mask_permutex2var_pd(a, 0xff, idx2, b)); + } + __m512d abs_2_() const { + auto val_2 = _mm512_mul_pd(values, values); // a*a b*b + return hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b + } + __m512d abs_() const { + auto real = _mm512_movedup_pd(values); // real real + // movehdup_pd does not exist... 
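+    // _mm512_movedup_pd above broadcast each real part into both slots of its complex
+    // pair; the 0xff permute below does the same for each imaginary part, so Sleef's
+    // vectorized hypot (the 0.5-ULP variant) yields sqrt(a*a + b*b) in both slots of
+    // every complex element.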
+ auto imag = _mm512_permute_pd(values, 0xff); // imag imag + return Sleef_hypotd8_u05(real, imag); // abs abs + } + Vectorized> abs() const { + const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm512_and_pd(abs_(), real_mask); // abs 0 + } + __m512d angle_() const { + //angle = atan2(b, a) + auto b_a = _mm512_permute_pd(values, 0x55); // b a + return Sleef_atan2d8_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + auto angle = _mm512_permute_pd(angle_(), 0x55); // angle 90-angle + return _mm512_and_pd(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm512_setzero_pd(); + auto mask = _mm512_cmp_pd_mask(abs, zero, _CMP_EQ_OQ); + auto div = _mm512_div_pd(values, abs); + return _mm512_mask_blend_pd(mask, div, zero); + } + __m512d real_() const { + const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000, + 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)); + return _mm512_and_pd(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m512d imag_() const { + const __m512d imag_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF, + 0x0000000000000000, 0xFFFFFFFFFFFFFFFF)); + return _mm512_and_pd(values, imag_mask); + } + Vectorized> imag() const { + return _mm512_permute_pd(imag_(), 0x55); //b a + } + __m512d conj_() const { + const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + return _mm512_xor_pd(values, sign_mask); // a -b + } + Vectorized> conj() const { + return conj_(); + } + Vectorized> log() const { + // Most trigonometric ops use the log() op to improve complex number performance. 
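+    // map() falls back to scalar evaluation: it stores the vector to a temporary
+    // array, applies std::log to each complex element, and reloads the result, so
+    // log() itself is not vectorized.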
+ return map(std::log); + } + Vectorized> log2() const { + const __m512d log2_ = _mm512_set1_pd(std::log(2)); + return _mm512_div_pd(log(), log2_); + } + Vectorized> log10() const { + const __m512d log10_ = _mm512_set1_pd(std::log(10)); + return _mm512_div_pd(log(), log10_); + } + Vectorized> log1p() const { + return map(std::log1p); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m512d one = _mm512_set1_pd(1); + + auto conj = conj_(); + auto b_a = _mm512_permute_pd(conj, 0x55); //-b a + auto ab = _mm512_mul_pd(conj, b_a); //-ab -ab + auto im = _mm512_add_pd(ab, ab); //-2ab -2ab + + auto val_2 = _mm512_mul_pd(values, values); // a*a b*b + auto re = hsub_pd(val_2, _mm512_permute_pd(val_2, 0x55)); // a*a-b*b b*b-a*a + re = _mm512_sub_pd(one, re); + + auto root = Vectorized(_mm512_mask_blend_pd(0xAA, re, im)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm512_add_pd(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm512_permute_pd(ln.values, 0x55)).conj(); //-i*ln() + } + Vectorized> acos() const { + // acos(x) = pi/2 - asin(x) + constexpr auto pi_2d = c10::pi / 2; + const __m512d pi_2 = _mm512_setr_pd(pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0); + return _mm512_sub_pd(pi_2, asin()); + } + Vectorized> atan() const; + Vectorized> atanh() const { + return map(std::atanh); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expd8_u10(values); //exp(a) exp(b) + exp = _mm512_mask_blend_pd(0xAA, exp, _mm512_permute_pd(exp, 0x55)); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosd8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm512_mask_blend_pd(0xAA, _mm512_permute_pd(sin_cos.y, 0x55), + sin_cos.x); //cos(b) sin(b) + return _mm512_mul_pd(exp, cos_sin); + } + Vectorized> exp2() const { + // Use identity 2**x = exp(log(2) * x) + const __m512d ln_2 = _mm512_set1_pd(c10::ln_2); + Vectorized> scaled_values = _mm512_mul_pd(values, ln_2); + return scaled_values.exp(); + } + Vectorized> expm1() const { + return map(std::expm1); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm512_ceil_pd(values); + } + Vectorized> floor() const { + return _mm512_floor_pd(values); + } + Vectorized> neg() const { + auto zero = _mm512_setzero_pd(); + return _mm512_sub_pd(zero, values); + } + Vectorized> round() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask, + 0xFFFFFFFFFFFFFFFF)); + } + Vectorized> operator!=(const Vectorized>& other) const { + auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask, + 0xFFFFFFFFFFFFFFFF)); + } + Vectorized> operator<(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, + const Vectorized> &b) { + return _mm512_add_pd(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, + const Vectorized> &b) { + return _mm512_sub_pd(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, + const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm512_mul_pd(a, b); //ac bd + + auto d_c = _mm512_permute_pd(b, 0x55); //d c + d_c = _mm512_xor_pd(sign_mask, d_c); //d -c + auto ad_bc = _mm512_mul_pd(a, d_c); //ad -bc + + auto ret = Vectorized>::hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, + const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + auto mask = _mm512_set1_pd(-0.f); + auto fabs_cd = _mm512_andnot_pd(mask, b); // |c| |d| + auto fabs_dc = _mm512_permute_pd(fabs_cd, 0x55); // |d| |c| + auto scale = _mm512_rcp14_pd(_mm512_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc + auto a2 = _mm512_mul_pd(a, scale); // a/sc b/sc + auto b2 = _mm512_mul_pd(b, scale); // c/sc d/sc + auto acbd2 = _mm512_mul_pd(a2, b2); + + const __m512d sign_mask = _mm512_setr_pd(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0); + auto dc2 = _mm512_permute_pd(b2, 0x55); // d/sc c/sc + dc2 = _mm512_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc + auto adbc2 = _mm512_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2 + auto res2 = Vectorized>::hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2 + + // get the denominator + auto denom2 = Vectorized>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + res2 = _mm512_div_pd(res2, denom2); + return res2; +} + +// reciprocal. Implement this here so we can use multiplication. 
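+// i.e. 1/z = conj(z) / |z|^2: the function below flips the sign of the imaginary
+// lanes with an XOR against (0.0, -0.0, ...) and divides by abs_2_().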
+inline Vectorized> Vectorized>::reciprocal() const{ + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = d/abs_2() + const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto c_d = _mm512_xor_pd(sign_mask, values); //c -d + return _mm512_div_pd(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m512d i = _mm512_setr_pd(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm512_setr_pd(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm512_add_pd(i, values)); // a 1+b + auto sub = Vectorized(_mm512_sub_pd(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, + const Vectorized>& b) { + auto zero_vec = _mm512_set1_epi64(0); + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm512_mask_blend_pd(mask, a, b); + // Exploit the fact that all-ones is a NaN. + auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q); + auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF); + return _mm512_or_pd(max, _mm512_castsi512_pd(isnan)); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, + const Vectorized>& b) { + auto zero_vec = _mm512_set1_epi64(0); + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm512_mask_blend_pd(mask, a, b); + // Exploit the fact that all-ones is a NaN. + auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q); + auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF); + return _mm512_or_pd(min, _mm512_castsi512_pd(isnan)); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_and_pd(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_or_pd(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_xor_pd(a, b); +} + +inline Vectorized> Vectorized>::eq(const Vectorized>& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & Vectorized>(_mm512_set1_pd(1.0)); +} + +inline Vectorized> Vectorized>::ne(const Vectorized>& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & Vectorized>(_mm512_set1_pd(1.0)); +} + +#endif + +}}} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h new file mode 100644 index 0000000000000000000000000000000000000000..2801e484d94ce4d9af11455c93b69f2d1a81ce67 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h @@ -0,0 +1,1019 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX512) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) + +template <> class Vectorized> { +private: + __m512 values; + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; +public: + using value_type = c10::complex; + using size_type = int; + static constexpr size_type size() { + return 8; + } + Vectorized() {} + Vectorized(__m512 v) : values(v) {} + Vectorized(c10::complex val) { + float real_value = val.real(); + float imag_value = val.imag(); + values = _mm512_setr_ps(real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value, + real_value, imag_value); + } + Vectorized(c10::complex val1, c10::complex val2, + c10::complex val3, c10::complex val4, + c10::complex val5, c10::complex val6, + c10::complex val7, c10::complex val8) { + values = _mm512_setr_ps(val1.real(), val1.imag(), + val2.real(), val2.imag(), + val3.real(), val3.imag(), + val4.real(), val4.imag(), + val5.real(), val5.imag(), + val6.real(), val6.imag(), + val7.real(), val7.imag(), + val8.real(), val8.imag()); + } + operator __m512() const { + return values; + } + template + static Vectorized> blend(const Vectorized>& a, + const Vectorized>& b) { + // convert c10::complex index mask to V index mask: xy -> xxyy + static_assert(mask > -1 && mask < 256, "Unexpected mask value"); + // The compiler would hopefully convert this switch condition + // into a jump table + switch (mask) { + case 0: + return a; + case 1: + return _mm512_mask_blend_ps(0x03, a.values, b.values); + case 2: + return _mm512_mask_blend_ps(0x0C, a.values, b.values); + case 3: + return _mm512_mask_blend_ps(0x0F, a.values, b.values); + case 4: + return _mm512_mask_blend_ps(0x30, a.values, b.values); + case 5: + return _mm512_mask_blend_ps(0x33, a.values, b.values); + case 6: + return _mm512_mask_blend_ps(0x3C, a.values, b.values); + case 7: + return _mm512_mask_blend_ps(0x3F, a.values, b.values); + case 8: + return _mm512_mask_blend_ps(0xC0, a.values, b.values); + case 9: + return _mm512_mask_blend_ps(0xC3, a.values, b.values); + case 10: + return _mm512_mask_blend_ps(0xCC, a.values, b.values); + case 11: + return _mm512_mask_blend_ps(0xCF, a.values, b.values); + case 12: + return _mm512_mask_blend_ps(0xF0, a.values, b.values); + case 13: + return _mm512_mask_blend_ps(0xF3, a.values, b.values); + case 14: + return _mm512_mask_blend_ps(0xFC, a.values, b.values); + case 15: + return _mm512_mask_blend_ps(0xFF, a.values, b.values); + case 16: + return _mm512_mask_blend_ps(0x300, a.values, b.values); + case 17: + return _mm512_mask_blend_ps(0x303, a.values, b.values); + case 18: + return _mm512_mask_blend_ps(0x30C, a.values, b.values); + case 19: + return _mm512_mask_blend_ps(0x30F, a.values, b.values); + case 20: + return _mm512_mask_blend_ps(0x330, a.values, b.values); + case 21: + return _mm512_mask_blend_ps(0x333, a.values, b.values); + case 22: + return _mm512_mask_blend_ps(0x33C, a.values, b.values); + case 23: + return _mm512_mask_blend_ps(0x33F, a.values, b.values); + case 24: + return _mm512_mask_blend_ps(0x3C0, a.values, b.values); + case 25: + return _mm512_mask_blend_ps(0x3C3, a.values, b.values); + case 26: + return _mm512_mask_blend_ps(0x3CC, a.values, b.values); 
+ case 27: + return _mm512_mask_blend_ps(0x3CF, a.values, b.values); + case 28: + return _mm512_mask_blend_ps(0x3F0, a.values, b.values); + case 29: + return _mm512_mask_blend_ps(0x3F3, a.values, b.values); + case 30: + return _mm512_mask_blend_ps(0x3FC, a.values, b.values); + case 31: + return _mm512_mask_blend_ps(0x3FF, a.values, b.values); + case 32: + return _mm512_mask_blend_ps(0xC00, a.values, b.values); + case 33: + return _mm512_mask_blend_ps(0xC03, a.values, b.values); + case 34: + return _mm512_mask_blend_ps(0xC0C, a.values, b.values); + case 35: + return _mm512_mask_blend_ps(0xC0F, a.values, b.values); + case 36: + return _mm512_mask_blend_ps(0xC30, a.values, b.values); + case 37: + return _mm512_mask_blend_ps(0xC33, a.values, b.values); + case 38: + return _mm512_mask_blend_ps(0xC3C, a.values, b.values); + case 39: + return _mm512_mask_blend_ps(0xC3F, a.values, b.values); + case 40: + return _mm512_mask_blend_ps(0xCC0, a.values, b.values); + case 41: + return _mm512_mask_blend_ps(0xCC3, a.values, b.values); + case 42: + return _mm512_mask_blend_ps(0xCCC, a.values, b.values); + case 43: + return _mm512_mask_blend_ps(0xCCF, a.values, b.values); + case 44: + return _mm512_mask_blend_ps(0xCF0, a.values, b.values); + case 45: + return _mm512_mask_blend_ps(0xCF3, a.values, b.values); + case 46: + return _mm512_mask_blend_ps(0xCFC, a.values, b.values); + case 47: + return _mm512_mask_blend_ps(0xCFF, a.values, b.values); + case 48: + return _mm512_mask_blend_ps(0xF00, a.values, b.values); + case 49: + return _mm512_mask_blend_ps(0xF03, a.values, b.values); + case 50: + return _mm512_mask_blend_ps(0xF0C, a.values, b.values); + case 51: + return _mm512_mask_blend_ps(0xF0F, a.values, b.values); + case 52: + return _mm512_mask_blend_ps(0xF30, a.values, b.values); + case 53: + return _mm512_mask_blend_ps(0xF33, a.values, b.values); + case 54: + return _mm512_mask_blend_ps(0xF3C, a.values, b.values); + case 55: + return _mm512_mask_blend_ps(0xF3F, a.values, b.values); + case 56: + return _mm512_mask_blend_ps(0xFC0, a.values, b.values); + case 57: + return _mm512_mask_blend_ps(0xFC3, a.values, b.values); + case 58: + return _mm512_mask_blend_ps(0xFCC, a.values, b.values); + case 59: + return _mm512_mask_blend_ps(0xFCF, a.values, b.values); + case 60: + return _mm512_mask_blend_ps(0xFF0, a.values, b.values); + case 61: + return _mm512_mask_blend_ps(0xFF3, a.values, b.values); + case 62: + return _mm512_mask_blend_ps(0xFFC, a.values, b.values); + case 63: + return _mm512_mask_blend_ps(0xFFF, a.values, b.values); + case 64: + return _mm512_mask_blend_ps(0x3000, a.values, b.values); + case 65: + return _mm512_mask_blend_ps(0x3003, a.values, b.values); + case 66: + return _mm512_mask_blend_ps(0x300C, a.values, b.values); + case 67: + return _mm512_mask_blend_ps(0x300F, a.values, b.values); + case 68: + return _mm512_mask_blend_ps(0x3030, a.values, b.values); + case 69: + return _mm512_mask_blend_ps(0x3033, a.values, b.values); + case 70: + return _mm512_mask_blend_ps(0x303C, a.values, b.values); + case 71: + return _mm512_mask_blend_ps(0x303F, a.values, b.values); + case 72: + return _mm512_mask_blend_ps(0x30C0, a.values, b.values); + case 73: + return _mm512_mask_blend_ps(0X30C3, a.values, b.values); + case 74: + return _mm512_mask_blend_ps(0x30CC, a.values, b.values); + case 75: + return _mm512_mask_blend_ps(0x30CF, a.values, b.values); + case 76: + return _mm512_mask_blend_ps(0x30F0, a.values, b.values); + case 77: + return _mm512_mask_blend_ps(0x30F3, a.values, b.values); + case 78: + return 
_mm512_mask_blend_ps(0x30FC, a.values, b.values); + case 79: + return _mm512_mask_blend_ps(0x30FF, a.values, b.values); + case 80: + return _mm512_mask_blend_ps(0x3300, a.values, b.values); + case 81: + return _mm512_mask_blend_ps(0X3303, a.values, b.values); + case 82: + return _mm512_mask_blend_ps(0x330C, a.values, b.values); + case 83: + return _mm512_mask_blend_ps(0x330F, a.values, b.values); + case 84: + return _mm512_mask_blend_ps(0x3330, a.values, b.values); + case 85: + return _mm512_mask_blend_ps(0x3333, a.values, b.values); + case 86: + return _mm512_mask_blend_ps(0x333C, a.values, b.values); + case 87: + return _mm512_mask_blend_ps(0X333F, a.values, b.values); + case 88: + return _mm512_mask_blend_ps(0x33C0, a.values, b.values); + case 89: + return _mm512_mask_blend_ps(0x33C3, a.values, b.values); + case 90: + return _mm512_mask_blend_ps(0x33CC, a.values, b.values); + case 91: + return _mm512_mask_blend_ps(0x33CF, a.values, b.values); + case 92: + return _mm512_mask_blend_ps(0x33F0, a.values, b.values); + case 93: + return _mm512_mask_blend_ps(0x33F3, a.values, b.values); + case 94: + return _mm512_mask_blend_ps(0x33FC, a.values, b.values); + case 95: + return _mm512_mask_blend_ps(0x33FF, a.values, b.values); + case 96: + return _mm512_mask_blend_ps(0X3C00, a.values, b.values); + case 97: + return _mm512_mask_blend_ps(0x3C03, a.values, b.values); + case 98: + return _mm512_mask_blend_ps(0x3C0C, a.values, b.values); + case 99: + return _mm512_mask_blend_ps(0x3C0F, a.values, b.values); + case 100: + return _mm512_mask_blend_ps(0x3C30, a.values, b.values); + case 101: + return _mm512_mask_blend_ps(0x3C33, a.values, b.values); + case 102: + return _mm512_mask_blend_ps(0x3C3C, a.values, b.values); + case 103: + return _mm512_mask_blend_ps(0x3C3F, a.values, b.values); + case 104: + return _mm512_mask_blend_ps(0x3CC0, a.values, b.values); + case 105: + return _mm512_mask_blend_ps(0x3CC3, a.values, b.values); + case 106: + return _mm512_mask_blend_ps(0x3CCC, a.values, b.values); + case 107: + return _mm512_mask_blend_ps(0x3CCF, a.values, b.values); + case 108: + return _mm512_mask_blend_ps(0x3CF0, a.values, b.values); + case 109: + return _mm512_mask_blend_ps(0x3CF3, a.values, b.values); + case 110: + return _mm512_mask_blend_ps(0x3CFC, a.values, b.values); + case 111: + return _mm512_mask_blend_ps(0x3CFF, a.values, b.values); + case 112: + return _mm512_mask_blend_ps(0x3F00, a.values, b.values); + case 113: + return _mm512_mask_blend_ps(0x3F03, a.values, b.values); + case 114: + return _mm512_mask_blend_ps(0x3F0C, a.values, b.values); + case 115: + return _mm512_mask_blend_ps(0x3F0F, a.values, b.values); + case 116: + return _mm512_mask_blend_ps(0x3F30, a.values, b.values); + case 117: + return _mm512_mask_blend_ps(0x3F33, a.values, b.values); + case 118: + return _mm512_mask_blend_ps(0x3F3C, a.values, b.values); + case 119: + return _mm512_mask_blend_ps(0x3F3F, a.values, b.values); + case 120: + return _mm512_mask_blend_ps(0x3FC0, a.values, b.values); + case 121: + return _mm512_mask_blend_ps(0x3FC3, a.values, b.values); + case 122: + return _mm512_mask_blend_ps(0x3FCC, a.values, b.values); + case 123: + return _mm512_mask_blend_ps(0x3FCF, a.values, b.values); + case 124: + return _mm512_mask_blend_ps(0x3FF0, a.values, b.values); + case 125: + return _mm512_mask_blend_ps(0x3FF3, a.values, b.values); + case 126: + return _mm512_mask_blend_ps(0x3FFC, a.values, b.values); + case 127: + return _mm512_mask_blend_ps(0x3FFF, a.values, b.values); + case 128: + return 
_mm512_mask_blend_ps(0xC000, a.values, b.values); + case 129: + return _mm512_mask_blend_ps(0xC003, a.values, b.values); + case 130: + return _mm512_mask_blend_ps(0xC00C, a.values, b.values); + case 131: + return _mm512_mask_blend_ps(0xC00F, a.values, b.values); + case 132: + return _mm512_mask_blend_ps(0xC030, a.values, b.values); + case 133: + return _mm512_mask_blend_ps(0xC033, a.values, b.values); + case 134: + return _mm512_mask_blend_ps(0xC03C, a.values, b.values); + case 135: + return _mm512_mask_blend_ps(0xC03F, a.values, b.values); + case 136: + return _mm512_mask_blend_ps(0xC0C0, a.values, b.values); + case 137: + return _mm512_mask_blend_ps(0xC0C3, a.values, b.values); + case 138: + return _mm512_mask_blend_ps(0xC0CC, a.values, b.values); + case 139: + return _mm512_mask_blend_ps(0xC0CF, a.values, b.values); + case 140: + return _mm512_mask_blend_ps(0xC0F0, a.values, b.values); + case 141: + return _mm512_mask_blend_ps(0xC0F3, a.values, b.values); + case 142: + return _mm512_mask_blend_ps(0xC0FC, a.values, b.values); + case 143: + return _mm512_mask_blend_ps(0xC0FF, a.values, b.values); + case 144: + return _mm512_mask_blend_ps(0xC300, a.values, b.values); + case 145: + return _mm512_mask_blend_ps(0xC303, a.values, b.values); + case 146: + return _mm512_mask_blend_ps(0xC30C, a.values, b.values); + case 147: + return _mm512_mask_blend_ps(0xC30F, a.values, b.values); + case 148: + return _mm512_mask_blend_ps(0xC330, a.values, b.values); + case 149: + return _mm512_mask_blend_ps(0xC333, a.values, b.values); + case 150: + return _mm512_mask_blend_ps(0xC33C, a.values, b.values); + case 151: + return _mm512_mask_blend_ps(0xC33F, a.values, b.values); + case 152: + return _mm512_mask_blend_ps(0xC3C0, a.values, b.values); + case 153: + return _mm512_mask_blend_ps(0xC3C3, a.values, b.values); + case 154: + return _mm512_mask_blend_ps(0xC3CC, a.values, b.values); + case 155: + return _mm512_mask_blend_ps(0xC3CF, a.values, b.values); + case 156: + return _mm512_mask_blend_ps(0xC3F0, a.values, b.values); + case 157: + return _mm512_mask_blend_ps(0xC3F3, a.values, b.values); + case 158: + return _mm512_mask_blend_ps(0xC3FC, a.values, b.values); + case 159: + return _mm512_mask_blend_ps(0xC3FF, a.values, b.values); + case 160: + return _mm512_mask_blend_ps(0xCC00, a.values, b.values); + case 161: + return _mm512_mask_blend_ps(0xCC03, a.values, b.values); + case 162: + return _mm512_mask_blend_ps(0xCC0C, a.values, b.values); + case 163: + return _mm512_mask_blend_ps(0xCC0F, a.values, b.values); + case 164: + return _mm512_mask_blend_ps(0xCC30, a.values, b.values); + case 165: + return _mm512_mask_blend_ps(0xCC33, a.values, b.values); + case 166: + return _mm512_mask_blend_ps(0xCC3C, a.values, b.values); + case 167: + return _mm512_mask_blend_ps(0xCC3F, a.values, b.values); + case 168: + return _mm512_mask_blend_ps(0xCCC0, a.values, b.values); + case 169: + return _mm512_mask_blend_ps(0xCCC3, a.values, b.values); + case 170: + return _mm512_mask_blend_ps(0xCCCC, a.values, b.values); + case 171: + return _mm512_mask_blend_ps(0xCCCF, a.values, b.values); + case 172: + return _mm512_mask_blend_ps(0xCCF0, a.values, b.values); + case 173: + return _mm512_mask_blend_ps(0xCCF3, a.values, b.values); + case 174: + return _mm512_mask_blend_ps(0xCCFC, a.values, b.values); + case 175: + return _mm512_mask_blend_ps(0xCCFF, a.values, b.values); + case 176: + return _mm512_mask_blend_ps(0xCF00, a.values, b.values); + case 177: + return _mm512_mask_blend_ps(0xCF03, a.values, b.values); + case 178: + return 
_mm512_mask_blend_ps(0xCF0C, a.values, b.values); + case 179: + return _mm512_mask_blend_ps(0xCF0F, a.values, b.values); + case 180: + return _mm512_mask_blend_ps(0xCF30, a.values, b.values); + case 181: + return _mm512_mask_blend_ps(0xCF33, a.values, b.values); + case 182: + return _mm512_mask_blend_ps(0xCF3C, a.values, b.values); + case 183: + return _mm512_mask_blend_ps(0xCF3F, a.values, b.values); + case 184: + return _mm512_mask_blend_ps(0xCFC0, a.values, b.values); + case 185: + return _mm512_mask_blend_ps(0xCFC3, a.values, b.values); + case 186: + return _mm512_mask_blend_ps(0xCFCC, a.values, b.values); + case 187: + return _mm512_mask_blend_ps(0xCFCF, a.values, b.values); + case 188: + return _mm512_mask_blend_ps(0xCFF0, a.values, b.values); + case 189: + return _mm512_mask_blend_ps(0xCFF3, a.values, b.values); + case 190: + return _mm512_mask_blend_ps(0xCFFC, a.values, b.values); + case 191: + return _mm512_mask_blend_ps(0xCFFF, a.values, b.values); + case 192: + return _mm512_mask_blend_ps(0xF000, a.values, b.values); + case 193: + return _mm512_mask_blend_ps(0xF003, a.values, b.values); + case 194: + return _mm512_mask_blend_ps(0xF00C, a.values, b.values); + case 195: + return _mm512_mask_blend_ps(0xF00F, a.values, b.values); + case 196: + return _mm512_mask_blend_ps(0xF030, a.values, b.values); + case 197: + return _mm512_mask_blend_ps(0xF033, a.values, b.values); + case 198: + return _mm512_mask_blend_ps(0xF03C, a.values, b.values); + case 199: + return _mm512_mask_blend_ps(0xF03F, a.values, b.values); + case 200: + return _mm512_mask_blend_ps(0XF0C0, a.values, b.values); + case 201: + return _mm512_mask_blend_ps(0xF0C3, a.values, b.values); + case 202: + return _mm512_mask_blend_ps(0xF0CC, a.values, b.values); + case 203: + return _mm512_mask_blend_ps(0xF0CF, a.values, b.values); + case 204: + return _mm512_mask_blend_ps(0xF0F0, a.values, b.values); + case 205: + return _mm512_mask_blend_ps(0xF0F3, a.values, b.values); + case 206: + return _mm512_mask_blend_ps(0xF0FC, a.values, b.values); + case 207: + return _mm512_mask_blend_ps(0xF0FF, a.values, b.values); + case 208: + return _mm512_mask_blend_ps(0XF300, a.values, b.values); + case 209: + return _mm512_mask_blend_ps(0xF303, a.values, b.values); + case 210: + return _mm512_mask_blend_ps(0xF30C, a.values, b.values); + case 211: + return _mm512_mask_blend_ps(0xF30F, a.values, b.values); + case 212: + return _mm512_mask_blend_ps(0xF330, a.values, b.values); + case 213: + return _mm512_mask_blend_ps(0xF333, a.values, b.values); + case 214: + return _mm512_mask_blend_ps(0XF33C, a.values, b.values); + case 215: + return _mm512_mask_blend_ps(0xF33F, a.values, b.values); + case 216: + return _mm512_mask_blend_ps(0xF3C0, a.values, b.values); + case 217: + return _mm512_mask_blend_ps(0xF3C3, a.values, b.values); + case 218: + return _mm512_mask_blend_ps(0xF3CC, a.values, b.values); + case 219: + return _mm512_mask_blend_ps(0xF3CF, a.values, b.values); + case 220: + return _mm512_mask_blend_ps(0xF3F0, a.values, b.values); + case 221: + return _mm512_mask_blend_ps(0xF3F3, a.values, b.values); + case 222: + return _mm512_mask_blend_ps(0xF3FC, a.values, b.values); + case 223: + return _mm512_mask_blend_ps(0XF3FF, a.values, b.values); + case 224: + return _mm512_mask_blend_ps(0xFC00, a.values, b.values); + case 225: + return _mm512_mask_blend_ps(0xFC03, a.values, b.values); + case 226: + return _mm512_mask_blend_ps(0xFC0C, a.values, b.values); + case 227: + return _mm512_mask_blend_ps(0xFC0F, a.values, b.values); + case 228: + return 
_mm512_mask_blend_ps(0xFC30, a.values, b.values); + case 229: + return _mm512_mask_blend_ps(0xFC33, a.values, b.values); + case 230: + return _mm512_mask_blend_ps(0xFC3C, a.values, b.values); + case 231: + return _mm512_mask_blend_ps(0xFC3F, a.values, b.values); + case 232: + return _mm512_mask_blend_ps(0xFCC0, a.values, b.values); + case 233: + return _mm512_mask_blend_ps(0xFCC3, a.values, b.values); + case 234: + return _mm512_mask_blend_ps(0xFCCC, a.values, b.values); + case 235: + return _mm512_mask_blend_ps(0xFCCF, a.values, b.values); + case 236: + return _mm512_mask_blend_ps(0xFCF0, a.values, b.values); + case 237: + return _mm512_mask_blend_ps(0xFCF3, a.values, b.values); + case 238: + return _mm512_mask_blend_ps(0xFCFC, a.values, b.values); + case 239: + return _mm512_mask_blend_ps(0xFCFF, a.values, b.values); + case 240: + return _mm512_mask_blend_ps(0xFF00, a.values, b.values); + case 241: + return _mm512_mask_blend_ps(0xFF03, a.values, b.values); + case 242: + return _mm512_mask_blend_ps(0xFF0C, a.values, b.values); + case 243: + return _mm512_mask_blend_ps(0xFF0F, a.values, b.values); + case 244: + return _mm512_mask_blend_ps(0xFF30, a.values, b.values); + case 245: + return _mm512_mask_blend_ps(0xFF33, a.values, b.values); + case 246: + return _mm512_mask_blend_ps(0xFF3C, a.values, b.values); + case 247: + return _mm512_mask_blend_ps(0xFF3F, a.values, b.values); + case 248: + return _mm512_mask_blend_ps(0xFFC0, a.values, b.values); + case 249: + return _mm512_mask_blend_ps(0xFFC3, a.values, b.values); + case 250: + return _mm512_mask_blend_ps(0xFFCC, a.values, b.values); + case 251: + return _mm512_mask_blend_ps(0xFFCF, a.values, b.values); + case 252: + return _mm512_mask_blend_ps(0xFFF0, a.values, b.values); + case 253: + return _mm512_mask_blend_ps(0xFFF3, a.values, b.values); + case 254: + return _mm512_mask_blend_ps(0xFFFC, a.values, b.values); + default: break; + } + return b; + } + static Vectorized> blendv(const Vectorized>& a, + const Vectorized>& b, + const Vectorized>& mask) { + // convert c10::complex index mask to V index mask: xy -> xxyy + auto mask_ = _mm512_unpacklo_ps(mask.values, mask.values); + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask_), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_ps(mmask, a.values, b.values); + } + template + static Vectorized> arange(c10::complex base = 0., + step_t step = static_cast(1)) { + return Vectorized>(base, + base + step, + base + c10::complex(2)*step, + base + c10::complex(3)*step, + base + c10::complex(4)*step, + base + c10::complex(5)*step, + base + c10::complex(6)*step, + base + c10::complex(7)*step); + } + static Vectorized> set(const Vectorized>& a, + const Vectorized>& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized> loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_ps(reinterpret_cast(ptr)); + + __at_align__ float tmp_values[2*size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. 
We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(2*size())) { + tmp_values[i] = 0.0; + } + std::memcpy( + tmp_values, + reinterpret_cast(ptr), + count * sizeof(c10::complex)); + return _mm512_load_ps(tmp_values); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + float tmp_values[2*size()]; + _mm512_storeu_ps(reinterpret_cast(tmp_values), values); + std::memcpy(ptr, tmp_values, count * sizeof(c10::complex)); + } + } + // AVX512 doesn't have horizontal add & horizontal sub instructions. + // TODO: hadd_pd() & hsub_pd() may have scope for improvement. + static inline __m512 hadd_ps(__m512 a, __m512 b) { + __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0); + __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1); + return _mm512_add_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), + _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); + } + static inline __m512 hsub_ps(__m512 a, __m512 b) { + __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0); + __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1); + return _mm512_sub_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), + _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); + } + const c10::complex& operator[](int idx) const = delete; + c10::complex& operator[](int idx) = delete; + Vectorized> map(c10::complex (*const f)(const c10::complex &)) const { + __at_align__ c10::complex tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + __m512 abs_2_() const { + auto val_2 = _mm512_mul_ps(values, values); // a*a b*b + auto ret = hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b + return ret; + } + __m512 abs_() const { + auto real = _mm512_moveldup_ps(values); // real real + auto imag = _mm512_movehdup_ps(values); // imag imag + return Sleef_hypotf16_u05(real, imag); // abs abs + } + Vectorized> abs() const { + const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm512_and_ps(abs_(), real_mask); // abs 0 + } + __m512 angle_() const { + //angle = atan2(b/a) + auto b_a = _mm512_permute_ps(values, 0xB1); // b a + return Sleef_atan2f16_u10(values, b_a); // 90-angle angle + } + Vectorized> angle() const { + const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + auto angle = _mm512_permute_ps(angle_(), 0xB1); // angle 90-angle + return _mm512_and_ps(angle, real_mask); // angle 0 + } + Vectorized> sgn() const { + auto abs = abs_(); + auto zero = _mm512_setzero_ps(); + auto mask = _mm512_cmp_ps_mask(abs, zero, _CMP_EQ_OQ); + auto div = _mm512_div_ps(values, abs); + return _mm512_mask_blend_ps(mask, div, zero); + } + __m512 real_() const { + const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 
0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000)); + return _mm512_and_ps(values, real_mask); + } + Vectorized> real() const { + return real_(); + } + __m512 imag_() const { + const __m512 imag_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF)); + return _mm512_and_ps(values, imag_mask); + } + Vectorized> imag() const { + return _mm512_permute_ps(imag_(), 0xB1); //b a + } + __m512 conj_() const { + const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, + 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + return _mm512_xor_ps(values, sign_mask); // a -b + } + Vectorized> conj() const { + return conj_(); + } + Vectorized> log() const { + // Most trigonomic ops use the log() op to improve complex number performance. + return map(std::log); + } + Vectorized> log2() const { + const __m512 log2_ = _mm512_set1_ps(std::log(2)); + return _mm512_div_ps(log(), log2_); + } + Vectorized> log10() const { + const __m512 log10_ = _mm512_set1_ps(std::log(10)); + return _mm512_div_ps(log(), log10_); + } + Vectorized> log1p() const { + return map(std::log1p); + } + Vectorized> asin() const { + // asin(x) + // = -i*ln(iz + sqrt(1 -z^2)) + // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) + // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) + const __m512 one = _mm512_set1_ps(1); + + auto conj = conj_(); + auto b_a = _mm512_permute_ps(conj, 0xB1); //-b a + auto ab = _mm512_mul_ps(conj, b_a); //-ab -ab + auto im = _mm512_add_ps(ab, ab); //-2ab -2ab + + auto val_2 = _mm512_mul_ps(values, values); // a*a b*b + auto re = hsub_ps(val_2, _mm512_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a + re = _mm512_sub_ps(one, re); + + auto root = Vectorized(_mm512_mask_blend_ps(0xAAAA, re, im)).sqrt(); //sqrt(re + i*im) + auto ln = Vectorized(_mm512_add_ps(b_a, root)).log(); //ln(iz + sqrt()) + return Vectorized(_mm512_permute_ps(ln.values, 0xB1)).conj(); //-i*ln() + } + Vectorized> acos() const { + return map(std::acos); + } + Vectorized> atan() const; + Vectorized> atanh() const { + return map(std::atanh); + } + Vectorized> exp() const { + //exp(a + bi) + // = exp(a)*(cos(b) + sin(b)i) + auto exp = Sleef_expf16_u10(values); //exp(a) exp(b) + exp = _mm512_mask_blend_ps(0xAAAA, exp, _mm512_permute_ps(exp, 0xB1)); //exp(a) exp(a) + + auto sin_cos = Sleef_sincosf16_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)] + auto cos_sin = _mm512_mask_blend_ps(0xAAAA, _mm512_permute_ps(sin_cos.y, 0xB1), + sin_cos.x); //cos(b) sin(b) + return _mm512_mul_ps(exp, cos_sin); + } + Vectorized> exp2() const { + // Use identity 2**x = exp(log(2) * x) + const __m512 ln_2 = _mm512_set1_ps(c10::ln_2); + Vectorized> scaled_values = _mm512_mul_ps(values, ln_2); + return scaled_values.exp(); + } + Vectorized> expm1() const { + return map(std::expm1); + } + Vectorized> sin() const { + return map(std::sin); + } + Vectorized> sinh() const { + return map(std::sinh); + } + Vectorized> cos() const { + return map(std::cos); + } + Vectorized> cosh() const { + return map(std::cosh); + } + Vectorized> ceil() const { + return _mm512_ceil_ps(values); + } + Vectorized> floor() const { + return _mm512_floor_ps(values); + } + Vectorized> neg() const { + auto zero = _mm512_setzero_ps(); + return _mm512_sub_ps(zero, values); + } + Vectorized> round() const { + return 
_mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized> tan() const { + return map(std::tan); + } + Vectorized> tanh() const { + return map(std::tanh); + } + Vectorized> trunc() const { + return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized> sqrt() const { + return map(std::sqrt); + } + Vectorized> reciprocal() const; + Vectorized> rsqrt() const { + return sqrt().reciprocal(); + } + Vectorized> pow(const Vectorized> &exp) const { + __at_align__ c10::complex x_tmp[size()]; + __at_align__ c10::complex y_tmp[size()]; + store(x_tmp); + exp.store(y_tmp); + for (const auto i : c10::irange(size())) { + x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); + } + return loadu(x_tmp); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized> operator==(const Vectorized>& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF)); + } + Vectorized> operator!=(const Vectorized>& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF)); + } + Vectorized> operator<(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator<=(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + Vectorized> operator>=(const Vectorized>& other) const { + TORCH_CHECK(false, "not supported for complex numbers"); + } + + Vectorized> eq(const Vectorized>& other) const; + Vectorized> ne(const Vectorized>& other) const; +}; + +template <> Vectorized> inline operator+(const Vectorized> &a, + const Vectorized> &b) { + return _mm512_add_ps(a, b); +} + +template <> Vectorized> inline operator-(const Vectorized> &a, + const Vectorized> &b) { + return _mm512_sub_ps(a, b); +} + +template <> Vectorized> inline operator*(const Vectorized> &a, + const Vectorized> &b) { + //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i + const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, + 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto ac_bd = _mm512_mul_ps(a, b); //ac bd + + auto d_c = _mm512_permute_ps(b, 0xB1); //d c + d_c = _mm512_xor_ps(sign_mask, d_c); //d -c + auto ad_bc = _mm512_mul_ps(a, d_c); //ad -bc + + auto ret = Vectorized>::hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc + return ret; +} + +template <> Vectorized> inline operator/(const Vectorized> &a, + const Vectorized> &b) { + //re + im*i = (a + bi) / (c + di) + auto mask = _mm512_set1_ps(-0.f); + auto fabs_cd = _mm512_andnot_ps(mask, b); // |c| |d| + auto fabs_dc = _mm512_permute_ps(fabs_cd, 0xB1); // |d| |c| + auto scale = _mm512_rcp14_ps(_mm512_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc + auto a2 = _mm512_mul_ps(a, scale); // a/sc b/sc + auto b2 = _mm512_mul_ps(b, scale); // c/sc d/sc + auto acbd2 = _mm512_mul_ps(a2, b2); + + const __m512 sign_mask = _mm512_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, + -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0); + auto dc2 = _mm512_permute_ps(b2, 0xB1); // d/sc c/sc + dc2 = _mm512_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc + auto adbc2 = _mm512_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2 + auto res2 = 
Vectorized>::hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2 + + // get the denominator + auto denom2 = Vectorized>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2 + res2 = _mm512_div_ps(res2, denom2); + return res2; +} + +// reciprocal. Implement this here so we can use multiplication. +inline Vectorized> Vectorized>::reciprocal() const { + //re + im*i = (a + bi) / (c + di) + //re = (ac + bd)/abs_2() = c/abs_2() + //im = (bc - ad)/abs_2() = d/abs_2() + const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, + 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0); + auto c_d = _mm512_xor_ps(sign_mask, values); //c -d + return _mm512_div_ps(c_d, abs_2_()); +} + +inline Vectorized> Vectorized>::atan() const { + // atan(x) = i/2 * ln((i + z)/(i - z)) + const __m512 i = _mm512_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, + 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0); + const Vectorized i_half = _mm512_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, + 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5); + + auto sum = Vectorized(_mm512_add_ps(i, values)); // a 1+b + auto sub = Vectorized(_mm512_sub_ps(i, values)); // -a 1-b + auto ln = (sum/sub).log(); // ln((i + z)/(i - z)) + return i_half*ln; // i/2*ln() +} + +template <> +Vectorized> inline maximum(const Vectorized>& a, + const Vectorized>& b) { + auto zero_vector = _mm512_set1_epi32(0); + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_LT_OQ); + auto max = _mm512_mask_blend_ps(mask, a, b); + // Exploit the fact that all-ones is a NaN. + auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q); + auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF); + return _mm512_or_ps(max, _mm512_castsi512_ps(isnan)); +} + +template <> +Vectorized> inline minimum(const Vectorized>& a, + const Vectorized>& b) { + auto zero_vector = _mm512_set1_epi32(0); + auto abs_a = a.abs_2_(); + auto abs_b = b.abs_2_(); + auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_GT_OQ); + auto min = _mm512_mask_blend_ps(mask, a, b); + // Exploit the fact that all-ones is a NaN. 
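+  // Note on the NaN handling (an explanatory comment, not part of the upstream
+  // source): abs_2_() yields the squared magnitudes, so the _CMP_UNORD_Q
+  // compare below is true exactly in lanes where either input has a NaN
+  // component. Setting those lanes to the all-ones bit pattern and OR-ing it
+  // into the blended result forces them to a NaN encoding (sign 1, exponent
+  // all ones, non-zero mantissa), mirroring the IEEE 754-201x minimum/maximum
+  // NaN-propagation rule.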
+ auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q); + auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF); + return _mm512_or_ps(min, _mm512_castsi512_ps(isnan)); +} + +template <> +Vectorized> inline operator&(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_and_ps(a, b); +} + +template <> +Vectorized> inline operator|(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_or_ps(a, b); +} + +template <> +Vectorized> inline operator^(const Vectorized>& a, + const Vectorized>& b) { + return _mm512_xor_ps(a, b); +} + +inline Vectorized> Vectorized>::eq( + const Vectorized>& other) const { + auto eq = (*this == other); // compares real and imag individually + // If both real numbers and imag numbers are equal, then the complex numbers are equal + return (eq.real() & eq.imag()) & Vectorized>(_mm512_set1_ps(1.0f)); +} + +inline Vectorized> Vectorized>::ne( + const Vectorized>& other) const { + auto ne = (*this != other); // compares real and imag individually + // If either real numbers or imag numbers are not equal, then the complex numbers are not equal + return (ne.real() | ne.imag()) & Vectorized>(_mm512_set1_ps(1.0f)); +} + +#endif + +}}} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_convert.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_convert.h new file mode 100644 index 0000000000000000000000000000000000000000..e8ad662a99fc23ee250b18d7bf89b51f71523aaf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_convert.h @@ -0,0 +1,181 @@ +#pragma once + +#include +#include +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + VectorizedN result; + __m512 value; + cvtbf16_fp32(_mm512_castsi512_si256(src[0]), value); + result[0] = value; + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply(const VectorizedN& src) { + VectorizedN result; + __m512 value; + cvtfp16_fp32(_mm512_castsi512_si256(src[0]), value); + result[0] = value; + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + VectorizedN result; + result[0] = _mm512_castsi256_si512(cvtfp32_bf16(src[0])); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply(const VectorizedN& src) { + VectorizedN result; + result[0] = _mm512_castsi256_si512(cvtfp32_fp16(src[0])); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto low = _mm512_cvtepi64_ps(src[0]); + auto high = _mm512_cvtepi64_ps(src[1]); + return Vectorized( + _mm512_insertf32x8(_mm512_castps256_ps512(low), high, 1)); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + at::vec::VectorizedN result; + result[0] = _mm512_cvt_roundps_epi64( + _mm512_castps512_ps256(src[0]), _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); + result[1] = _mm512_cvt_roundps_epi64( + _mm512_extractf32x8_ps(src[0], 1), + _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto low = _mm512_cvtepi64_epi32(src[0]); + auto high = _mm512_cvtepi64_epi32(src[1]); + 
return Vectorized( + _mm512_inserti32x8(_mm512_castsi256_si512(low), high, 1)); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + at::vec::VectorizedN result; + result[0] = _mm512_cvtepi32_epi64(_mm512_castsi512_si256(src[0])); + result[1] = _mm512_cvtepi32_epi64(_mm512_extracti32x8_epi32(src[0], 1)); + return result; + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto src128 = _mm512_castsi512_si128(src[0]); + return Vectorized(_mm512_cvtepi8_epi32(src128)); + } +}; + +template <> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + auto src128 = _mm512_castsi512_si128(src[0]); + return Vectorized(_mm512_cvtepu8_epi32(src128)); + } +}; + +template +struct VecConvert< + dst_t, + 1, + src_t, + 1, + typename std::enable_if_t< + (is_reduced_floating_point_v && is_8bit_integer_v) || + (is_reduced_floating_point_v && is_8bit_integer_v), + void>> { + static inline VectorizedN apply(const VectorizedN& src) { + VectorizedN tmp_fp32 = VecConvert::apply(src); + return VecConvert::apply(tmp_fp32); + } +}; + +template +struct VecConvert< + dst_t, + 1, + float, + 1, + typename std::enable_if_t, + void>> { + static inline VectorizedN apply(const VectorizedN& src) { + return convert_float_to_int8(src[0]); + } +}; + +template +struct VecConvert< + float, + 1, + src_t, + 1, + typename std::enable_if_t, + void>> { + static inline VectorizedN apply(const VectorizedN& src) { + return convert_int8_to_float(src[0]); + } +}; + +template +struct VecConvert< + dst_t, + 1, + int64_t, + 2, + typename std::enable_if< + std::is_same_v || + std::is_same_v>::type> { + static inline VectorizedN apply( + const VectorizedN& src) { + return VecConvert::apply( + VecConvert::apply(src)); + } +}; + +#endif + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h new file mode 100644 index 0000000000000000000000000000000000000000..508ab257e603bb254b7fd712f05956f6a84ac129 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h @@ -0,0 +1,468 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! 
+// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if (defined(CPU_CAPABILITY_AVX512)) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) + +template <> class Vectorized { +private: + static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; +public: + // values needs to be public for compilation with clang + // as vec512.h uses it + __m512d values; + using value_type = double; + using size_type = int; + static constexpr size_type size() { + return 8; + } + Vectorized() {} + Vectorized(__m512d v) : values(v) {} + Vectorized(double val) { + values = _mm512_set1_pd(val); + } + Vectorized(double val1, double val2, double val3, double val4, + double val5, double val6, double val7, double val8) { + values = _mm512_setr_pd(val1, val2, val3, val4, val5, val6, val7, val8); + } + operator __m512d() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm512_mask_blend_pd(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF); + auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask.values), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_pd(mmask, a.values, b.values); + } + template + static Vectorized arange(double base = 0., step_t step = static_cast(1)) { + return Vectorized(base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, + base + 7 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_pd(reinterpret_cast(ptr)); + + __mmask8 mask = (1ULL << count) - 1; + return _mm512_maskz_loadu_pd(mask, ptr); + } + void store(void* ptr, int count = size()) const { + if (count == size()) { + _mm512_storeu_pd(reinterpret_cast(ptr), values); + } else if (count > 0) { + __mmask8 mask = (1ULL << count) - 1; + _mm512_mask_storeu_pd(reinterpret_cast(ptr), mask, values); + } + } + const double& operator[](int idx) const = delete; + double& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __mmask8 cmp = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_EQ_OQ); + return static_cast(cmp); + } + Vectorized isnan() const { + auto cmp_mask = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_UNORD_Q); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + bool has_inf_nan() const { + __m512d self_sub = _mm512_sub_pd(values, values); + return (_mm512_movepi8_mask(_mm512_castpd_si512(self_sub)) & 0x7777777777777777) != 0; + } + Vectorized map(double (*const f)(double)) const { + __at_align__ double tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + 
auto mask = _mm512_set1_pd(-0.f); + return _mm512_andnot_pd(mask, values); + } + Vectorized angle() const { + const auto zero_vec = _mm512_castsi512_pd(zero_vector); + const auto nan_vec = _mm512_set1_pd(NAN); + const auto not_nan_mask = _mm512_cmp_pd_mask(values, values, _CMP_EQ_OQ); + const auto not_nan = _mm512_mask_set1_epi64(zero_vector, not_nan_mask, + 0xFFFFFFFFFFFFFFFF); + const auto nan_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(not_nan), + zero_vec, _CMP_EQ_OQ); + const auto pi = _mm512_set1_pd(c10::pi); + + const auto neg_mask = _mm512_cmp_pd_mask(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm512_mask_blend_pd(neg_mask, zero_vec, pi); + angle = _mm512_mask_blend_pd(nan_mask, angle, nan_vec); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_pd(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosd8_u10(values)); + } + Vectorized acosh() const { + return Vectorized(Sleef_acoshd8_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asind8_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atand8_u10(values)); + } + Vectorized atanh() const { + return Vectorized(Sleef_atanhd8_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2d8_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignd8(values, sign)); + } + Vectorized erf() const { + return Vectorized(Sleef_erfd8_u10(values)); + } + Vectorized erfc() const { + return Vectorized(Sleef_erfcd8_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expd8_u10(values)); + } + Vectorized exp2() const { + return Vectorized(Sleef_exp2d8_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1d8_u10(values)); + } + Vectorized exp_u20() const { + return exp(); + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodd8(values, q)); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotd8_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ double tmp[size()]; + __at_align__ double tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized log() const { + return Vectorized(Sleef_logd8_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2d8_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10d8_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pd8_u10(values)); + } + Vectorized sin() const { + return Vectorized(Sleef_sind8_u10(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhd8_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosd8_u10(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshd8_u10(values)); + } + 
Vectorized ceil() const { + return _mm512_ceil_pd(values); + } + Vectorized floor() const { + return _mm512_floor_pd(values); + } + Vectorized frac() const; + Vectorized neg() const { + return _mm512_xor_pd(_mm512_set1_pd(-0.), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterd8(values, b)); + } + Vectorized round() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tand8_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhd8_u10(values)); + } + Vectorized trunc() const { + return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammad8_u10(values)); + } + Vectorized sqrt() const { + return _mm512_sqrt_pd(values); + } + Vectorized reciprocal() const { + return _mm512_div_pd(_mm512_set1_pd(1), values); + } + Vectorized rsqrt() const { + return _mm512_div_pd(_mm512_set1_pd(1), _mm512_sqrt_pd(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powd8_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. + // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator!=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator<(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LT_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator<=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LE_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator>(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GT_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized operator>=(const Vectorized& other) const { + auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GE_OQ); + return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, + 0xFFFFFFFFFFFFFFFF)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_pd(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_pd(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm512_mul_pd(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm512_div_pd(a, b); +} + +// frac. Implement this here so we can use subtraction. 
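+// For example, frac(2.75) == 0.75 and frac(-2.75) == -0.75: trunc() rounds
+// toward zero, so the fractional part keeps the sign of the input.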
+inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi64(0); + Vectorized max = _mm512_max_pd(a, b); + auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. + return _mm512_or_pd(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi64(0); + Vectorized min = _mm512_min_pd(a, b); + auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, + 0xFFFFFFFFFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. + return _mm512_or_pd(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm512_min_pd(max, _mm512_max_pd(min, a)); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm512_max_pd(min, a); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm512_min_pd(max, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm512_and_pd(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm512_or_pd(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm512_xor_pd(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0); +} + +template <> +inline void convert(const double* src, double* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm512_storeu_pd(dst + i, _mm512_loadu_pd(src + i)); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmadd_pd(a, b, c); +} + +template <> +Vectorized inline fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmsub_pd(a, b, c); +} + +#endif + +}}} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h new file mode 100644 index 0000000000000000000000000000000000000000..a08df3c141a3809162f7f7032ca038dad3176e43 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h @@ -0,0 +1,794 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include +#if defined(CPU_CAPABILITY_AVX512) +#define SLEEF_STATIC_LIBS +#include +#endif + +namespace at { +namespace vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) + +template <> class Vectorized { +private: + static constexpr __m512i zero_vec {0, 0, 0, 0, 0, 0, 0, 0}; +public: + __m512 values; + using value_type = float; + using size_type = int; + static constexpr size_type size() { + return 16; + } + Vectorized() {} + Vectorized(__m512 v) : values(v) {} + Vectorized(float val) { + values = _mm512_set1_ps(val); + } + Vectorized(float val1, float val2, float val3, float val4, + float val5, float val6, float val7, float val8, + float val9, float val10, float val11, float val12, + float val13, float val14, float val15, float val16) { + values = _mm512_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8, + val9, val10, val11, val12, val13, val14, val15, val16); + } + operator __m512() const { + return values; + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + return _mm512_mask_blend_ps(mask, a.values, b.values); + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask.values), all_ones, _MM_CMPINT_EQ); + return _mm512_mask_blend_ps(mmask, a.values, b.values); + } + template + static Vectorized arange(float base = 0.f, step_t step = static_cast(1)) { + return Vectorized( + base, base + step, base + 2 * step, base + 3 * step, + base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, + base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, + base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); + } + static Vectorized set(const Vectorized& a, const Vectorized& b, + int64_t count = size()) { + switch (count) { + case 0: + return a; + case 1: + return blend<1>(a, b); + case 2: + return blend<3>(a, b); + case 3: + return blend<7>(a, b); + case 4: + return blend<15>(a, b); + case 5: + return blend<31>(a, b); + case 6: + return blend<63>(a, b); + case 7: + return blend<127>(a, b); + case 8: + return blend<255>(a, b); + case 9: + return blend<511>(a, b); + case 10: + return blend<1023>(a, b); + case 11: + return blend<2047>(a, b); + case 12: + return blend<4095>(a, b); + case 13: + return blend<8191>(a, b); + case 14: + return blend<16383>(a, b); + case 15: + return blend<32767>(a, b); + } + return b; + } + static Vectorized loadu(const void* ptr, int64_t count = size()) { + if (count == size()) + return _mm512_loadu_ps(reinterpret_cast(ptr)); + + __mmask16 mask = (1ULL << count) - 1; + return _mm512_maskz_loadu_ps(mask, ptr); + } + void store(void* ptr, int64_t count = size()) const { + if (count == size()) { + _mm512_storeu_ps(reinterpret_cast(ptr), values); + } else if (count > 0) { + __mmask16 mask = (1ULL << count) - 1; + _mm512_mask_storeu_ps(reinterpret_cast(ptr), mask, values); + } + } + const float& operator[](int idx) const = delete; + float& operator[](int idx) = delete; + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + __mmask16 cmp = 
_mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_EQ_OQ); + return static_cast(cmp); + } + Vectorized isnan() const { + auto mask = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_UNORD_Q); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + bool has_inf_nan() const { + __m512 self_sub = _mm512_sub_ps(values, values); + return (_mm512_movepi8_mask(_mm512_castps_si512(self_sub)) & 0x7777777777777777) != 0; + } + Vectorized map(float (*const f)(float)) const { + __at_align__ float tmp[size()]; + store(tmp); + for (const auto i : c10::irange(size())) { + tmp[i] = f(tmp[i]); + } + return loadu(tmp); + } + Vectorized abs() const { + auto mask = _mm512_set1_ps(-0.f); + return _mm512_andnot_ps(mask, values); + } + Vectorized angle() const { + __m512 zero_vec = _mm512_set1_ps(0.f); + const auto nan_vec = _mm512_set1_ps(NAN); + const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ); + const auto not_nan_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec), + not_nan_mask, 0xFFFFFFFF); + const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(not_nan_vec), + zero_vec, _CMP_EQ_OQ); + const auto pi = _mm512_set1_ps(c10::pi); + + const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ); + auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi); + angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec); + return angle; + } + Vectorized real() const { + return *this; + } + Vectorized imag() const { + return _mm512_set1_ps(0); + } + Vectorized conj() const { + return *this; + } + Vectorized acos() const { + return Vectorized(Sleef_acosf16_u10(values)); + } + Vectorized acosh() const { + return Vectorized(Sleef_acoshf16_u10(values)); + } + Vectorized asin() const { + return Vectorized(Sleef_asinf16_u10(values)); + } + Vectorized atan() const { + return Vectorized(Sleef_atanf16_u10(values)); + } + Vectorized atanh() const { + return Vectorized(Sleef_atanhf16_u10(values)); + } + Vectorized atan2(const Vectorized &b) const { + return Vectorized(Sleef_atan2f16_u10(values, b)); + } + Vectorized copysign(const Vectorized &sign) const { + return Vectorized(Sleef_copysignf16(values, sign)); + } + Vectorized erf() const { + // constants + const auto neg_zero_vec = _mm512_set1_ps(-0.f); + const auto one_vec = _mm512_set1_ps(1.0f); + const auto p = _mm512_set1_ps(0.3275911f); + const auto p1 = _mm512_set1_ps(0.254829592f); + const auto p2 = _mm512_set1_ps(-0.284496736f); + const auto p3 = _mm512_set1_ps(1.421413741f); + const auto p4 = _mm512_set1_ps(-1.453152027f); + const auto p5 = _mm512_set1_ps(1.061405429f); + // sign(x) + auto sign_mask = _mm512_and_ps(neg_zero_vec, values); + auto abs_vec = _mm512_abs_ps(values); + // t = 1 / (p * abs(x) + 1) + auto tmp0 = _mm512_fmadd_ps(p, abs_vec, one_vec); + auto t = _mm512_div_ps(one_vec, tmp0); + // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1 + auto tmp1 = _mm512_fmadd_ps(p5, t, p4); + auto tmp2 = _mm512_fmadd_ps(tmp1, t, p3); + auto tmp3 = _mm512_fmadd_ps(tmp2, t, p2); + auto r = _mm512_fmadd_ps(tmp3, t, p1); + // - exp(- x * x) + auto pow_2 = _mm512_mul_ps(values, values); + auto neg_pow_2 = _mm512_xor_ps(neg_zero_vec, pow_2); + // auto tmp4 = exp(neg_pow_2); + auto tmp4 = Vectorized(Sleef_expf16_u10(neg_pow_2)); + auto tmp5 = _mm512_xor_ps(neg_zero_vec, tmp4); + // erf(x) = sign(x) * (1 - r * t * exp(- x * x)) + auto tmp6 = _mm512_mul_ps(tmp5, t); + auto tmp7 = _mm512_fmadd_ps(tmp6, r, one_vec); + return _mm512_xor_ps(sign_mask, tmp7); + } + Vectorized erfc() const { + 
return Vectorized(Sleef_erfcf16_u15(values)); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return Vectorized(Sleef_expf16_u10(values)); + } + Vectorized exp2() const { + return Vectorized(Sleef_exp2f16_u10(values)); + } + Vectorized expm1() const { + return Vectorized(Sleef_expm1f16_u10(values)); + } + Vectorized exp_u20() const { + // A faster version of exp with ULP=20 + static __m512 vec_factorial_1 = + _mm512_set1_ps(0.999999701f); // 1/factorial(1) + static __m512 vec_factorial_2 = + _mm512_set1_ps(0.499991506f); // 1/factorial(2) + static __m512 vec_factorial_3 = + _mm512_set1_ps(0.166676521f); // 1/factorial(3) + static __m512 vec_factorial_4 = + _mm512_set1_ps(0.0418978221f); // 1/factorial(4) + static __m512 vec_factorial_5 = + _mm512_set1_ps(0.00828929059f); // 1/factorial(5) + static __m512 vec_exp_log2ef = + _mm512_castsi512_ps(_mm512_set1_epi32(0x3fb8aa3b)); // log2(e) + static __m512 vec_half = _mm512_set1_ps(0.5f); + static __m512 vec_one = _mm512_set1_ps(1.f); + static __m512 vec_zero = _mm512_set1_ps(0.f); + static __m512 vec_two = _mm512_set1_ps(2.f); + static __m512 vec_ln2f = _mm512_castsi512_ps(_mm512_set1_epi32(0x3f317218)); // ln(2) + static __m512 vec_ln_flt_min = _mm512_castsi512_ps(_mm512_set1_epi32(0xc2aeac50)); + static __m512 vec_ln_flt_max = _mm512_castsi512_ps(_mm512_set1_epi32(0x42b17218)); + static __m512i vec_127 = _mm512_set1_epi32(0x0000007f); + static int n_mantissa_bits = 23; + + // exp(x) = + // = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem + // = 2^n * exp(r) // simplify the exp(n*ln(2)) expression + + auto less_ln_flt_min_mask = + _mm512_cmp_ps_mask(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/); + auto vec_src = _mm512_min_ps(values, vec_ln_flt_max); + vec_src = _mm512_max_ps(vec_src, vec_ln_flt_min); + + // fx = floorf(x * log2ef + 0.5) + auto vec_fx = _mm512_fmadd_ps(vec_src, vec_exp_log2ef, vec_half); + auto vec_fx_i = _mm512_cvt_roundps_epi32( + vec_fx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC); + vec_fx = _mm512_cvtepi32_ps(vec_fx_i); + + // x = x - fx * ln2 + auto vec_exp_poly = _mm512_fnmadd_ps(vec_fx, vec_ln2f, vec_src); + + // compute polynomial + auto vec_res = + _mm512_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4); + vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3); + vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2); + vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1); + vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_one); + + // compute 2^(n-1) + auto vec_exp_number = _mm512_sub_ps(vec_fx, vec_one); + auto vec_exp_number_i = _mm512_cvtps_epi32(vec_exp_number); + auto vec_two_pow_n_i = _mm512_add_epi32(vec_exp_number_i, vec_127); + vec_two_pow_n_i = _mm512_slli_epi32(vec_two_pow_n_i, n_mantissa_bits); + auto vec_two_pow_n = _mm512_castsi512_ps(vec_two_pow_n_i); + vec_two_pow_n = + _mm512_mask_blend_ps(less_ln_flt_min_mask, vec_two_pow_n, vec_zero); + + // y = y * 2^n + vec_res = _mm512_mul_ps(vec_res, vec_two_pow_n); + vec_res = _mm512_mul_ps(vec_res, vec_two); + return vec_res; + } + Vectorized fmod(const Vectorized& q) const { + return Vectorized(Sleef_fmodf16(values, q)); + } + Vectorized log() const { + return Vectorized(Sleef_logf16_u10(values)); + } + Vectorized log2() const { + return Vectorized(Sleef_log2f16_u10(values)); + } + Vectorized log10() const { + return Vectorized(Sleef_log10f16_u10(values)); + } + Vectorized log1p() const { + return Vectorized(Sleef_log1pf16_u10(values)); + } + Vectorized 
frac() const; + Vectorized sin() const { + return Vectorized(Sleef_sinf16_u35(values)); + } + Vectorized sinh() const { + return Vectorized(Sleef_sinhf16_u10(values)); + } + Vectorized cos() const { + return Vectorized(Sleef_cosf16_u35(values)); + } + Vectorized cosh() const { + return Vectorized(Sleef_coshf16_u10(values)); + } + Vectorized ceil() const { + return _mm512_ceil_ps(values); + } + Vectorized floor() const { + return _mm512_floor_ps(values); + } + Vectorized hypot(const Vectorized &b) const { + return Vectorized(Sleef_hypotf16_u05(values, b)); + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igamma(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized igammac(const Vectorized &x) const { + __at_align__ float tmp[size()]; + __at_align__ float tmp_x[size()]; + store(tmp); + x.store(tmp_x); + for (const auto i : c10::irange(size())) { + tmp[i] = calc_igammac(tmp[i], tmp_x[i]); + } + return loadu(tmp); + } + Vectorized neg() const { + return _mm512_xor_ps(_mm512_set1_ps(-0.f), values); + } + Vectorized nextafter(const Vectorized &b) const { + return Vectorized(Sleef_nextafterf16(values, b)); + } + Vectorized round() const { + return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + } + Vectorized tan() const { + return Vectorized(Sleef_tanf16_u10(values)); + } + Vectorized tanh() const { + return Vectorized(Sleef_tanhf16_u10(values)); + } + Vectorized trunc() const { + return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); + } + Vectorized lgamma() const { + return Vectorized(Sleef_lgammaf16_u10(values)); + } + Vectorized sqrt() const { + return _mm512_sqrt_ps(values); + } + Vectorized reciprocal() const { + return _mm512_div_ps(_mm512_set1_ps(1), values); + } + Vectorized rsqrt() const { + return _mm512_div_ps(_mm512_set1_ps(1), _mm512_sqrt_ps(values)); + } + Vectorized pow(const Vectorized &b) const { + return Vectorized(Sleef_powf16_u10(values, b)); + } + // Comparison using the _CMP_**_OQ predicate. 
+ // `O`: get false if an operand is NaN + // `Q`: do not raise if an operand is NaN + Vectorized operator==(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator!=(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator<(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator<=(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator>(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GT_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized operator>=(const Vectorized& other) const { + auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GE_OQ); + return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask, + 0xFFFFFFFF)); + } + + Vectorized eq(const Vectorized& other) const; + Vectorized ne(const Vectorized& other) const; + Vectorized gt(const Vectorized& other) const; + Vectorized ge(const Vectorized& other) const; + Vectorized lt(const Vectorized& other) const; + Vectorized le(const Vectorized& other) const; +}; + +template <> +Vectorized inline operator+(const Vectorized& a, const Vectorized& b) { + return _mm512_add_ps(a, b); +} + +template <> +Vectorized inline operator-(const Vectorized& a, const Vectorized& b) { + return _mm512_sub_ps(a, b); +} + +template <> +Vectorized inline operator*(const Vectorized& a, const Vectorized& b) { + return _mm512_mul_ps(a, b); +} + +template <> +Vectorized inline operator/(const Vectorized& a, const Vectorized& b) { + return _mm512_div_ps(a, b); +} + +// frac. Implement this here so we can use subtraction +inline Vectorized Vectorized::frac() const { + return *this - this->trunc(); +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi32(0); + auto max = _mm512_max_ps(a, b); + auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask, + 0xFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. + return _mm512_or_ps(max, isnan); +} + +// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if +// either input is a NaN. +template <> +Vectorized inline minimum(const Vectorized& a, const Vectorized& b) { + auto zero_vec = _mm512_set1_epi32(0); + auto min = _mm512_min_ps(a, b); + auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q); + auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask, + 0xFFFFFFFF)); + // Exploit the fact that all-ones is a NaN. 
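+  // (0xFFFFFFFF reinterpreted as a float is a quiet NaN, so OR-ing the mask
+  // into the blended minimum propagates NaN for every lane where the compare
+  // above was unordered, i.e. where either a or b is NaN.)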
+ return _mm512_or_ps(min, isnan); +} + +template <> +Vectorized inline clamp(const Vectorized& a, const Vectorized& min, const Vectorized& max) { + return _mm512_min_ps(max, _mm512_max_ps(min, a)); +} + +template <> +Vectorized inline clamp_max(const Vectorized& a, const Vectorized& max) { + return _mm512_min_ps(max, a); +} + +template <> +Vectorized inline clamp_min(const Vectorized& a, const Vectorized& min) { + return _mm512_max_ps(min, a); +} + +template <> +Vectorized inline operator&(const Vectorized& a, const Vectorized& b) { + return _mm512_and_ps(a, b); +} + +template <> +Vectorized inline operator|(const Vectorized& a, const Vectorized& b) { + return _mm512_or_ps(a, b); +} + +template <> +Vectorized inline operator^(const Vectorized& a, const Vectorized& b) { + return _mm512_xor_ps(a, b); +} + +inline Vectorized Vectorized::eq(const Vectorized& other) const { + return (*this == other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ne(const Vectorized& other) const { + return (*this != other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::gt(const Vectorized& other) const { + return (*this > other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::ge(const Vectorized& other) const { + return (*this >= other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::lt(const Vectorized& other) const { + return (*this < other) & Vectorized(1.0f); +} + +inline Vectorized Vectorized::le(const Vectorized& other) const { + return (*this <= other) & Vectorized(1.0f); +} + +template <> +inline void convert(const float* src, float* dst, int64_t n) { + int64_t i; +#pragma unroll + for (i = 0; i <= (n - Vectorized::size()); i += Vectorized::size()) { + _mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i)); + } +#pragma unroll + for (; i < n; i++) { + dst[i] = src[i]; + } +} + +template <> +Vectorized inline fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmadd_ps(a, b, c); +} + +template <> +Vectorized inline fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return _mm512_fmsub_ps(a, b, c); +} + +// TODO(jgong5): rewrite with ATEN vectorized (need to add unpack and shuffle) +// Used by Inductor CPP codegen +// Code referred to FBGEMM: +// https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6 +// 16 * 6 = 96 instructions +template<> +inline void transpose_mxn( + const float* src, + int64_t ld_src, + float* dst, + int64_t ld_dst) { + // load from src to registers + // a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 + // b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 + // c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 + // d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 + // e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15 + // f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 + // g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15 + // h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15 + // i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15 + // j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15 + // k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15 + // l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15 + // m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15 + // n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15 + // o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15 + // p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 + __m512 a = 
_mm512_loadu_ps(&src[0 * ld_src]); + __m512 b = _mm512_loadu_ps(&src[1 * ld_src]); + __m512 c = _mm512_loadu_ps(&src[2 * ld_src]); + __m512 d = _mm512_loadu_ps(&src[3 * ld_src]); + __m512 e = _mm512_loadu_ps(&src[4 * ld_src]); + __m512 f = _mm512_loadu_ps(&src[5 * ld_src]); + __m512 g = _mm512_loadu_ps(&src[6 * ld_src]); + __m512 h = _mm512_loadu_ps(&src[7 * ld_src]); + __m512 i = _mm512_loadu_ps(&src[8 * ld_src]); + __m512 j = _mm512_loadu_ps(&src[9 * ld_src]); + __m512 k = _mm512_loadu_ps(&src[10 * ld_src]); + __m512 l = _mm512_loadu_ps(&src[11 * ld_src]); + __m512 m = _mm512_loadu_ps(&src[12 * ld_src]); + __m512 n = _mm512_loadu_ps(&src[13 * ld_src]); + __m512 o = _mm512_loadu_ps(&src[14 * ld_src]); + __m512 p = _mm512_loadu_ps(&src[15 * ld_src]); + + __m512 ta, tb, tc, td, te, tf, tg, th, ti, tj, tk, tl, tm, tn, to, tq; + // unpacking and interleaving 32-bit elements + // a0 b0 a1 b1 a4 b4 a5 b5 a8 b8 a9 b9 a12 b12 a13 b13 + // a2 b2 a3 b3 a6 b6 a7 b7 a10 b10 a11 b11 a14 b14 a15 b15 + // c0 d0 c1 d1 ... + // c2 d2 c3 d3 ... + // e0 f0 e1 f1 ... + // e2 f2 e3 f3 ... + // g0 h0 g1 h1 ... + // g2 h2 g3 h3 ... + // i0 ... + // i2 ... + // k0 ... + // k2 ... + // m0 ... + // m2 ... + // o0 ... + // o1 ... + ta = _mm512_unpacklo_ps(a, b); + tb = _mm512_unpackhi_ps(a, b); + tc = _mm512_unpacklo_ps(c, d); + td = _mm512_unpackhi_ps(c, d); + te = _mm512_unpacklo_ps(e, f); + tf = _mm512_unpackhi_ps(e, f); + tg = _mm512_unpacklo_ps(g, h); + th = _mm512_unpackhi_ps(g, h); + ti = _mm512_unpacklo_ps(i, j); + tj = _mm512_unpackhi_ps(i, j); + tk = _mm512_unpacklo_ps(k, l); + tl = _mm512_unpackhi_ps(k, l); + tm = _mm512_unpacklo_ps(m, n); + tn = _mm512_unpackhi_ps(m, n); + to = _mm512_unpacklo_ps(o, p); + tq = _mm512_unpackhi_ps(o, p); + + // unpacking and interleaving 64-bit elements + // a0 b0 c0 d0 a4 b4 c4 d4 a8 b8 c8 d8 a12 b12 c12 d12 + // a1 b1 c1 d1 ... + // a2 b2 c2 d2 ... + // a3 b3 c3 d3 ... + // e0 f0 g0 h0 e4 f4 g4 h4 e8 f8 g8 h8 e12 f12 g12 h12 + // e1 f1 g1 h1 ... + // e2 f2 g2 h2 ... + // e3 f3 g3 h3 ... + // i0 j0 k0 l0 ... + // i1 j1 k1 l1 ... + // i2 j2 k2 l2 ... + // i3 j3 k3 l3 ... + // m0 n0 o0 p0 ... + // m1 n1 o1 p1 ... + // m2 n2 o2 p2 ... + // m3 n3 o3 p3 ... 
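+  // The transpose is assembled at doubling granularity: the 32-bit unpacks
+  // above interleave pairs of rows, the 64-bit unpacks below gather 4-element
+  // column fragments, and the two _mm512_shuffle_f32x4 passes that follow
+  // move whole 128-bit lanes so each destination register ends up holding one
+  // full column.  The same idea on a 4x4 tile, for intuition only:
+  //
+  //   a0 a1 a2 a3        a0 b0 a1 b1        a0 b0 c0 d0
+  //   b0 b1 b2 b3   ->   c0 d0 c1 d1   ->   a1 b1 c1 d1
+  //   c0 c1 c2 c3        a2 b2 a3 b3        a2 b2 c2 d2
+  //   d0 d1 d2 d3        c2 d2 c3 d3        a3 b3 c3 d3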
+ a = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(ta), _mm512_castps_pd(tc))); + b = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(ta), _mm512_castps_pd(tc))); + c = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(tb), _mm512_castps_pd(td))); + d = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(tb), _mm512_castps_pd(td))); + e = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(te), _mm512_castps_pd(tg))); + f = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(te), _mm512_castps_pd(tg))); + g = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(tf), _mm512_castps_pd(th))); + h = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(tf), _mm512_castps_pd(th))); + i = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(ti), _mm512_castps_pd(tk))); + j = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(ti), _mm512_castps_pd(tk))); + k = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(tj), _mm512_castps_pd(tl))); + l = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(tj), _mm512_castps_pd(tl))); + m = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(tm), _mm512_castps_pd(to))); + n = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(tm), _mm512_castps_pd(to))); + o = _mm512_castpd_ps( + _mm512_unpacklo_pd(_mm512_castps_pd(tn), _mm512_castps_pd(tq))); + p = _mm512_castpd_ps( + _mm512_unpackhi_pd(_mm512_castps_pd(tn), _mm512_castps_pd(tq))); + + // shuffle 128-bits (composed of 4 32-bit elements) + // a0 b0 c0 d0 a8 b8 c8 d8 e0 f0 g0 h0 e8 f8 g8 h8 + // a1 b1 c1 d1 ... + // a2 b2 c2 d2 ... + // a3 b3 c3 d3 ... + // a4 b4 c4 d4 ... + // a5 b5 c5 d5 ... + // a6 b6 c6 d6 ... + // a7 b7 c7 d7 ... + // i0 j0 k0 l0 i8 j8 k8 l8 m0 n0 o0 p0 m8 n8 o8 p8 + // i1 j1 k1 l1 ... + // i2 j2 k2 l2 ... + // i3 j3 k3 l3 ... + // i4 j4 k4 l4 ... + // i5 j5 k5 l5 ... + // i6 j6 k6 l6 ... + // i7 j7 k7 l7 ... + ta = _mm512_shuffle_f32x4(a, e, 0x88); + tb = _mm512_shuffle_f32x4(b, f, 0x88); + tc = _mm512_shuffle_f32x4(c, g, 0x88); + td = _mm512_shuffle_f32x4(d, h, 0x88); + te = _mm512_shuffle_f32x4(a, e, 0xdd); + tf = _mm512_shuffle_f32x4(b, f, 0xdd); + tg = _mm512_shuffle_f32x4(c, g, 0xdd); + th = _mm512_shuffle_f32x4(d, h, 0xdd); + ti = _mm512_shuffle_f32x4(i, m, 0x88); + tj = _mm512_shuffle_f32x4(j, n, 0x88); + tk = _mm512_shuffle_f32x4(k, o, 0x88); + tl = _mm512_shuffle_f32x4(l, p, 0x88); + tm = _mm512_shuffle_f32x4(i, m, 0xdd); + tn = _mm512_shuffle_f32x4(j, n, 0xdd); + to = _mm512_shuffle_f32x4(k, o, 0xdd); + tq = _mm512_shuffle_f32x4(l, p, 0xdd); + + // shuffle 128-bits (composed of 4 32-bit elements) + // a0 b0 c0 d0 ... o0 + // a1 b1 c1 d1 ... o1 + // a2 b2 c2 d2 ... o2 + // a3 b3 c3 d3 ... o3 + // a4 ... + // a5 ... + // a6 ... + // a7 ... + // a8 ... + // a9 ... + // a10 ... + // a11 ... + // a12 ... + // a13 ... + // a14 ... + // a15 b15 c15 d15 ... 
o15 + a = _mm512_shuffle_f32x4(ta, ti, 0x88); + b = _mm512_shuffle_f32x4(tb, tj, 0x88); + c = _mm512_shuffle_f32x4(tc, tk, 0x88); + d = _mm512_shuffle_f32x4(td, tl, 0x88); + e = _mm512_shuffle_f32x4(te, tm, 0x88); + f = _mm512_shuffle_f32x4(tf, tn, 0x88); + g = _mm512_shuffle_f32x4(tg, to, 0x88); + h = _mm512_shuffle_f32x4(th, tq, 0x88); + i = _mm512_shuffle_f32x4(ta, ti, 0xdd); + j = _mm512_shuffle_f32x4(tb, tj, 0xdd); + k = _mm512_shuffle_f32x4(tc, tk, 0xdd); + l = _mm512_shuffle_f32x4(td, tl, 0xdd); + m = _mm512_shuffle_f32x4(te, tm, 0xdd); + n = _mm512_shuffle_f32x4(tf, tn, 0xdd); + o = _mm512_shuffle_f32x4(tg, to, 0xdd); + p = _mm512_shuffle_f32x4(th, tq, 0xdd); + + // store from registers to dst + _mm512_storeu_ps(&dst[0 * ld_dst], a); + _mm512_storeu_ps(&dst[1 * ld_dst], b); + _mm512_storeu_ps(&dst[2 * ld_dst], c); + _mm512_storeu_ps(&dst[3 * ld_dst], d); + _mm512_storeu_ps(&dst[4 * ld_dst], e); + _mm512_storeu_ps(&dst[5 * ld_dst], f); + _mm512_storeu_ps(&dst[6 * ld_dst], g); + _mm512_storeu_ps(&dst[7 * ld_dst], h); + _mm512_storeu_ps(&dst[8 * ld_dst], i); + _mm512_storeu_ps(&dst[9 * ld_dst], j); + _mm512_storeu_ps(&dst[10 * ld_dst], k); + _mm512_storeu_ps(&dst[11 * ld_dst], l); + _mm512_storeu_ps(&dst[12 * ld_dst], m); + _mm512_storeu_ps(&dst[13 * ld_dst], n); + _mm512_storeu_ps(&dst[14 * ld_dst], o); + _mm512_storeu_ps(&dst[15 * ld_dst], p); +} + +#endif + +}}} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_mask.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..9ba1b18372eb549f204bab21856ce7fc4887d283 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_mask.h @@ -0,0 +1,155 @@ +#pragma once + +#include +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + +template +struct VecMaskLoad< + T, + 1, + mask_t, + 1, + typename std::enable_if_t< + std::is_same_v || std::is_same_v || + std::is_same_v, + void>> { + static inline VectorizedN apply( + const T* ptr, + const VecMask& vec_mask) { + at::vec::Vectorized zero_vec(0); + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto int_mask = vec_mask.template cast()[0]; + auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, _MM_CMPINT_EQ); + if constexpr (std::is_same_v) { + return Vectorized(_mm512_mask_loadu_ps(zero_vec, mmask, ptr)); + } else { + return Vectorized(_mm512_mask_loadu_epi32(zero_vec, mmask, ptr)); + } + } +}; + +template +struct VecMaskLoad< + data_t, + 1, + mask_t, + 1, + typename std::enable_if< + std::is_same_v || + std::is_same_v>::type> { + static inline VectorizedN apply( + const data_t* ptr, + const VecMask& vec_mask) { + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto int_mask = vec_mask.template cast()[0]; + auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, _MM_CMPINT_EQ); + auto zero = _mm256_set1_epi16(0); + auto temp = _mm256_mask_loadu_epi16(zero, mmask, ptr); + return Vectorized( + _mm512_inserti32x8(_mm512_castsi256_si512(temp), zero, 1)); + } +}; + +template +struct VecMaskLoad< + data_t, + 1, + mask_t, + 1, + typename std::enable_if< + std::is_same_v || + std::is_same_v>::type> { + static inline VectorizedN apply( + const data_t* ptr, + const VecMask& vec_mask) { + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto int_mask = vec_mask.template cast()[0]; + auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, 
_MM_CMPINT_EQ); + auto zero = _mm_set1_epi8(0); + auto temp = _mm_mask_loadu_epi8(zero, mmask, ptr); + return Vectorized( + _mm512_inserti64x2(_mm512_set1_epi32(0), temp, 0)); + } +}; + +template +struct VecMaskLoad { + static inline VectorizedN apply( + const int64_t* ptr, + const VecMask& vec_mask) { + auto all_ones = _mm512_set1_epi32(0xFFFFFFFF); + auto zero = _mm512_set1_epi64(0); + auto int_mask = vec_mask.template cast()[0]; + auto mmask = _mm512_cmp_epi32_mask(int_mask, all_ones, _MM_CMPINT_EQ); + at::vec::VectorizedN result; + result[0] = _mm512_mask_loadu_epi64(zero, (__mmask8)mmask, ptr); + result[1] = _mm512_mask_loadu_epi64(zero, (__mmask8)(mmask >> 8), ptr + 8); + return result; + } +}; + +template <> +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + return Vectorized(_mm512_castsi512_ps(vec_mask[0])); + } +}; + +template <> +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + return Vectorized(_mm512_castps_si512(vec_mask[0])); + } +}; + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + auto int_vec = convert(VectorizedN(vec_mask)); + return VecMask(int_vec).cast(); + } +}; + +template <> +inline bool VecMask::all_zero() const { + __mmask16 mask = _mm512_test_epi32_mask(mask_[0], mask_[0]); + return mask == 0; +} + +template <> +inline bool VecMask::is_masked(int i) const { + return _mm512_movepi32_mask(mask_[0]) & (1 << i); +} + +template <> +inline bool VecMask::all_masked() const { + __mmask16 mask = _mm512_movepi32_mask(mask_[0]); + return mask == 0xffff; +} + +#define VEC_MASK_METHOD_WITH_CAST_TO_INT( \ + T, N, return_type, method, args_def, args) \ + template <> \ + inline return_type VecMask::method args_def const { \ + return cast().method args; \ + } + +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_zero, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_zero, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, is_masked, (int i), (i)) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, is_masked, (int i), (i)) +VEC_MASK_METHOD_WITH_CAST_TO_INT(float, 1, bool, all_masked, (), ()) +VEC_MASK_METHOD_WITH_CAST_TO_INT(int64_t, 2, bool, all_masked, (), ()) + +#undef VEC_MASK_DEFINE_METHOD_WITH_CAST_TO_INT + +#endif + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h new file mode 100644 index 0000000000000000000000000000000000000000..21389da3cdc0359e69c0d095f5116aa1336e1d1a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h @@ -0,0 +1,1408 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +// This file defines Vectorized<> for the quantized types. +// +// +// Currently, we simply use these classes as efficient converters between +// the quantized types and Vectorized, usually in bandwidth-bound cases +// where doing the arithmetic in full-precision is acceptable (e.g. +// elementwise operators). +// +// +// Conversions are as follows: +// Vectorized -> 4x Vectorized +// Vectorized -> 4x Vectorized +// Vectorized -> 1x Vectorized +// +// The size of the returned float vector is specified by the special +// constexpr function float_num_vecs. 
The type of the value returned +// from dequantize (and expected as an argument to quantize) is +// specified by float_vec_return_type. +// +// When writing kernels with these vectors, it is expected that floating- +// point operations will be carried out in a loop over Vectorized::float_num_vecs +// iterations. + +namespace at { +namespace vec { +inline namespace CPU_CAPABILITY { + +#if defined(CPU_CAPABILITY_AVX512) + +#ifdef _MSC_VER +__declspec(align(64)) struct Vectorizedqi { + protected: + __m512i vals; +#else +struct Vectorizedqi { + protected: + __m512i vals __attribute__((aligned(64))); +#endif + + public: + Vectorizedqi() {} + Vectorizedqi(__m512i v) : vals(v) {} + operator __m512i() const { + return vals; + } +}; + + +template +__m512i pack_saturate_and_clamp( + __m512i first, + __m512i second, + T min_val, + T max_val); + +template <> +inline __m512i pack_saturate_and_clamp( + __m512i first, + __m512i second, + int32_t min_val, + int32_t max_val) { + // This function is for linkage only, will not be used + AT_ERROR("pack_saturate_and_clamp is not supported"); +} + +template <> +inline __m512i pack_saturate_and_clamp( + __m512i first, + __m512i second, + int8_t min_val, + int8_t max_val) { + __m512i packed_and_sat = _mm512_packs_epi16(first, second); + return _mm512_max_epi8( + _mm512_set1_epi8(min_val), + _mm512_min_epi8(packed_and_sat, _mm512_set1_epi8(max_val))); +} + +template <> +inline __m512i pack_saturate_and_clamp( + __m512i first, + __m512i second, + uint8_t min_val, + uint8_t max_val) { + __m512i packed_and_sat = _mm512_packus_epi16(first, second); + return _mm512_max_epu8( + _mm512_set1_epi8(min_val), + _mm512_min_epu8(packed_and_sat, _mm512_set1_epi8(max_val))); +} + +template +typename std::enable_if_t || std::is_same_v, at::vec::Vectorized> +inline convert_int8_to_float(at::vec::Vectorized src) { + // Note: this function only convert inputs number of elements equal to at::vec::Vectorized.size() + // Only handle first 16*8 bits + __m128i input_128 = _mm512_castsi512_si128(src); + // Convert from 16*uint8/int8 to 16*int32 + __m512i input_512_extended; + if constexpr (std::is_same_v) + input_512_extended = _mm512_cvtepu8_epi32(input_128); + else + input_512_extended = _mm512_cvtepi8_epi32(input_128); + // Convert from 16*int32 to 16*float32 + return _mm512_cvtepi32_ps(input_512_extended); +} + +template +typename std::enable_if_t || std::is_same_v, at::vec::Vectorized> +inline convert_float_to_int8(at::vec::Vectorized src) { + // Convert from float32 to int32 with truncation + __m512i x_values_int32 = _mm512_cvttps_epi32(src); + + // Convert from int32 to int16 using signed saturation + __m512i xy_packed_v = _mm512_packs_epi32(x_values_int32, x_values_int32); + + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + + // Convert from int16 to uint8/int8 using unsigned saturation + __m512i xyzw_clamped_v = pack_saturate_and_clamp( + xy_packed_v, xy_packed_v, min_val, max_val); + __m512i permute_mask_v = + _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02, + 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00); + return _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v); +} + +template +__FORCE_INLINE void QuantizeAvx512( + const float* src, + T* dst, + int len, + float inverse_scale, + int64_t zero_point) { + constexpr int VLEN = 16; + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + const __m512i min_v = _mm512_set1_epi32(min_val); + 
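+  // min_v and max_v bound the final clamp of the affine quantization applied
+  // below:
+  //   q = clamp(round(x * inverse_scale) + zero_point, min_val, max_val)
+  // For example (values chosen purely for illustration), with
+  // inverse_scale = 20.0f (i.e. scale = 0.05) and zero_point = 128 for uint8,
+  // x = 0.62f maps to round(12.4f) + 128 = 140, well inside [0, 255].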
const __m512i max_v = _mm512_set1_epi32(max_val); + // This is the largest int32 value < int32_max exactly representable in float + constexpr int32_t int32_float_max_val = + std::numeric_limits::max() - 127; + int i = 0; + __m512 inverse_scale_v = _mm512_set1_ps(inverse_scale); + // clang-format off + static const __m512i shuffle_mask_v = _mm512_set_epi8( + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0c, 0x08, 0x04, 0x00, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0c, 0x08, 0x04, 0x00, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0c, 0x08, 0x04, 0x00, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0x0c, 0x08, 0x04, 0x00); + // clang-format on + __m512i permute_mask_v = + _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02, + 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00); + __m512i permute_mask_l8_v = + _mm512_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0c, 0x08, 0x04, 0x00); + int len_aligned = len / (VLEN * 4) * (VLEN * 4); + for (; i < len_aligned; i += 4 * VLEN) { + // x + __m512 x_vals = _mm512_load_ps(src + i); + __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v); + // If the floating point value is greater than int32_max, + // _mm512_cvtps_epi32 converts them to -ve. Clip at int32_float_max_val to + // Clip at int32_float_max_val to avoid this. + x_transformed_v = + _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val)); + // y + __m512 y_vals = _mm512_load_ps(src + i + VLEN); + __m512 y_transformed_v = _mm512_mul_ps(y_vals, inverse_scale_v); + y_transformed_v = + _mm512_min_ps(y_transformed_v, _mm512_set1_ps(int32_float_max_val)); + // z + __m512 z_vals = _mm512_load_ps(src + i + 2 * VLEN); + __m512 z_transformed_v = _mm512_mul_ps(z_vals, inverse_scale_v); + z_transformed_v = + _mm512_min_ps(z_transformed_v, _mm512_set1_ps(int32_float_max_val)); + // w + __m512 w_vals = _mm512_load_ps(src + i + 3 * VLEN); + __m512 w_transformed_v = _mm512_mul_ps(w_vals, inverse_scale_v); + w_transformed_v = + _mm512_min_ps(w_transformed_v, _mm512_set1_ps(int32_float_max_val)); + + __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v); + __m512i y_rounded_v = _mm512_cvtps_epi32(y_transformed_v); + __m512i z_rounded_v = _mm512_cvtps_epi32(z_transformed_v); + __m512i w_rounded_v = _mm512_cvtps_epi32(w_transformed_v); + + // add zero point + x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point)); + y_rounded_v = _mm512_add_epi32(y_rounded_v, _mm512_set1_epi32(zero_point)); + z_rounded_v = _mm512_add_epi32(z_rounded_v, _mm512_set1_epi32(zero_point)); + w_rounded_v = _mm512_add_epi32(w_rounded_v, _mm512_set1_epi32(zero_point)); + + __m512i xy_packed_v = _mm512_packs_epi32(x_rounded_v, y_rounded_v); + __m512i zw_packed_v = _mm512_packs_epi32(z_rounded_v, w_rounded_v); + __m512i xyzw_clamped_v = + pack_saturate_and_clamp(xy_packed_v, zw_packed_v, min_val, max_val); + + xyzw_clamped_v = + _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v); + _mm512_storeu_si512(reinterpret_cast<__m512i*>(dst + i), xyzw_clamped_v); + } + + // Additional 8-lane AVX512 version to take advantage when len is smaller + // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM) + for (; i < len / VLEN * VLEN; i += VLEN) { + __m512 x_vals = _mm512_load_ps(src + i); + __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v); + x_transformed_v = + 
_mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val)); + __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v); + x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point)); + __m512i x_clipped_v = + _mm512_max_epi32(min_v, _mm512_min_epi32(max_v, x_rounded_v)); + + x_clipped_v = _mm512_shuffle_epi8(x_clipped_v, shuffle_mask_v); + x_clipped_v = _mm512_permutexvar_epi32(permute_mask_l8_v, x_clipped_v); + _mm_storeu_si128( + reinterpret_cast<__m128i*>(dst + i), + _mm512_castsi512_si128(x_clipped_v)); + } + + for (; i < len; ++i) { + float transformed = src[i] * inverse_scale; + + // Not exactly the same behavior as the vectorized code. + // The vectorized code above always rounds to even in halfway cases + // (https://software.intel.com/en-us/node/523819), but std::nearbyint + // does the same only when the current rounding mode is FE_TONEAREST. + // However, in practice, this should not be a problem because most cases + // use the default rounding mode FE_TONEAREST. + // Note that we cannot implement the same behavior as the vectorized code + // using std::round because it does rounding away from zero in halfway + // cases. + transformed = zero_point + std::nearbyint(transformed); + float clipped = + std::min(std::max(transformed, float(min_val)), float(max_val)); + dst[i] = clipped; + } +} + +template<> +struct Vectorized : public Vectorizedqi { + using size_type = int; + static constexpr size_type size() { + return 16; + } + + static constexpr int float_num_vecs() { + return 1; + } + + static constexpr int int_num_vecs() { + return 1; + } + + using float_vec_return_type = std::array, 1>; + using int_vec_return_type = std::array, 1>; + using value_type = c10::qint32::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m512i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint32& val) { + value_type uw = val.val_; + vals = _mm512_set1_epi32(uw); + } + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm512_storeu_si512((__m512i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
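+    // Zeroing first and then copying `count` elements means a partial load
+    // such as (hypothetical buffer and count)
+    //   auto v = Vectorized<c10::qint32>::loadu(tail_buf, /*count=*/10);
+    // leaves lanes 10..15 at 0 instead of reading past the end of the buffer.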
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + __m512 float_vals = _mm512_cvtepi32_ps(vals); + return {vec::fmadd(scale, Vectorized(float_vals), scale_zp_premul)}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + __m512 float_vals = _mm512_cvtepi32_ps(vals); + return {(Vectorized(float_vals) - zero_point) * scale}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + Vectorized retval; + auto rhs_data = (__m512)rhs[0]; + at::native::quantize_vec( + scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 16); + return retval; + } + + Vectorized maximum(Vectorized b) const { + return _mm512_max_epi32(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm512_min_epi32(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm512_min_epi32( + _mm512_max_epi32(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + return {_mm512_sub_epi32(vals, b)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m512 multiplier_v = _mm512_set1_ps(multiplier); + __m512i zero_point_v = _mm512_set1_epi32(zero_point); + + __m512 scaled = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier_v); + __m512i rounded = _mm512_cvtps_epi32(scaled); + return _mm512_add_epi32(rounded, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm512_loadu_si512((const __m512i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + return _mm512_mullo_epi32(a, b); +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + return _mm512_add_epi32(a, b); +} + +/* + * Convert values from int32 back to int8/uint8 + */ +template +__m512i RequantizeAvx512( + const std::array, 4>& inp, + __m512 multiplier, + __m512i zp) { + static_assert( + std::is_same_v || std::is_same_v, + "Only int8_t/uint8_t are supported"); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + __m512i permute_mask_v = + _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02, + 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00); + __m512 x_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier); + __m512 y_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[1]), multiplier); + __m512 z_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[2]), multiplier); + __m512 w_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[3]), multiplier); + + __m512i x_rounded_v = _mm512_cvtps_epi32(x_scaled_v); + __m512i y_rounded_v = _mm512_cvtps_epi32(y_scaled_v); + __m512i z_rounded_v = _mm512_cvtps_epi32(z_scaled_v); + __m512i w_rounded_v = _mm512_cvtps_epi32(w_scaled_v); + + /* Add zero point */ + __m512i x_v = _mm512_add_epi32(x_rounded_v, zp); + __m512i y_v = _mm512_add_epi32(y_rounded_v, zp); + 
__m512i z_v = _mm512_add_epi32(z_rounded_v, zp); + __m512i w_v = _mm512_add_epi32(w_rounded_v, zp); + + /* Pack to int16_t and saturate */ + __m512i xy_packed_v = _mm512_packs_epi32(x_v, y_v); + __m512i zw_packed_v = _mm512_packs_epi32(z_v, w_v); + + __m512i xyzw_clamped_v = + pack_saturate_and_clamp(xy_packed_v, zw_packed_v, min_val, max_val); + + /* + * xyzw_clamped_v has results in the following layout so we need to + * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 x8-11 y8-11 z8-11 w8-11 x12-15 y12-15 z12-15 w12-15 + */ + xyzw_clamped_v = _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v); + return xyzw_clamped_v; +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 64; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::qint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + + Vectorized() {} + Vectorized(__m512i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::qint8& val) { + value_type uw = val.val_; + vals = _mm512_set1_epi8(uw); + } + + // This is needed because the compiler emits awful code for the default + // constructor for moving the enum + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + + // This is added to avoid error: definition of implicit copy assignment operator + // for 'Vectorized' is deprecated because it has a user-declared + // copy constructor [-Werror,-Wdeprecated-copy] + Vectorized& operator=(const Vectorized&) = default; + + void store(void* ptr, int count = size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm512_storeu_si512((__m512i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + private: + __m512i cvtepi8_epi32(__m128i epi8_vals) const { + return _mm512_cvtepi8_epi32(epi8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_neg_zp_premul) const { + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]); + __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]); + __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]); + __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]); + #else + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + #endif + + __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_neg_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_neg_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_neg_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_neg_zp_premul); + return {val0, val1, val2, val3}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]); + __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]); + __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]); + __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]); + #else + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + #endif + + __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0)); + __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1)); + __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2)); + __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3)); + + auto val0 = (Vectorized(float_val0) - zero_point) * scale; + auto val1 = (Vectorized(float_val1) - zero_point) * scale; + auto val2 = (Vectorized(float_val2) - zero_point) * scale; + auto val3 = (Vectorized(float_val3) - zero_point) * scale; + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + int8_t quantized_values[64]; + QuantizeAvx512( + rhs_data, quantized_values, 64, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm512_max_epi8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm512_min_epi8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return 
_mm512_min_epi8( + _mm512_max_epi8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]); + __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]); + __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]); + __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]); + #else + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + #endif + + __m512i int32_val0 = cvtepi8_epi32(int_val0); + __m512i int32_val1 = cvtepi8_epi32(int_val1); + __m512i int32_val2 = cvtepi8_epi32(int_val2); + __m512i int32_val3 = cvtepi8_epi32(int_val3); + + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_b0 = _mm_set_epi64x(b.vals.m512i_u64[1], b.vals.m512i_u64[0]); + __m128i int_b1 = _mm_set_epi64x(b.vals.m512i_u64[3], b.vals.m512i_u64[2]); + __m128i int_b2 = _mm_set_epi64x(b.vals.m512i_u64[5], b.vals.m512i_u64[4]); + __m128i int_b3 = _mm_set_epi64x(b.vals.m512i_u64[7], b.vals.m512i_u64[6]); + #else + __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]); + __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]); + __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]); + __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]); + #endif + + __m512i int32_b0 = cvtepi8_epi32(int_b0); + __m512i int32_b1 = cvtepi8_epi32(int_b1); + __m512i int32_b2 = cvtepi8_epi32(int_b2); + __m512i int32_b3 = cvtepi8_epi32(int_b3); + + __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0); + __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1); + __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2); + __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3); + + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m512 multiplier_v = _mm512_set1_ps(multiplier); + __m512i zero_point_v = _mm512_set1_epi32(zero_point); + return RequantizeAvx512(inp, multiplier_v, zero_point_v); + } + + private: + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm512_loadu_si512((const __m512i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template<> +struct Vectorized : public Vectorizedqi { + static constexpr int size() { + return 64; + } + + static constexpr int float_num_vecs() { + return 4; + } + + static constexpr int int_num_vecs() { + return 4; + } + + using float_vec_return_type = std::array, 4>; + using int_vec_return_type = std::array, 4>; + using value_type = typename c10::quint8::underlying; + + public: + using Vectorizedqi::Vectorizedqi; + Vectorized() {} + + Vectorized(__m512i vals_) { vals = vals_;} + + // Broadcast constructor + Vectorized(const c10::quint8& val) { + value_type uw = val.val_; + vals = _mm512_set1_epi8(uw); + } + + Vectorized(const Vectorized& other) : Vectorizedqi(other.vals) { } + + // This is added to avoid error: definition of implicit copy assignment operator + // for 'Vectorized' is deprecated because it has a user-declared + // copy constructor [-Werror,-Wdeprecated-copy] + Vectorized& operator=(const Vectorized&) = default; + + void store(void* ptr, int count 
= size()) const { + if (count != size()) { + memcpy(ptr, &vals, count * sizeof(value_type)); + } else { + _mm512_storeu_si512((__m512i*)ptr, vals); + } + } + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. + for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + private: + __m512i cvtepu8_epi32(__m128i epu8_vals) const { + return _mm512_cvtepu8_epi32(epu8_vals); + } + + public: + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]); + __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]); + __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]); + __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]); + #else + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + #endif + + __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = + vec::fmadd(scale, Vectorized(float_val0), scale_zp_premul); + auto val1 = + vec::fmadd(scale, Vectorized(float_val1), scale_zp_premul); + auto val2 = + vec::fmadd(scale, Vectorized(float_val2), scale_zp_premul); + auto val3 = + vec::fmadd(scale, Vectorized(float_val3), scale_zp_premul); + + return {val0, val1, val2, val3}; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]); + __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]); + __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]); + __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]); + #else + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + #endif + + __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0)); + __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1)); + __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2)); + __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3)); + + auto val0 = (Vectorized(float_val0) - zero_point) * scale; + auto val1 = (Vectorized(float_val1) - zero_point) * scale; + auto val2 = (Vectorized(float_val2) - zero_point) * scale; + auto val3 = (Vectorized(float_val3) - zero_point) * scale; + + return {val0, val1, val2, val3}; + } + + static Vectorized quantize( + const float_vec_return_type& 
rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + auto* rhs_data = (float*)rhs.data(); + uint8_t quantized_values[64]; + QuantizeAvx512( + rhs_data, quantized_values, 64, inverse_scale, zero_point); + return Vectorized::loadu(quantized_values); + } + + Vectorized maximum(Vectorized b) const { + return _mm512_max_epu8(vals, b.vals); + } + + Vectorized minimum(Vectorized b) const { + return _mm512_min_epu8(vals, b.vals); + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + return _mm512_min_epu8( + _mm512_max_epu8(vals, zero_point.vals), q_six.vals); + } + + int_vec_return_type widening_subtract(Vectorized b) const { + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_val0 = _mm_set_epi64x(vals.m512i_u64[1], vals.m512i_u64[0]); + __m128i int_val1 = _mm_set_epi64x(vals.m512i_u64[3], vals.m512i_u64[2]); + __m128i int_val2 = _mm_set_epi64x(vals.m512i_u64[5], vals.m512i_u64[4]); + __m128i int_val3 = _mm_set_epi64x(vals.m512i_u64[7], vals.m512i_u64[6]); + #else + __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]); + __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]); + __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]); + __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]); + #endif + + __m512i int32_val0 = cvtepu8_epi32(int_val0); + __m512i int32_val1 = cvtepu8_epi32(int_val1); + __m512i int32_val2 = cvtepu8_epi32(int_val2); + __m512i int32_val3 = cvtepu8_epi32(int_val3); + + #if defined(_MSC_VER) && !defined(__clang__) + __m128i int_b0 = _mm_set_epi64x(b.vals.m512i_u64[1], b.vals.m512i_u64[0]); + __m128i int_b1 = _mm_set_epi64x(b.vals.m512i_u64[3], b.vals.m512i_u64[2]); + __m128i int_b2 = _mm_set_epi64x(b.vals.m512i_u64[5], b.vals.m512i_u64[4]); + __m128i int_b3 = _mm_set_epi64x(b.vals.m512i_u64[7], b.vals.m512i_u64[6]); + #else + __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]); + __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]); + __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]); + __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]); + #endif + + __m512i int32_b0 = cvtepu8_epi32(int_b0); + __m512i int32_b1 = cvtepu8_epi32(int_b1); + __m512i int32_b2 = cvtepu8_epi32(int_b2); + __m512i int32_b3 = cvtepu8_epi32(int_b3); + + __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0); + __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1); + __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2); + __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3); + return {Vectorized(res_0), + Vectorized(res_1), + Vectorized(res_2), + Vectorized(res_3)}; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + __m512 multiplier_v = _mm512_set1_ps(multiplier); + __m512i zero_point_v = _mm512_set1_epi32(zero_point); + return RequantizeAvx512(inp, multiplier_v, zero_point_v); + } + + private: + + // Load from memory constructor + Vectorized(const void* ptr) { + vals = _mm512_loadu_si512((const __m512i*)ptr); + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#else + +// NOTE: These are low-performance implementations that we fall back on. 
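+// They emulate the quantized Vectorized<T> API with plain element-wise loops
+// over a std::array of underlying values, so kernels written against the
+// vectorized interface still compile and run (just without SIMD).  The
+// recurring pattern in the structs below is simply:
+//
+//   // e.g. element-wise maximum
+//   for (const auto i : c10::irange(size())) {
+//     retval.vals[i] = std::max(vals[i], b.vals[i]);
+//   }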
+ +template < + typename T, + typename float_vec_return_type_, + typename int_vec_return_type_, + int size_> +struct VectorizedQuantizedConverter { + static constexpr int size() { + return size_; + } + + static constexpr int float_num_vecs() { + return size() / 8; + } + + static constexpr int int_num_vecs() { + return size() / 8; + } + + using float_vec_return_type = float_vec_return_type_; + using int_vec_return_type = int_vec_return_type_; + + using value_type = typename T::underlying; + std::array vals; + + VectorizedQuantizedConverter(T val) { + for (const auto i : c10::irange(size())) { + vals[i] = val.val_; + } + } + + VectorizedQuantizedConverter(const void* ptr) { + memcpy(vals.data(), ptr, sizeof(value_type) * size()); + } + + void store(void* ptr, int count = size()) const { + memcpy(ptr, vals.data(), count * sizeof(value_type)); + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point, + Vectorized scale_zp_premul) const { + float_vec_return_type rv; + for (const auto i : c10::irange(float_num_vecs())) { + float tmp_vals[16]; + for (const auto j : c10::irange(16)) { + tmp_vals[j] = at::native::dequantize_val( + scale[j], zero_point[j], T(vals[16 * i + j])); + } + rv[i] = Vectorized(tmp_vals[0], + tmp_vals[1], + tmp_vals[2], + tmp_vals[3], + tmp_vals[4], + tmp_vals[5], + tmp_vals[6], + tmp_vals[7], + tmp_vals[8], + tmp_vals[9], + tmp_vals[10], + tmp_vals[11], + tmp_vals[12], + tmp_vals[13], + tmp_vals[14], + tmp_vals[15]); + } + return rv; + } + + float_vec_return_type dequantize( + Vectorized scale, + Vectorized zero_point) const { + Vectorized scale_zp_premul; + return dequantize(scale, zero_point, scale_zp_premul); + } + + protected: + VectorizedQuantizedConverter() {} +}; + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16>() {} + Vectorized(c10::qint32 val) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint32, + std::array, 1>, + std::array, 1>, + 16>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 16], 16); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint32*)qvals.data(), + 16 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + for (const auto i : c10::irange(size())) { + retval[0].vals[i] = vals[i] - b.vals[i]; + } + return retval; + } + + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = + std::nearbyint(static_cast(inp[0].vals[i]) * multiplier) + + zero_point; + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +Vectorized inline operator*( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] * b.vals[i]; + } + return retval; +} + +template <> +Vectorized inline operator+( + const Vectorized& a, + const Vectorized& b) { + Vectorized retval; + for (const auto i : c10::irange(std::decay_t::size())) { + retval.vals[i] = a.vals[i] + b.vals[i]; + } + return retval; +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64> { + Vectorized() + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64>() {} + Vectorized(c10::qint8 val) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::qint8, + std::array, 4>, + std::array, 4>, + 64>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 16], 16); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::qint8*)qvals.data(), + 16 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + std::nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +template <> +struct Vectorized : public VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64> { + Vectorized() + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64>() {} + Vectorized(c10::quint8 val) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64>(val) {} + Vectorized(const void* ptr) + : VectorizedQuantizedConverter< + c10::quint8, + std::array, 4>, + std::array, 4>, + 64>(ptr) {} + + static Vectorized loadu(const void* ptr) { + return Vectorized(ptr); + } + + static Vectorized loadu(const void* ptr, int64_t count) { + __at_align__ value_type tmp_values[size()]; + // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 + // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two + // instructions while a loop would be compiled to one instruction. 
+ for (const auto i : c10::irange(size())) { + tmp_values[i] = 0; + } + std::memcpy(tmp_values, reinterpret_cast(ptr), count * sizeof(value_type)); + return loadu(tmp_values); + } + + static Vectorized quantize( + const float_vec_return_type& rhs, + float scale, + int32_t zero_point, + float inverse_scale) { + std::array qvals; + std::array float_vals; + + for (const auto i : c10::irange(float_num_vecs())) { + rhs[i].store(&float_vals[i * 16], 16); + } + + at::native::quantize_vec( + scale, + zero_point, + float_vals.data(), + (c10::quint8*)qvals.data(), + 16 * float_num_vecs()); + + return Vectorized::loadu(qvals.data()); + } + + Vectorized maximum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::max(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized minimum(Vectorized b) const { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min(vals[i], b.vals[i]); + } + return retval; + } + + Vectorized relu(Vectorized zero_point) const { + return maximum(zero_point); + } + + + Vectorized relu6( + Vectorized zero_point, + Vectorized q_six) { + Vectorized retval; + for (const auto i : c10::irange(size())) { + retval.vals[i] = std::min( + std::max(vals[i], zero_point.vals[i]), q_six.vals[i]); + } + return retval; + } + + int_vec_return_type widening_subtract(Vectorized b) const { + int_vec_return_type retval; + constexpr int elem_per_int_vec = size() / int_num_vecs(); + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + retval[i].vals[j] = + static_cast(vals[i * elem_per_int_vec + j]) - + static_cast(b.vals[i * elem_per_int_vec + j]); + } + } + return retval; + } + static Vectorized requantize_from_int( + const int_vec_return_type& inp, + float multiplier, + int32_t zero_point) { + constexpr int elem_per_int_vec = size() / int_num_vecs(); + constexpr auto min_val = std::numeric_limits::min(); + constexpr auto max_val = std::numeric_limits::max(); + Vectorized retval; + for (const auto i : c10::irange(int_num_vecs())) { + for (const auto j : c10::irange(elem_per_int_vec)) { + int32_t rounded = + std::nearbyint(static_cast(inp[i].vals[j]) * multiplier) + + zero_point; + retval.vals[i * elem_per_int_vec + j] = + std::min(std::max(rounded, min_val), max_val); + } + } + return retval; + } +}; + +template <> +Vectorized inline maximum(const Vectorized& a, const Vectorized& b) { + return a.maximum(b); +} + +#endif // defined(CPU_CAPABILITY_AVX512) && !defined(MSVC) + +}}} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h new file mode 100644 index 0000000000000000000000000000000000000000..d696c97b5949781843cd4738b70c9d8e7114896d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h @@ -0,0 +1,1134 @@ +#pragma once + +// DO NOT DEFINE STATIC DATA IN THIS HEADER! +// See Note [Do not compile initializers with AVX] +// +// Note [Do not compile initializers with AVX] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// If you define a static initializer in this file, the initialization will use +// AVX instructions because these object files are compiled with AVX enabled. +// We need to avoid non-trivial global data in these architecture specific files +// because there's no way to guard the global initializers with CPU capability +// detection. 
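+// For example, a namespace-scope definition such as
+//
+//   static Vectorized<float> kOnes(1.0f);   // hypothetical example, unsafe here
+//
+// would run its broadcast constructor, compiled with AVX512 instructions in
+// these translation units, during static initialization, before any runtime
+// CPU-capability dispatch could intervene.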
+// +// See https://github.com/pytorch/pytorch/issues/37577 for an instance +// of this bug in the past. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(__GNUC__) +#define __FORCE_INLINE __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +#define __FORCE_INLINE __forceinline +#endif + +// These macros helped us unify vec_base.h +#ifdef CPU_CAPABILITY_AVX512 +#if defined(__GNUC__) +#define __at_align__ __attribute__((aligned(64))) +#elif defined(_WIN32) +#define __at_align__ __declspec(align(64)) +#else +#define __at_align__ +#endif +#define VECTOR_WIDTH 64 +#define int_vector __m512i +#else // CPU_CAPABILITY_AVX512 +#if defined(__GNUC__) +#define __at_align__ __attribute__((aligned(32))) +#elif defined(_WIN32) +#define __at_align__ __declspec(align(32)) +#else +#define __at_align__ +#endif +#define VECTOR_WIDTH 32 +#define int_vector __m256i +#endif // CPU_CAPABILITY_AVX512 + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { +// at::Half and at::BFloat16 should be treated as floating point +template +struct is_floating_point: + std::integral_constant || + std::is_same_v || + std::is_same_v> { +}; + +template +constexpr bool is_floating_point_v = is_floating_point::value; + +template +struct is_reduced_floating_point: + std::integral_constant || + std::is_same_v> { +}; + +template +constexpr bool is_reduced_floating_point_v = is_reduced_floating_point::value; + +template +struct is_8bit_integer: + std::integral_constant || + std::is_same_v> { +}; + +template +constexpr bool is_8bit_integer_v = is_8bit_integer::value; + +template struct int_of_size; + +#define DEFINE_INT_OF_SIZE(int_t) \ +template<> struct int_of_size { using type = int_t; } + +DEFINE_INT_OF_SIZE(int64_t); +DEFINE_INT_OF_SIZE(int32_t); +DEFINE_INT_OF_SIZE(int16_t); +DEFINE_INT_OF_SIZE(int8_t); + +#undef DEFINE_INT_OF_SIZE + +template +using int_same_size_t = typename int_of_size::type; + +// NOTE: If you specialize on a type, you must define all operations! + +// emulates Vectorized types +#if defined(__s390x__) +template +#else +template +#endif +struct Vectorized { +private: + __at_align__ T values[VECTOR_WIDTH / sizeof(T)]; +public: + using value_type = T; + using size_type = int; + // Note [constexpr static function to avoid odr-usage compiler bug] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Why, you might ask, is size defined to be a static constexpr function, + // rather than a more ordinary 'static constexpr int size;' variable? + // The problem lies within ODR rules for static constexpr members versus + // static constexpr functions. First, recall that this class (along with all + // of its derivations) live in an anonymous namespace: they are intended to be + // *completely* inlined at their use-sites, because we need to compile it + // multiple times for different instruction sets. + // + // Because of this constraint, we CANNOT provide a single definition for + // any static members in this class; since we want to compile the class + // multiple times, there wouldn't actually be any good place to put the + // definition. Now here is the problem: if we ODR-use a static constexpr + // member, we are *obligated* to provide a definition. 
Without the + // definition, you get a compile error like: + // + // relocation R_X86_64_PC32 against undefined symbol + // `_ZN2at6vec25612_GLOBAL__N_16VectorizedIdE4sizeE' can not be used when making + // a shared object; recompile with -fPIC + // + // If this were C++17, we could replace a static constexpr variable with + // an inline variable which doesn't require one definition. But we are not + // C++17. So the next best thing is to replace the member with a static + // constexpr (and therefore inline) function, which does not require ODR + // either. + // + // Also, technically according to the C++ standard, we don't have to define + // a constexpr variable if we never odr-use it. But it seems that some + // versions GCC/Clang have buggy determinations on whether or not an + // identifier is odr-used or not, and in any case it's hard to tell if + // a variable is odr-used or not. So best to just cut the problem at the root. + static constexpr size_type size() { + return VECTOR_WIDTH / sizeof(T); + } + Vectorized() : values{static_cast(0)} {} + Vectorized(T val) { + for (int i = 0; i != size(); i++) { + values[i] = val; + } + } + template> + Vectorized(Args... vals) : values{vals...}{ + } + // This also implies const T& operator[](int idx) const + inline operator const T*() const { + return values; + } + // This also implies T& operator[](int idx) + inline operator T*() { + return values; + } + // Return the values as char* for type punning + auto as_bytes() const -> const char* { + return reinterpret_cast(values); + } + template + static Vectorized blend(const Vectorized& a, const Vectorized& b) { + int64_t mask = mask_; + Vectorized vector; + for (const auto i : c10::irange(size())) { + if (mask & 0x01) { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + mask = mask >> 1; + } + return vector; + } + static Vectorized blendv(const Vectorized& a, const Vectorized& b, + const Vectorized& mask) { + Vectorized vector; + int_same_size_t buffer[size()]; + mask.store(buffer); + for (const auto i : c10::irange(size())) { + if (buffer[i] & 0x01) + { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + } + return vector; + } + template // step sometimes requires a higher precision type (e.g., T=int, step_t=double) + static Vectorized arange(T base = static_cast(0), step_t step = static_cast(1)) { + Vectorized vector; + for (const auto i : c10::irange(size())) { + vector.values[i] = base + i * step; + } + return vector; + } + static Vectorized set(const Vectorized& a, const Vectorized& b, int64_t count = size()) { + Vectorized vector; + for (const auto i : c10::irange(size())) { + if (i < count) { + vector[i] = b[i]; + } else { + vector[i] = a[i]; + } + } + return vector; + } + static Vectorized loadu(const void* ptr) { + Vectorized vector; + std::memcpy(vector.values, ptr, VECTOR_WIDTH); + return vector; + } + static Vectorized loadu(const void* ptr, int64_t count) { + Vectorized vector; + std::memcpy(vector.values, ptr, count * sizeof(T)); + return vector; + } + static Vectorized loadu_one_fourth(const void* ptr) { + static_assert(std::is_same_v || std::is_same_v, "For byte types only"); + return Vectorized::loadu(ptr, 8); + } + + void store(void* ptr, int count = size()) const { + std::memcpy(ptr, values, count * sizeof(T)); + } + int zero_mask() const { + // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit + int mask = 0; + for (int i = 0; i < size(); ++ i) { + if (values[i] == static_cast(0)) { + mask |= (1 << i); + } + } 
+ return mask; + } + Vectorized isnan() const { + Vectorized vector; + for (int64_t i = 0; i != size(); i++) { + if (_isnan(values[i])) { + std::memset(static_cast(vector.values + i), 0xFF, sizeof(T)); + } else { + std::memset(static_cast(vector.values + i), 0, sizeof(T)); + } + } + return vector; + } + bool has_inf_nan() const { + for (int64_t i = 0; i != size(); i++) { + if(_isnan(values[i]) || _isinf(values[i])) { + return true; + } + } + return false; + } + Vectorized map(T (*const f)(T)) const { + Vectorized ret; + for (int64_t i = 0; i != size(); i++) { + ret[i] = f(values[i]); + } + return ret; + } + Vectorized map(T (*const f)(const T &)) const { + Vectorized ret; + for (int64_t i = 0; i != size(); i++) { + ret[i] = f(values[i]); + } + return ret; + } + template && !c10::is_complex::value, int> = 0> + Vectorized abs() const { + // other_t_abs is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_abs must be T"); + return map([](T x) -> T { return x < static_cast(0) ? -x : x; }); + } + template , int> = 0> + Vectorized abs() const { + // float_t_abs is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "float_t_abs must be T"); + // Specifically deal with floating-point because the generic code above won't handle -0.0 (which should result in + // 0.0) properly. + return map([](T x) -> T { return std::abs(x); }); + } + template ::value, int> = 0> + Vectorized abs() const { + // complex_t_abs is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_abs must be T"); + // Specifically map() does not perform the type conversion needed by abs. + return map([](T x) { return static_cast(std::abs(x)); }); + } + + template ::value, int> = 0> + Vectorized sgn() const { + return map(at::native::sgn_impl); + } + + template ::value, int> = 0> + Vectorized angle() const { + // other_t_angle is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_angle must be T"); + return map(at::native::angle_impl); // compiler is unable to resolve the overload without + } + template ::value, int> = 0> + Vectorized angle() const { + // complex_t_angle is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_angle must be T"); + return map([](T x) { return static_cast(std::arg(x)); }); + } + template ::value, int> = 0> + Vectorized real() const { + // other_t_real is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_real must be T"); + return *this; + } + template ::value, int> = 0> + Vectorized real() const { + // complex_t_real is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_real must be T"); + return map([](T x) { return static_cast(x.real()); }); + } + template ::value, int> = 0> + Vectorized imag() const { + // other_t_imag is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_imag must be T"); + return Vectorized(0); + } + template ::value, int> = 0> + Vectorized imag() const { + // complex_t_imag is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_imag must be T"); + return map([](T x) { return static_cast(x.imag()); }); + } + template ::value, int> = 0> + Vectorized conj() const { + // other_t_conj is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same_v, "other_t_conj must be T"); + return *this; + } + template ::value, int> = 0> + Vectorized conj() const { + // complex_t_conj is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "complex_t_conj must be T"); + return map([](T x) { return static_cast(std::conj(x)); }); + } + Vectorized acos() const { + return map(std::acos); + } + Vectorized acosh() const { + return map(std::acosh); + } + Vectorized asin() const { + return map(std::asin); + } + Vectorized atan() const { + return map(std::atan); + } + Vectorized atanh() const { + return map(std::atanh); + } + Vectorized atan2(const Vectorized &exp) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::atan2(values[i], exp[i]); + } + return ret; + } + template < + typename U = T, + typename std::enable_if_t, int> = 0> + Vectorized copysign(const Vectorized &sign) const { + Vectorized ret; + for (size_type i = 0; i < size(); i++) { + ret[i] = c10::copysign(values[i], sign[i]); + } + return ret; + } + Vectorized erf() const { + return map(std::erf); + } + Vectorized erfc() const { + return map(std::erfc); + } + Vectorized erfinv() const { + return map(calc_erfinv); + } + Vectorized exp() const { + return map(std::exp); + } + Vectorized exp2() const { + return map(exp2_impl); + } + Vectorized expm1() const { + return map(std::expm1); + } + Vectorized exp_u20() const { + return map(std::exp); + } + Vectorized frac() const { + return *this - this->trunc(); + } + template < + typename U = T, + typename std::enable_if_t, int> = 0> + Vectorized fmod(const Vectorized& q) const { + // U is for SFINAE purposes only. Make sure it is not changed. + static_assert(std::is_same_v, "U must be T"); + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::fmod(values[i], q[i]); + } + return ret; + } + Vectorized log() const { + return map(std::log); + } + Vectorized log10() const { + return map(std::log10); + } + Vectorized log1p() const { + return map(std::log1p); + } + template ::value, int> = 0> + Vectorized log2() const { + // other_t_log2 is for SFINAE and clarity. Make sure it is not changed. + static_assert(std::is_same_v, "other_t_log2 must be T"); + return map(std::log2); + } + template ::value, int> = 0> + Vectorized log2() const { + // complex_t_log2 is for SFINAE and clarity. Make sure it is not changed. 
+ static_assert(std::is_same_v, "complex_t_log2 must be T"); + const T log_2 = T(std::log(2.0)); + return Vectorized(map(std::log))/Vectorized(log_2); + } + Vectorized ceil() const { + return map(at::native::ceil_impl); + } + Vectorized cos() const { + return map(std::cos); + } + Vectorized cosh() const { + return map(std::cosh); + } + Vectorized floor() const { + return map(at::native::floor_impl); + } + Vectorized hypot(const Vectorized &b) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::hypot(values[i], b[i]); + } + return ret; + } + Vectorized i0() const { + return map(calc_i0); + } + Vectorized i0e() const { + return map(calc_i0e); + } + Vectorized digamma() const { + return map(calc_digamma); + } + Vectorized igamma(const Vectorized &x) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = calc_igamma(values[i], x[i]); + } + return ret; + } + Vectorized igammac(const Vectorized &x) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = calc_igammac(values[i], x[i]); + } + return ret; + } + Vectorized neg() const { + // NB: the trailing return type is needed because we need to coerce the + // return value back to T in the case of unary operator- incuring a + // promotion + return map([](T x) -> T { return -x; }); + } + Vectorized nextafter(const Vectorized &b) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::nextafter(values[i], b[i]); + } + return ret; + } + Vectorized round() const { + // We do not use std::round because we would like to round midway numbers to the nearest even integer. + return map(at::native::round_impl); + } + Vectorized sin() const { + return map(std::sin); + } + Vectorized sinh() const { + return map(std::sinh); + } + Vectorized tan() const { + return map(std::tan); + } + Vectorized tanh() const { + return map(std::tanh); + } + Vectorized trunc() const { + return map(at::native::trunc_impl); + } + Vectorized lgamma() const { + return map(std::lgamma); + } + Vectorized sqrt() const { + return map(std::sqrt); + } + Vectorized reciprocal() const { + return map([](T x) { return (T)(1) / x; }); + } + Vectorized rsqrt() const { + return map([](T x) { return (T)1 / std::sqrt(x); }); + } + Vectorized pow(const Vectorized &exp) const { + Vectorized ret; + for (const auto i : c10::irange(size())) { + ret[i] = std::pow(values[i], exp[i]); + } + return ret; + } +private: + template + inline Vectorized binary_pred(const Vectorized& other, Op op) const { + // All bits are set to 1 if the pred is true, otherwise 0. 
+ Vectorized vector; + for (int64_t i = 0; i != size(); i++) { + if (op(values[i], other.values[i])) { + std::memset(static_cast(vector.values + i), 0xFF, sizeof(T)); + } else { + std::memset(static_cast(vector.values + i), 0, sizeof(T)); + } + } + return vector; + } + +public: + Vectorized operator==(const Vectorized& other) const { return binary_pred(other, std::equal_to()); } + Vectorized operator!=(const Vectorized& other) const { return binary_pred(other, std::not_equal_to()); } + Vectorized operator>=(const Vectorized& other) const { return binary_pred(other, std::greater_equal()); } + Vectorized operator<=(const Vectorized& other) const { return binary_pred(other, std::less_equal()); } + Vectorized operator>(const Vectorized& other) const { return binary_pred(other, std::greater()); } + Vectorized operator<(const Vectorized& other) const { return binary_pred(other, std::less()); } + +private: + template + inline Vectorized binary_pred_bool(const Vectorized& other, Op op) const { + // 1 if the pred is true, otherwise 0. + Vectorized vector; + for (int i = 0; i != size(); ++ i) { + vector[i] = static_cast(op(values[i], other.values[i])); + } + return vector; + } + +public: + Vectorized eq(const Vectorized& other) const { return binary_pred_bool(other, std::equal_to()); } + Vectorized ne(const Vectorized& other) const { return binary_pred_bool(other, std::not_equal_to()); } + Vectorized gt(const Vectorized& other) const { return binary_pred_bool(other, std::greater()); } + Vectorized ge(const Vectorized& other) const { return binary_pred_bool(other, std::greater_equal()); } + Vectorized lt(const Vectorized& other) const { return binary_pred_bool(other, std::less()); } + Vectorized le(const Vectorized& other) const { return binary_pred_bool(other, std::less_equal()); } +}; + +template Vectorized inline operator+(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] + b[i]; + } + return c; +} + +template Vectorized inline operator-(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] - b[i]; + } + return c; +} + +template Vectorized inline operator*(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] * b[i]; + } + return c; +} + +template Vectorized inline operator/(const Vectorized &a, const Vectorized &b) __ubsan_ignore_float_divide_by_zero__ { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] / b[i]; + } + return c; +} + +template , int> = 0> +Vectorized inline operator%(const Vectorized &a, const Vectorized &b) __ubsan_ignore_float_divide_by_zero__ { + return a - a / b * b; +} + +template Vectorized inline operator||( + const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = a[i] || b[i]; + } + return c; +} + +// Implements the IEEE 754 201X `maximum` operation, which propagates NaN if +// either input is a NaN. +template ::value, int> = 0> +Vectorized inline maximum(const Vectorized &a, const Vectorized &b) { + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + c[i] = (a[i] > b[i]) ? a[i] : b[i]; + if (_isnan(a[i])) { + // If either input is NaN, propagate a NaN. + // NOTE: The case where b[i] was NaN is handled correctly by the naive + // ternary operator above. 
+      c[i] = a[i];
+    }
+  }
+  return c;
+}
+
+template <class T, typename std::enable_if_t<c10::is_complex<T>::value, int> = 0>
+Vectorized<T> inline maximum(const Vectorized<T> &a, const Vectorized<T> &b) {
+  Vectorized<T> c;
+  for (int i = 0; i != Vectorized<T>::size(); i++) {
+    c[i] = (std::abs(a[i]) > std::abs(b[i])) ? a[i] : b[i];
+    if (_isnan(a[i])) {
+      // If either input is NaN, propagate a NaN.
+      // NOTE: The case where b[i] was NaN is handled correctly by the naive
+      // ternary operator above.
+      c[i] = a[i];
+    }
+  }
+  return c;
+}
+
+// Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
+// either input is a NaN.
+template <class T, typename std::enable_if_t<!c10::is_complex<T>::value, int> = 0>
+Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
+  Vectorized<T> c;
+  for (int i = 0; i != Vectorized<T>::size(); i++) {
+    c[i] = (a[i] < b[i]) ? a[i] : b[i];
+    if (_isnan(a[i])) {
+      // If either input is NaN, propagate a NaN.
+      // NOTE: The case where b[i] was NaN is handled correctly by the naive
+      // ternary operator above.
+      c[i] = a[i];
+    }
+  }
+  return c;
+}
+
+template <class T, typename std::enable_if_t<c10::is_complex<T>::value, int> = 0>
+Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
+  Vectorized<T> c;
+  for (int i = 0; i != Vectorized<T>::size(); i++) {
+    c[i] = (std::abs(a[i]) < std::abs(b[i])) ? a[i] : b[i];
+    if (_isnan(a[i])) {
+      // If either input is NaN, propagate a NaN.
+      // NOTE: The case where b[i] was NaN is handled correctly by the naive
+      // ternary operator above.
+      c[i] = a[i];
+    }
+  }
+  return c;
+}
+
+template <class T, typename std::enable_if_t<!c10::is_complex<T>::value, int> = 0>
+Vectorized<T> inline clamp(const Vectorized<T> &a, const Vectorized<T> &min_vec, const Vectorized<T> &max_vec) {
+  Vectorized<T> c;
+  for (int i = 0; i != Vectorized<T>::size(); i++) {
+    c[i] = std::min(std::max(a[i], min_vec[i]), max_vec[i]);
+  }
+  return c;
+}
+
+template <class T, typename std::enable_if_t<!c10::is_complex<T>::value, int> = 0>
+Vectorized<T> inline clamp_max(const Vectorized<T> &a, const Vectorized<T> &max_vec) {
+  Vectorized<T> c;
+  for (int i = 0; i != Vectorized<T>::size(); i++) {
+    c[i] = a[i] > max_vec[i] ? max_vec[i] : a[i];
+  }
+  return c;
+}
+
+template <class T, typename std::enable_if_t<!c10::is_complex<T>::value, int> = 0>
+Vectorized<T> inline clamp_min(const Vectorized<T> &a, const Vectorized<T> &min_vec) {
+  Vectorized<T> c;
+  for (int i = 0; i != Vectorized<T>::size(); i++) {
+    c[i] = a[i] < min_vec[i] ?
min_vec[i] : a[i]; + } + return c; +} + +struct Vectorizedi; + +#if defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512) +template +static inline Vectorized bitwise_binary_op(const Vectorized &a, const Vectorized &b, Op op) { + int_vector buffer; +#if defined(CPU_CAPABILITY_AVX2) + int_vector a_buffer = _mm256_load_si256(reinterpret_cast((const T*)a)); + int_vector b_buffer = _mm256_load_si256(reinterpret_cast((const T*)b)); +#elif defined(CPU_CAPABILITY_AVX512) + int_vector a_buffer = _mm512_load_si512(reinterpret_cast((const T*)a)); + int_vector b_buffer = _mm512_load_si512(reinterpret_cast((const T*)b)); +#endif + buffer = op(a_buffer, b_buffer); + __at_align__ T results[Vectorized::size()]; + +#if defined(CPU_CAPABILITY_AVX2) + _mm256_store_si256(reinterpret_cast(results), buffer); +#elif defined(CPU_CAPABILITY_AVX512) + _mm512_store_si512(reinterpret_cast(results), buffer); +#endif + return Vectorized::loadu(results); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_and_si512 or _mm256_and_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_and_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_and_si512(a, b); }); +#endif +} +template>::value, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_or_si512 or _mm256_or_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_or_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_or_si512(a, b); }); +#endif +} +template>::value, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + // We enclose _mm512_xor_si512 or _mm256_xor_si256 with lambda because it is always_inline +#if defined(CPU_CAPABILITY_AVX2) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_xor_si256(a, b); }); +#elif defined(CPU_CAPABILITY_AVX512) + return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_xor_si512(a, b); }); +#endif +} + +#else + +template +auto load(char const* data) -> T { + T ret; + std::memcpy(&ret, data, sizeof(ret)); + return ret; +} + +template +static inline Vectorized bitwise_binary_op(const Vectorized &a, const Vectorized &b, Op op) { + static constexpr uint32_t element_no = VECTOR_WIDTH / sizeof(intmax_t); + __at_align__ intmax_t buffer[element_no]; + static_assert(VECTOR_WIDTH % sizeof(intmax_t) == 0, "VECTOR_WIDTH not a multiple of sizeof(intmax_t)"); + static_assert(sizeof(buffer) == sizeof(Vectorized), "sizeof(buffer) must match sizeof(Vectorized)"); + // We should be using memcpy in order to respect the strict aliasing rule + // see: https://github.com/pytorch/pytorch/issues/66119 + // Using char* is defined in the C11 standard 6.5 Expression paragraph 7 + // (http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf) + const auto* a_data = a.as_bytes(); + const auto* b_data = b.as_bytes(); + // load each intmax_t chunk and process; increase pointers by sizeof(intmax_t) + for (auto& out : buffer) { + out = op(load(a_data), load(b_data)); + a_data += sizeof(intmax_t); + b_data += sizeof(intmax_t); + } + assert(a_data == a.as_bytes() + sizeof(a)); + 
assert(b_data == b.as_bytes() + sizeof(b)); + return Vectorized::loadu(buffer); +} + +template>::value, int> = 0> +inline Vectorized operator&(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_and()); +} +template>::value, int> = 0> +inline Vectorized operator|(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_or()); +} +template>::value, int> = 0> +inline Vectorized operator^(const Vectorized& a, const Vectorized& b) { + return bitwise_binary_op(a, b, std::bit_xor()); +} + +#endif // defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512) + +template>::value, int> = 0> +inline Vectorized operator~(const Vectorized& a) { + using int_t = int_same_size_t; + Vectorized ones(c10::bit_cast((int_t)(~(int_t)0))); // All bits are 1 + return a ^ ones; +} + +template Vectorized inline operator<<(const Vectorized &a, const Vectorized &b) { + constexpr T max_shift = sizeof(T) * CHAR_BIT; + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + T shift = b[i]; + if ((static_cast>(shift) < 0) || (shift >= max_shift)) { + c[i] = 0; + } else { + c[i] = static_cast>(a[i]) << shift; + } + } + return c; +} + +template Vectorized inline operator>>(const Vectorized &a, const Vectorized &b) { + // right shift value to retain sign bit for signed and no bits for unsigned + constexpr T max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v; + Vectorized c; + for (int i = 0; i != Vectorized::size(); i++) { + T shift = b[i]; + if ((static_cast>(shift) < 0) || (shift >= max_shift)) { + c[i] = a[i] >> max_shift; + } else { + c[i] = a[i] >> shift; + } + } + return c; +} + +template +inline Vectorized& operator += (Vectorized& a, const Vectorized& b) { + a = a + b; + return a; +} +template +inline Vectorized& operator -= (Vectorized& a, const Vectorized& b) { + a = a - b; + return a; +} +template +inline Vectorized& operator /= (Vectorized& a, const Vectorized& b) { + a = a / b; + return a; +} +template +inline Vectorized& operator %= (Vectorized& a, const Vectorized& b) { + a = a % b; + return a; +} +template +inline Vectorized& operator *= (Vectorized& a, const Vectorized& b) { + a = a * b; + return a; +} + +template +inline Vectorized& operator <<= (Vectorized& a, const Vectorized& b) { + a = a << b; + return a; +} + +template +inline Vectorized& operator >>= (Vectorized& a, const Vectorized& b) { + a = a >> b; + return a; +} + +template +inline Vectorized fmadd(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return a * b + c; +} + +template +inline Vectorized fmsub(const Vectorized& a, const Vectorized& b, const Vectorized& c) { + return a * b - c; +} + +template +std::enable_if_t> +inline gather(T const* base_addr, const Vectorized>& vindex) { + static constexpr int size = Vectorized::size(); + int_same_size_t index_arr[size]; + vindex.store(static_cast(index_arr)); + T buffer[size]; + for (const auto i : c10::irange(size)) { + buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)]; + } + return Vectorized::loadu(static_cast(buffer)); +} + +template +std::enable_if_t> +inline mask_gather(const Vectorized& src, T const* base_addr, + const Vectorized>& vindex, Vectorized& mask) { + static constexpr int size = Vectorized::size(); + T src_arr[size]; + int_same_size_t mask_arr[size]; // use int type so we can logical and + int_same_size_t index_arr[size]; + src.store(static_cast(src_arr)); + mask.store(static_cast(mask_arr)); + vindex.store(static_cast(index_arr)); + T buffer[size]; + for (const auto i : 
c10::irange(size)) { + if (mask_arr[i] & 0x01) { // check highest bit + buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)]; + } else { + buffer[i] = src_arr[i]; + } + } + mask = Vectorized(); // "zero out" mask + return Vectorized::loadu(static_cast(buffer)); +} + +// Cast a given vector to another type without changing the bits representation. +// So a Vectorized of 512 bits containing all ones can be cast to a +// Vectorized of 512 bits containing all ones (i.e., eight negative 1s). +// A Vec of 256 bits containing all ones can be cast to a +// Vec of 256 bits containing all ones (i.e., four negative 1s). +// There is a struct here because we don't have static_if and I can't +// partially specialize a templated function. +template +struct CastImpl { + static inline Vectorized apply(const Vectorized& src) { + src_t src_arr[Vectorized::size()]; + src.store(static_cast(src_arr)); + return Vectorized::loadu(static_cast(src_arr)); + } +}; + +template +struct CastImpl { + static inline Vectorized apply(const Vectorized& src) { + return src; + } +}; + +template +inline Vectorized cast(const Vectorized& src) { + return CastImpl::apply(src); +} + +template > +inline Vectorized convert_to_int_of_same_size(const Vectorized& src) { + static_assert(sizeof(T) == sizeof(IntType)); + static constexpr int size = Vectorized::size(); + + std::array src_arr; + src.store(static_cast(src_arr.data())); + std::array buffer; + std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(), + [](const T& x) { return static_cast(x); }); + return Vectorized::loadu(static_cast(buffer.data())); +} + +template > +inline Vectorized convert_to_fp_of_same_size(const Vectorized& src) { + static_assert(sizeof(T) == sizeof(IntType)); + static constexpr int size = Vectorized::size(); + + std::array src_arr; + src.store(static_cast(src_arr.data())); + std::array buffer; + std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(), + [](const IntType& x) { return static_cast(x); }); + return Vectorized::loadu(static_cast(buffer.data())); +} + +// Example inputs for AVX512: +// a Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} +// b Vectorized = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} +// returns: +// Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} +// Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} +// Example inputs for AVX2: a Vectorized = {a0, b0, a1, b1, a2, b2, a3, b3} +// b Vectorized = {a4, b4, a5, b5, a6, b6, a7, b7} +// returns: Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7} +// Vectorized = {b0, b1, b2, b3, b4, b5, b6, b7} +template +inline std::enable_if_t::size() % 2 == 0, std::pair, Vectorized>> +deinterleave2(const Vectorized& a, const Vectorized& b) { + static constexpr int size = Vectorized::size(); + static constexpr int half_size = size / 2; + T a_arr[size]; + T b_arr[size]; + T buffer1[size]; + T buffer2[size]; + a.store(static_cast(a_arr)); + b.store(static_cast(b_arr)); + for (const auto i : c10::irange(half_size)) { + buffer1[i] = a_arr[i * 2]; + buffer1[half_size + i] = b_arr[i * 2]; + buffer2[i] = a_arr[i * 2 + 1]; + buffer2[half_size + i] = b_arr[i * 2 + 1]; + } + return std::make_pair(Vectorized::loadu(static_cast(buffer1)), + Vectorized::loadu(static_cast(buffer2))); +} + +// inverse operation of deinterleave2 +// Example inputs for AVX512: +// a Vectorized = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} +// b Vectorized = {b0, 
b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
+// returns, for AVX512:
+//           Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
+//           Vectorized<float> = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
+// Example inputs for AVX2 : a Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
+//                           b Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
+// returns:                  Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
+//                           Vectorized<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
+template <typename T>
+inline std::enable_if_t<Vectorized<T>::size() % 2 == 0, std::pair<Vectorized<T>, Vectorized<T>>>
+interleave2(const Vectorized<T>& a, const Vectorized<T>& b) {
+  static constexpr int size = Vectorized<T>::size();
+  static constexpr int half_size = size / 2;
+  T a_arr[size];
+  T b_arr[size];
+  T buffer1[size];
+  T buffer2[size];
+  a.store(static_cast<void*>(a_arr));
+  b.store(static_cast<void*>(b_arr));
+  for (const auto i : c10::irange(half_size)) {
+    buffer1[i * 2] = a_arr[i];
+    buffer1[i * 2 + 1] = b_arr[i];
+    buffer2[i * 2] = a_arr[half_size + i];
+    buffer2[i * 2 + 1] = b_arr[half_size + i];
+  }
+  return std::make_pair(Vectorized<T>::loadu(static_cast<void*>(buffer1)),
+                        Vectorized<T>::loadu(static_cast<void*>(buffer2)));
+}
+
+template <typename src_T, typename dst_T>
+inline void convert(const src_T *src, dst_T *dst, int64_t n) {
+#ifndef _MSC_VER
+# pragma unroll
+#endif
+  for (C10_UNUSED const auto i : c10::irange(n)) {
+    *dst = c10::convert<dst_T>(c10::load(src));
+    src++;
+    dst++;
+  }
+}
+
+template <typename T>
+inline Vectorized<T> flip(const Vectorized<T> & data) {
+  static constexpr int size = Vectorized<T>::size();
+  T output[size];
+  T buffer[size];
+  data.store(static_cast<void*>(buffer));
+  for (const auto i : c10::irange(size)) {
+    output[i] = buffer[size - i - 1];
+  }
+  return Vectorized<T>::loadu(static_cast<void*>(output));
+}
+
+// Transpose the `src` buffer of type `T` and size (M,N) into the `dst` buffer. `ld_src` is the leading
+// dimension of `src` and `ld_dst` is the leading dimension of `dst`.
+template +inline void transpose_mxn(const T* src, int64_t ld_src, T* dst, int64_t ld_dst) { + for (int i = 0; i < M; i++) { + for (int j = 0; j < N; j++) { + dst[j*ld_dst + i] = src[i*ld_src + j]; + } + } +} + +}} // namespace at::vec::CPU_CAPABILITY + +// additional headers for more operations that depend on vec_base +#include +#include +#include diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_convert.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_convert.h new file mode 100644 index 0000000000000000000000000000000000000000..ac8244d0f768404bc4ba93d78f0c12ca3a379d0a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_convert.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + typename Enabled = void> +struct VecConvert { + static inline VectorizedN apply( + const VectorizedN& src) { + constexpr int count = std::min( + VectorizedN::size(), VectorizedN::size()); + __at_align__ src_t src_buf[VectorizedN::size()]; + src.store(src_buf); + __at_align__ dst_t dst_buf[VectorizedN::size()]; + for (int i = 0; i < count; i++) { + dst_buf[i] = static_cast(src_buf[i]); + } + return VectorizedN::loadu(dst_buf, count); + } +}; + +template +inline typename std::enable_if::value, Vectorized>::type +convert(const Vectorized& src) { + return src; +} + +template +inline typename std::enable_if::value, Vectorized>::type +convert(const Vectorized& src) { + return VecConvert::apply(src); +} + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + std::enable_if_t = 0> +inline VectorizedN convert(const VectorizedN& src) { + return VecConvert::apply(src); +} + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + bool keep = false, + std::enable_if_t = 0> +inline typename std::conditional, Vectorized>::type +convert(const VectorizedN& src) { + return VecConvert::apply(src); +} + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h new file mode 100644 index 0000000000000000000000000000000000000000..0bff6f4abfe11f52b22b1735ba26c48c9c68b30b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h @@ -0,0 +1,50 @@ +#pragma once + +#include + +namespace at::vec { +// See Note [CPU_CAPABILITY namespace] +inline namespace CPU_CAPABILITY { + +#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \ + !defined(__APPLE__) +static inline uint16_t float2half_scalar(float val) { +#if defined(CPU_CAPABILITY_AVX2) +#if defined(_MSC_VER) + __m256 v = _mm256_set1_ps(val); + __m128i o = + _mm256_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return static_cast(_mm_cvtsi128_si32(o)); +#else + return _cvtss_sh(val, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC); +#endif +#elif defined(CPU_CAPABILITY_AVX512) + __m512 v = _mm512_set1_ps(val); + __m256i o = + _mm512_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); + return static_cast( + _mm_cvtsi128_si32(_mm256_castsi256_si128(o))); +#endif +} + +static inline float half2float_scalar(uint16_t val) { +#if defined(CPU_CAPABILITY_AVX2) +#if defined(_MSC_VER) + __m128i v = _mm_cvtsi32_si128(val); + __m256 o = _mm256_cvtph_ps(v); + return _mm256_cvtss_f32(o); +#else + return 
_cvtsh_ss(val); +#endif +#elif defined(CPU_CAPABILITY_AVX512) + __m256i v = + _mm256_setr_epi16(val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + __m512 o = _mm512_cvtph_ps(v); + return _mm512_cvtss_f32(o); +#endif +} + +#endif + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h new file mode 100644 index 0000000000000000000000000000000000000000..90f0f98962d900462ded2228d6506acc7bfa195f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_mask.h @@ -0,0 +1,266 @@ +#pragma once + +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +/** + * The `VecMask` class provides a convenient interface for working with + * vectorized masks in SIMD operations. It encapsulates a `Vectorized` + * mask that can be directly usable in masked vectorized operations. It provides + * various methods for manipulating and accessing the mask elements: + * 1. `from` and `to`: Conversion between a vector of boolean values and a + * vectorized mask. + * 2. `cast`: Casts the mask to a different base type. + * 3. `all_zero`: Checks if all mask elements are zero. + * 4. `is_masked`: Checks if a specific element is masked. + * 5. `loadu`: Loads data from memory using the mask. + * 6. `all_masked`: Checks if all mask elements are masked. + * + * Some helper template classes are provided to simplify the specialization of + * the `VecMask` for the specific CPU arch: + * 1. `VecMaskLoad`: Loads data from memory using the mask. + * 2. `VecMaskTo`: Converts the mask to boolean. + * 3. `VecMaskCast`: Casts the mask to a different base type. + * + */ +template +class VecMask; + +template < + typename data_t, + int data_n, + typename mask_t, + int mask_n, + typename Enabled = void> +struct VecMaskLoad { + static inline VectorizedN apply( + const data_t* ptr, + const VecMask& vec_mask) { + constexpr typename VecMask::size_type size = + VecMask::size(); + static_assert(VectorizedN::size() >= size); + __at_align__ data_t data[size]; + __at_align__ mask_t mask[size]; + auto mask_ = VectorizedN(vec_mask); + mask_.store(mask); + for (int i = 0; i < size; i++) { + data[i] = mask[i] ? 
ptr[i] : static_cast(0); + } + return VectorizedN::loadu(data, size); + } +}; + +template < + typename dst_t, + int dst_n, + typename src_t, + int src_n, + typename Enabled = void> +struct VecMaskTo { + static inline VecMask apply( + const VecMask& vec_mask) { + auto zeros = VectorizedN(static_cast(0)); + auto ones = VectorizedN(static_cast(1)); + return VectorizedN::blendv( + zeros, ones, vec_mask.template cast()); + } +}; + +template +struct VecMaskCast { + static inline VecMask apply( + const VecMask& vec_mask) { + return VecMask::from(VectorizedN(vec_mask)); + } +}; + +template +struct VecMaskCast { + static inline VecMask apply(const VecMask& vec_mask) { + return vec_mask; + } +}; + +template +class VecMask { + public: + using size_type = int; + static constexpr size_type size() { + return VectorizedN::size(); + } + + private: + VectorizedN mask_; + + public: + VecMask() : mask_(static_cast(0)) {} + VecMask(const VectorizedN& mask) : mask_(mask) {} + + template = 0> + VecMask(const Vectorized& mask) : mask_(mask) {} + + template + static VecMask from(const VectorizedN& b_vec) { + __at_align__ U b_buf[size()]; + if constexpr (size() >= VectorizedN::size()) { + b_vec.store(b_buf); + for (int i = VectorizedN::size(); i < size(); i++) { + b_buf[i] = static_cast(0); + } + } else { + b_vec.store(b_buf, size()); + } + return from(b_buf); + } + + template + static VecMask from(U b) { + using int_t = int_same_size_t; + T mask = b ? c10::bit_cast((int_t)(~(int_t)0)) : (T)0; + return VectorizedN(mask); + } + + template + static VecMask from(U* b) { + using int_t = int_same_size_t; + __at_align__ T mask[size()]; +#pragma unroll + for (int i = 0; i < size(); i++) { + *(int_t*)(mask + i) = b[i] ? ~(int_t)0 : (int_t)0; + } + return VectorizedN(VectorizedN::loadu(mask)); + } + + static VecMask blendv( + const VecMask& c, + const VecMask& b, + const VecMask& a) { + VectorizedN result = VectorizedN::blendv( + VectorizedN(c), + VectorizedN(b), + VectorizedN(a)); + return result; + } + + void store(bool* b, int count = size()) { + constexpr int L = (VectorizedN::size() + Vectorized::size() - 1)/ Vectorized::size(); + auto res = this->to(); + res.store(b, count); + return; + } + + template = 2, int> = 0> + inline VectorizedN to() const { + return VecMaskTo::apply(*this); + } + + template = 0> + inline Vectorized to() const { + return VecMaskTo::apply(*this); + } + + template + inline VecMask cast() const { + return VecMaskCast::apply(*this); + } + + inline bool all_zero() const { + __at_align__ T mask[size()]; + mask_.store(mask); + return std::all_of( + mask, mask + size(), [](T m) { return m == static_cast(0); }); + } + + inline bool all_masked() const { + __at_align__ T mask[size()]; + mask_.store(mask); + return std::all_of( + mask, mask + size(), [](T m) { return m != static_cast(0); }); + } + + inline bool is_masked(int i) const { + __at_align__ T mask[size()]; + mask_.store(mask); + return mask[i] != static_cast(0); + } + + inline operator VectorizedN() const { + return mask_; + } + + template = 0> + inline operator Vectorized() const { + return mask_[0]; + } + + inline Vectorized operator[](int i) const { + return mask_[i]; + } + + template < + typename U, + int L, + std::enable_if_t= 2 && VectorizedN::size() >= size(), int> = 0> + VectorizedN loadu(const U* ptr) const { + return VecMaskLoad::apply(ptr, *this); + } + + template < + typename U, + int L, + std::enable_if_t::size() >= size(), int> = 0> + Vectorized loadu(const U* ptr) const { + return VecMaskLoad::apply(ptr, *this); + } +}; + 
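As a quick orientation for the class above, the following standalone sketch shows the usage pattern the doc comment describes: build a mask from a boolean buffer with `from`, then use it for a masked (tail) load. It is illustrative only and not part of the header; the element type `float`, the lane-count argument `1`, the helper name `masked_tail_sum`, and the umbrella include path are assumptions for the example, not taken from this file.

// Illustrative sketch only, assuming the VecMask/Vectorized declarations above.
#include <ATen/cpu/vec/vec.h>  // assumed umbrella header exposing Vectorized and VecMask

float masked_tail_sum(const float* ptr, int valid) {
  using namespace at::vec;
  constexpr int W = Vectorized<float>::size();
  // Mark the first `valid` lanes as active.
  bool flags[W];
  for (int i = 0; i < W; ++i) {
    flags[i] = i < valid;
  }
  // from(bool*) expands each flag into an all-ones / all-zeros lane.
  auto mask = VecMask<float, 1>::from(flags);
  // loadu reads through the mask; in the generic VecMaskLoad fallback shown
  // above, inactive lanes are filled with zero.
  Vectorized<float> v = mask.loadu<float, 1>(ptr);
  // Reduce on the scalar side for simplicity.
  __at_align__ float buf[W];
  v.store(buf);
  float sum = 0.f;
  for (int i = 0; i < valid; ++i) {
    sum += buf[i];
  }
  return sum;
}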
+#define VEC_MASK_DEFINE_UNARY_OP_GLOBAL(op) \ + template \ + inline VecMask op(const VecMask& a) { \ + return op(VectorizedN(a)); \ + } + +#define VEC_MASK_DEFINE_BINARY_OP_GLOBAL(op) \ + template < \ + typename T, \ + int N, \ + typename V, \ + int M, \ + std::enable_if_t::size() == VecMask::size(), int> = \ + 0> \ + inline VecMask op(const VecMask& a, const VecMask& b) { \ + return op( \ + VectorizedN(a), VectorizedN(b.template cast())); \ + } + +#define VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(op, EXPR) \ + template < \ + typename T, \ + int N, \ + typename V, \ + int M, \ + std::enable_if_t::size() == VecMask::size(), int> = \ + 0> \ + inline VecMask op(const VecMask& a, const VecMask& b) { \ + return EXPR; \ + } + +VEC_MASK_DEFINE_UNARY_OP_GLOBAL(operator~) +VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator&) +VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator|) +VEC_MASK_DEFINE_BINARY_OP_GLOBAL(operator^) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator>, a & ~b) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<, ~a& b) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator==, ~(a ^ b)) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator>=, (a == b) | (a > b)) +VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL(operator<=, (a == b) | (a < b)) + +#undef VEC_MASK_DEFINE_UNARY_OP_GLOBAL +#undef VEC_MASK_DEFINE_BINARY_OP_GLOBAL +#undef VEC_MASK_DEFINE_BINARY_OP_WITH_EXPR_GLOBAL + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h new file mode 100644 index 0000000000000000000000000000000000000000..5b0eb352d66273562108896c682f4b3dbda99019 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h @@ -0,0 +1,356 @@ +#pragma once + +#include +#include + +namespace at::vec { +inline namespace CPU_CAPABILITY { + +/** + * @brief A class template representing a vectorized type with + * `N * Vectorized::size()` elements, aiming to support vectors of + * arbitrary size. A specific use case of it is to represent vectors + * converted from data types with different sizes but with the same + * number of vector elements, e.g., `VectorizedN` can be + * a vector converted from two `Vectorized`, `VectorizedN` + * can be a vector converted from two `Vectorized` etc. + * + * It supports most of the operations of `Vectorized` + * and the implementation delegates to `Vectorized` with loops over `N`. + * + * @tparam T The underlying type of the vectorized elements. + * @tparam N The number of underlying `Vectorized`. 
+ */ +template +class VectorizedN { + public: + using value_type = T; + using size_type = int; + + static constexpr size_type size_T = sizeof(T); + static constexpr size_type size() { + return Vectorized::size() * N; + } + + private: + std::array, N> values; + + public: + // methods not implemented yet: + // variadic constructor, operator T*, as_bytes, zero_mask + +#define VECTORIZEDN_DEFINE_UNARY_OP(op) \ + VectorizedN op() const { \ + return unary_op([](const Vectorized& a) { return a.op(); }); \ + } + +#define VECTORIZEDN_DEFINE_BINARY_OP(op) \ + VectorizedN op(const VectorizedN& other) const { \ + return binary_op( \ + other, [](const Vectorized& a, const Vectorized& b) { \ + return a.op(b); \ + }); \ + } + + template + inline VectorizedN unary_op(Op op) const { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result.values[i] = op(values[i]); + } + return result; + } + + template + inline VectorizedN binary_op(const VectorizedN& other, Op op) + const { + VectorizedN result; +#ifndef _MSC_VER +#pragma unroll +#endif + for (int i = 0; i < N; ++i) { + result.values[i] = op(values[i], other.values[i]); + } + return result; + } + + VectorizedN() = default; + + explicit VectorizedN(T val) { + for (int i = 0; i < N; ++i) { + values[i] = Vectorized(val); + } + } + + template = 0> + VectorizedN(const Vectorized& val) : values({val}) {} + + template = 0> + inline operator Vectorized() const { + return values[0]; + } + + inline const Vectorized& operator[](int i) const { + return values[i]; + } + + inline Vectorized& operator[](int i) { + return values[i]; + } + + template + static VectorizedN blend( + const VectorizedN& a, + const VectorizedN& b) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::template blend(a.values[i], b.values[i]); + } + return result; + } + + static VectorizedN blendv( + const VectorizedN& a, + const VectorizedN& b, + const VectorizedN& mask) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = + Vectorized::blendv(a.values[i], b.values[i], mask.values[i]); + } + return result; + } + + template + static VectorizedN arange( + T base = static_cast(0), + step_t step = static_cast(1)) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::arange(base, step); + base += step * Vectorized::size(); + } + return result; + } + + static VectorizedN set( + const VectorizedN& a, + const VectorizedN& b, + int64_t count = size()) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::set( + a.values[i], + b.values[i], + std::min(count, (int64_t)Vectorized::size())); + count -= Vectorized::size(); + if (count <= 0) { + break; + } + } + return result; + } + + static VectorizedN loadu(const void* ptr) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::loadu(ptr); + ptr = static_cast(ptr) + Vectorized::size(); + } + return result; + } + + static VectorizedN loadu(const void* ptr, int64_t count) { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = Vectorized::loadu( + ptr, std::min(count, (int64_t)Vectorized::size())); + ptr = static_cast(ptr) + Vectorized::size(); + count -= Vectorized::size(); + if (count <= 0) { + break; + } + } + return result; + } + + void store(void* ptr) const { + for (int i = 0; i < N; ++i) { + values[i].store(ptr); + ptr = static_cast(ptr) + Vectorized::size(); + } + } + + void store(void* ptr, int count) const { + for (int i = 
0; i < N; ++i) { + values[i].store(ptr, std::min(count, (int)Vectorized::size())); + ptr = static_cast(ptr) + Vectorized::size(); + count -= Vectorized::size(); + if (count <= 0) { + break; + } + } + } + + bool has_inf_nan() const { + for (int i = 0; i < N; ++i) { + if (values[i].has_inf_nan()) { + return true; + } + } + return false; + } + + VectorizedN map(T (*const f)(T)) const { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = values[i].map(f); + } + return result; + } + + VectorizedN map(T (*const f)(const T&)) const { + VectorizedN result; + for (int i = 0; i < N; ++i) { + result.values[i] = values[i].map(f); + } + return result; + } + + VECTORIZEDN_DEFINE_UNARY_OP(abs) + VECTORIZEDN_DEFINE_UNARY_OP(sgn) + VECTORIZEDN_DEFINE_UNARY_OP(angle) + VECTORIZEDN_DEFINE_UNARY_OP(real) + VECTORIZEDN_DEFINE_UNARY_OP(imag) + VECTORIZEDN_DEFINE_UNARY_OP(conj) + VECTORIZEDN_DEFINE_UNARY_OP(acos) + VECTORIZEDN_DEFINE_UNARY_OP(acosh) + VECTORIZEDN_DEFINE_UNARY_OP(asin) + VECTORIZEDN_DEFINE_UNARY_OP(atan) + VECTORIZEDN_DEFINE_UNARY_OP(atanh) + VECTORIZEDN_DEFINE_BINARY_OP(atan2) + VECTORIZEDN_DEFINE_BINARY_OP(copysign) + VECTORIZEDN_DEFINE_UNARY_OP(erf) + VECTORIZEDN_DEFINE_UNARY_OP(erfc) + VECTORIZEDN_DEFINE_UNARY_OP(erfinv) + VECTORIZEDN_DEFINE_UNARY_OP(exp) + VECTORIZEDN_DEFINE_UNARY_OP(exp2) + VECTORIZEDN_DEFINE_UNARY_OP(expm1) + VECTORIZEDN_DEFINE_UNARY_OP(exp_u20) + VECTORIZEDN_DEFINE_UNARY_OP(frac) + VECTORIZEDN_DEFINE_BINARY_OP(fmod) + VECTORIZEDN_DEFINE_UNARY_OP(log) + VECTORIZEDN_DEFINE_UNARY_OP(log10) + VECTORIZEDN_DEFINE_UNARY_OP(log1p) + VECTORIZEDN_DEFINE_UNARY_OP(log2) + VECTORIZEDN_DEFINE_UNARY_OP(ceil) + VECTORIZEDN_DEFINE_UNARY_OP(cos) + VECTORIZEDN_DEFINE_UNARY_OP(cosh) + VECTORIZEDN_DEFINE_UNARY_OP(floor) + VECTORIZEDN_DEFINE_BINARY_OP(hypot) + VECTORIZEDN_DEFINE_UNARY_OP(i0) + VECTORIZEDN_DEFINE_UNARY_OP(i0e) + VECTORIZEDN_DEFINE_UNARY_OP(digamma) + VECTORIZEDN_DEFINE_BINARY_OP(igamma) + VECTORIZEDN_DEFINE_BINARY_OP(igammac) + VECTORIZEDN_DEFINE_UNARY_OP(neg) + VECTORIZEDN_DEFINE_BINARY_OP(nextafter) + VECTORIZEDN_DEFINE_UNARY_OP(round) + VECTORIZEDN_DEFINE_UNARY_OP(sin) + VECTORIZEDN_DEFINE_UNARY_OP(sinh) + VECTORIZEDN_DEFINE_UNARY_OP(tan) + VECTORIZEDN_DEFINE_UNARY_OP(tanh) + VECTORIZEDN_DEFINE_UNARY_OP(trunc) + VECTORIZEDN_DEFINE_UNARY_OP(lgamma) + VECTORIZEDN_DEFINE_UNARY_OP(sqrt) + VECTORIZEDN_DEFINE_UNARY_OP(reciprocal) + VECTORIZEDN_DEFINE_UNARY_OP(rsqrt) + VECTORIZEDN_DEFINE_BINARY_OP(pow) + VECTORIZEDN_DEFINE_BINARY_OP(operator==) + VECTORIZEDN_DEFINE_BINARY_OP(operator!=) + VECTORIZEDN_DEFINE_BINARY_OP(operator>=) + VECTORIZEDN_DEFINE_BINARY_OP(operator<=) + VECTORIZEDN_DEFINE_BINARY_OP(operator>) + VECTORIZEDN_DEFINE_BINARY_OP(operator<) + VECTORIZEDN_DEFINE_BINARY_OP(eq) + VECTORIZEDN_DEFINE_BINARY_OP(ne) + VECTORIZEDN_DEFINE_BINARY_OP(gt) + VECTORIZEDN_DEFINE_BINARY_OP(ge) + VECTORIZEDN_DEFINE_BINARY_OP(lt) + VECTORIZEDN_DEFINE_BINARY_OP(le) + +#undef VECTORIZEDN_DEFINE_UNARY_OP +#undef VECTORIZEDN_DEFINE_BINARY_OP +}; + +#define VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(op) \ + template \ + inline VectorizedN op(const VectorizedN& a) { \ + return a.unary_op([](const Vectorized& a) { return op(a); }); \ + } + +#define VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(op) \ + template \ + inline VectorizedN op( \ + const VectorizedN& a, const VectorizedN& b) { \ + return a.binary_op(b, [](const Vectorized& a, const Vectorized& b) { \ + return op(a, b); \ + }); \ + } + +#define VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(op) \ + template \ + inline VectorizedN& 
op( \ + VectorizedN& a, const VectorizedN& b) { \ + a = a.binary_op(b, [](const Vectorized& a, const Vectorized& b) { \ + return op(a, b); \ + }); \ + return a; \ + } + +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator+) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator-) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator*) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator/) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator%) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator||) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator<<) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator>>) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(maximum) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(minimum) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmadd) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmsub) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_max) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_min) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator&) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator|) +VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator^) +VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(operator~) + +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator+=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator-=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator*=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator/=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator%=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator<<=) +VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator>>=) + +#undef VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL +#undef VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL +#undef VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL + +template +inline T vec_reduce_all(const OpVec& vec_fun, VectorizedN acc_vec) { + Vectorized vec_result = acc_vec[0]; + for (int i = 1; i < N; i++) { + vec_result = vec_fun(vec_result, acc_vec[i]); + } + return vec_reduce_all(vec_fun, vec_result); +} + +} // namespace CPU_CAPABILITY +} // namespace at::vec diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h new file mode 100644 index 0000000000000000000000000000000000000000..fe57a27a04d9fee415f709a065583a8e85078ac7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h @@ -0,0 +1,171 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +// This header implements various unary operations using a MKL VML style +// interface. + +// It implements various functions with a simple interface +// For example it enables the user to call vsin(float* out, const float* in, +// size) This functions takes a pointer to a continuous output array of floats and +// a constant input array. It will then apply sin to each value in the input +// array and write the result into the output array. out and in may point to the +// same memory, i.e. this fully supports in-place operations. These functions +// also implement their own parallelization, so take precautions when calling +// these from threaded functions. + +// When MKL is available it will call into MKL's VML library similar to NumPy +// If MKL is not available it will use SLEEF. + +// This file might be compiled under AVX or AVX2 when called from e.g. 
+// UnaryOpsKernel.cpp + +#include +#include +#include +#include +#include + +#if AT_MKL_ENABLED() && !defined(__APPLE__) +#include +#endif + +namespace at { +namespace vml { +inline namespace CPU_CAPABILITY { + +using namespace vec; + +template +inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) { + parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) { + map( + [](const Vectorized& x) { + return Vectorized((scalar_t)(1)) / x.sqrt(); + }, + out + begin, + in + begin, + end - begin); + }); +} + +// NB: We ignore numerical errors by convention and leave them to the user + +#define IMPLEMENT_VML(op) \ + template \ + inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \ + using vec_t = Vectorized>; \ + vec::map([](vec_t x) { return x.op(); }, out, in, size); \ + } \ + +IMPLEMENT_VML(abs) +IMPLEMENT_VML(acos) +IMPLEMENT_VML(asin) +IMPLEMENT_VML(atan) +IMPLEMENT_VML(atanh) +IMPLEMENT_VML(ceil) +IMPLEMENT_VML(cos) +// IMPLEMENT_VML(cosh) +IMPLEMENT_VML(erf) +IMPLEMENT_VML(erfc) +IMPLEMENT_VML(erfinv) +IMPLEMENT_VML(exp) +IMPLEMENT_VML(expm1) +IMPLEMENT_VML(floor) +IMPLEMENT_VML(i0) +IMPLEMENT_VML(i0e) +IMPLEMENT_VML(digamma) +IMPLEMENT_VML(reciprocal) +IMPLEMENT_VML(log) +IMPLEMENT_VML(log10) +IMPLEMENT_VML(log1p) +IMPLEMENT_VML(log2) +IMPLEMENT_VML(neg) +IMPLEMENT_VML(sin) +// IMPLEMENT_VML(sinh) +IMPLEMENT_VML(sqrt) +IMPLEMENT_VML(round) +IMPLEMENT_VML(rsqrt) +IMPLEMENT_VML(tan) +IMPLEMENT_VML(tanh) +IMPLEMENT_VML(trunc) +IMPLEMENT_VML(lgamma) + + +#if AT_MKL_ENABLED() && !defined(__APPLE__) + +// NB: LP64 MKL is the most commonly used and thus we assume it here. That means +// we need to expect MKL_INT to be of type int, which implies int32_t or int64_t in most +// cases. +static_assert( + std::is_same_v || std::is_same_v, + "MKL_INT is assumed to be int32_t or int64_t"); +#define IMPLEMENT_VML_MKL_STUB(op, mklop, type, mkltype) \ + template <> \ + inline void v##op(type * out, const type * in, int64_t size) { \ + int64_t max_mkl_ind = std::numeric_limits::max(); \ + if (size <= static_cast(max_mkl_ind)) { \ + vm##mkltype##mklop( \ + size, in, out, VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } else { \ + MKL_INT ind = 0; \ + int64_t chunks = size / max_mkl_ind; \ + int64_t rest = size % max_mkl_ind; \ + for (; ind < chunks; ind++) { \ + vm##mkltype##mklop( \ + max_mkl_ind, \ + in + ind * max_mkl_ind, \ + out + ind * max_mkl_ind, \ + VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } \ + vm##mkltype##mklop( \ + rest, \ + in + ind * max_mkl_ind, \ + out + ind * max_mkl_ind, \ + VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ + } \ + } + +#define IMPLEMENT_VML_MKL(op, mklop) \ + IMPLEMENT_VML_MKL_STUB(op, mklop, float, s) \ + IMPLEMENT_VML_MKL_STUB(op, mklop, double, d) + +// NB: abs, cosh and sinh were temporarily disabled due to issues with Apple +// NB: expm1 is disabled because on some configs it produces expm1(nan)=-1 +IMPLEMENT_VML_MKL(acos, Acos) +IMPLEMENT_VML_MKL(asin, Asin) +IMPLEMENT_VML_MKL(atan, Atan) +IMPLEMENT_VML_MKL(cos, Cos) +// IMPLEMENT_VML_MKL(cosh, Cosh) +IMPLEMENT_VML_MKL(erf, Erf) +IMPLEMENT_VML_MKL(erfc, Erfc) +IMPLEMENT_VML_MKL(erfinv, ErfInv) +IMPLEMENT_VML_MKL(exp, Exp) +// IMPLEMENT_VML_MKL(expm1, Expm1) +IMPLEMENT_VML_MKL(log, Ln) +IMPLEMENT_VML_MKL(log10, Log10) +IMPLEMENT_VML_MKL(sin, Sin) +// IMPLEMENT_VML_MKL(sinh, Sinh) +IMPLEMENT_VML_MKL(sqrt, Sqrt) +IMPLEMENT_VML_MKL(tan, Tan) +IMPLEMENT_VML_MKL(tanh, Tanh) +IMPLEMENT_VML_MKL(trunc, Trunc) + +// Not vectorized in MKL version tested +// IMPLEMENT_VML_MKL(abs, 
Abs) +// IMPLEMENT_VML_MKL(log1p, Log1p) + +#if INTEL_MKL_VERSION >= 20180406 +IMPLEMENT_VML_MKL(log2, Log2) +#endif + +#endif + +} // namespace +} // namespace vml +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..05a4014030d2a1186a5681545ca4678b32936186 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _fused_dropout { + using schema = ::std::tuple (const at::Tensor &, double, ::std::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_dropout") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, double p, ::std::optional generator); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional generator); +}; + +struct TORCH_API _fused_dropout_out { + using schema = ::std::tuple (const at::Tensor &, double, ::std::optional, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fused_dropout") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fused_dropout.out(Tensor self, float p, Generator? generator=None, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, double p, ::std::optional generator, at::Tensor & out0, at::Tensor & out1); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double p, ::std::optional generator, at::Tensor & out0, at::Tensor & out1); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5127ff1f0312f675412976861009a598591df696 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_lengths_native.h @@ -0,0 +1,20 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..dd08c4d32ed77139295845a532431aaf5618d08f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_unique2_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _unique2 { + using schema = ::std::tuple (const at::Tensor &, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unique2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts); +}; + +struct TORCH_API _unique2_out { + using schema = ::std::tuple (const at::Tensor &, bool, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unique2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple call(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0ac5cfccd87dc62a9dd9584839b937bd4aab978d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hardshrink(const at::Tensor & self, const at::Scalar & lambd=0.5); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/moveaxis_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/moveaxis_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8c13dfd717d4d4d964bf82986a120237152868b9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/moveaxis_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
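// Illustrative sketch (not part of the generated headers): how the
// torchgen-generated operator structs above are reached in practice. call()
// performs a full dispatcher lookup; redispatch() is meant for code that is
// already inside the dispatcher and wants to continue with an explicit
// DispatchKeySet. Assumes a working libtorch build; the public wrapper
// at::_unique2 and the tensor contents here are only for demonstration.
#include <ATen/ATen.h>
#include <ATen/ops/_unique2_ops.h>

inline void demo_generated_operator_structs() {
  at::Tensor t = at::randint(/*low=*/0, /*high=*/3, /*size=*/{8});

  // Normal user-facing path: the at:: wrapper forwards into the dispatcher.
  auto [values, inverse, counts] = at::_unique2(
      t, /*sorted=*/true, /*return_inverse=*/true, /*return_counts=*/true);

  // Equivalent direct use of the generated struct from _unique2_ops.h.
  auto direct = at::_ops::_unique2::call(
      t, /*sorted=*/true, /*return_inverse=*/true, /*return_counts=*/true);

  (void)values; (void)inverse; (void)counts; (void)direct;
}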
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API moveaxis_intlist { + using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::moveaxis") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "intlist") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination); +}; + +struct TORCH_API moveaxis_int { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::moveaxis") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, int64_t source, int64_t destination); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t source, int64_t destination); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h new file mode 100644 index 0000000000000000000000000000000000000000..29d74cff1e0c495680979b4350819c02fa65ddb6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/AlignOf.h @@ -0,0 +1,176 @@ +//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the AlignedCharArray and AlignedCharArrayUnion classes. +// +//===----------------------------------------------------------------------===// + +// ATen: modified from llvm::AlignOf +// replaced LLVM_ALIGNAS with alignas + +#pragma once + +#include + +namespace c10 { + +/// \struct AlignedCharArray +/// \brief Helper for building an aligned character array type. +/// +/// This template is used to explicitly build up a collection of aligned +/// character array types. We have to build these up using a macro and explicit +/// specialization to cope with MSVC (at least till 2015) where only an +/// integer literal can be used to specify an alignment constraint. Once built +/// up here, we can then begin to indirect between these using normal C++ +/// template parameters. + +// MSVC requires special handling here. +#ifndef _MSC_VER + +template +struct AlignedCharArray { + // NOLINTNEXTLINE(*c-arrays) + alignas(Alignment) char buffer[Size]; +}; + +#else // _MSC_VER + +/// \brief Create a type with an aligned char buffer. 
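// Illustrative sketch (not part of AlignOf.h): what the generic, non-MSVC
// AlignedCharArray above provides -- a raw char buffer with an explicit
// alignment -- and how such storage is typically consumed via placement new.
// The LocalAlignedCharArray and Payload names are local to this sketch.
#include <cstddef>
#include <new>

template <std::size_t Alignment, std::size_t Size>
struct LocalAlignedCharArray {
  alignas(Alignment) char buffer[Size];
};

struct Payload {
  double value;
  int count;
};

inline int demo_aligned_storage() {
  // Storage that is large enough and aligned enough for a Payload.
  LocalAlignedCharArray<alignof(Payload), sizeof(Payload)> storage;

  // Construct in place, use, then destroy manually.
  Payload* p = new (storage.buffer) Payload{3.14, 1};
  int result = p->count;
  p->~Payload();
  return result;
}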
+template +struct AlignedCharArray; + +// We provide special variations of this template for the most common +// alignments because __declspec(align(...)) doesn't actually work when it is +// a member of a by-value function argument in MSVC, even if the alignment +// request is something reasonably like 8-byte or 16-byte. Note that we can't +// even include the declspec with the union that forces the alignment because +// MSVC warns on the existence of the declspec despite the union member forcing +// proper alignment. + +template +struct AlignedCharArray<1, Size> { + union { + char aligned; + char buffer[Size]; + }; +}; + +template +struct AlignedCharArray<2, Size> { + union { + short aligned; + char buffer[Size]; + }; +}; + +template +struct AlignedCharArray<4, Size> { + union { + int aligned; + char buffer[Size]; + }; +}; + +template +struct AlignedCharArray<8, Size> { + union { + double aligned; + char buffer[Size]; + }; +}; + +// The rest of these are provided with a __declspec(align(...)) and we simply +// can't pass them by-value as function arguments on MSVC. + +#define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ + template \ + struct AlignedCharArray { \ + __declspec(align(x)) char buffer[Size]; \ + }; + +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16) +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32) +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64) +AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128) + +#undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT + +#endif // _MSC_VER + +namespace detail { +template < + typename T1, + typename T2 = char, + typename T3 = char, + typename T4 = char, + typename T5 = char, + typename T6 = char, + typename T7 = char, + typename T8 = char, + typename T9 = char, + typename T10 = char> +class AlignerImpl { + T1 t1; + T2 t2; + T3 t3; + T4 t4; + T5 t5; + T6 t6; + T7 t7; + T8 t8; + T9 t9; + T10 t10; + + public: + AlignerImpl() = delete; +}; + +template < + typename T1, + typename T2 = char, + typename T3 = char, + typename T4 = char, + typename T5 = char, + typename T6 = char, + typename T7 = char, + typename T8 = char, + typename T9 = char, + typename T10 = char> +union SizerImpl { + // NOLINTNEXTLINE(*c-arrays) + char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)], + arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)], + arr9[sizeof(T9)], arr10[sizeof(T10)]; +}; +} // end namespace detail + +/// \brief This union template exposes a suitably aligned and sized character +/// array member which can hold elements of any of up to ten types. +/// +/// These types may be arrays, structs, or any other types. The goal is to +/// expose a char array buffer member which can be used as suitable storage for +/// a placement new of any of these types. Support for more than ten types can +/// be added at the cost of more boilerplate. 
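// Illustrative sketch (not part of AlignOf.h): the intended use of
// AlignedCharArrayUnion -- one buffer that is aligned and sized for whichever
// of the listed types is placement-new'ed into it. Assumes the c10 header is
// on the include path; Small and Big are local example types.
#include <c10/util/AlignOf.h>
#include <new>
#include <string>

struct Small {
  int x;
};
struct Big {
  double d[4];
  std::string tag;
};

inline void demo_aligned_union() {
  // Storage suitable for either a Small or a Big.
  c10::AlignedCharArrayUnion<Small, Big> storage;

  Big* b = new (storage.buffer) Big{{1.0, 2.0, 3.0, 4.0}, "big"};
  b->~Big();

  Small* s = new (storage.buffer) Small{42};
  s->~Small();
}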
+template < + typename T1, + typename T2 = char, + typename T3 = char, + typename T4 = char, + typename T5 = char, + typename T6 = char, + typename T7 = char, + typename T8 = char, + typename T9 = char, + typename T10 = char> +struct AlignedCharArrayUnion + : AlignedCharArray< + alignof(detail::AlignerImpl), + sizeof(::c10::detail:: + SizerImpl)> {}; +} // end namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..10ab0c828d7a8f21de30d43e9a7d8985733d5463 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-inl.h @@ -0,0 +1,343 @@ +#pragma once + +#include +#include + +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +#if defined(CL_SYCL_LANGUAGE_VERSION) +#include // for SYCL 1.2.1 +#else +#include // for SYCL 2020 +#endif +#include +#endif + +namespace c10 { + +/// Constructors +inline C10_HOST_DEVICE BFloat16::BFloat16(float value) + : +#if defined(__CUDACC__) && !defined(USE_ROCM) && defined(__CUDA_ARCH__) && \ + __CUDA_ARCH__ >= 800 + x(__bfloat16_as_ushort(__float2bfloat16(value))) +#elif defined(__SYCL_DEVICE_ONLY__) && \ + defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) + x(c10::bit_cast(sycl::ext::oneapi::bfloat16(value))) +#else + // RNE by default + x(detail::round_to_nearest_even(value)) +#endif +{ +} + +/// Implicit conversions +inline C10_HOST_DEVICE BFloat16::operator float() const { +#if defined(__CUDACC__) && !defined(USE_ROCM) + return __bfloat162float(*reinterpret_cast(&x)); +#elif defined(__SYCL_DEVICE_ONLY__) && \ + defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) + return float(*reinterpret_cast(&x)); +#else + return detail::f32_from_bits(x); +#endif +} + +#if defined(__CUDACC__) && !defined(USE_ROCM) +inline C10_HOST_DEVICE BFloat16::BFloat16(const __nv_bfloat16& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE BFloat16::operator __nv_bfloat16() const { + return *reinterpret_cast(&x); +} +#endif + +#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) +inline C10_HOST_DEVICE BFloat16::BFloat16( + const sycl::ext::oneapi::bfloat16& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE BFloat16::operator sycl::ext::oneapi::bfloat16() const { + return *reinterpret_cast(&x); +} +#endif + +// CUDA intrinsics + +#if defined(__CUDACC__) || defined(__HIPCC__) +inline C10_DEVICE BFloat16 __ldg(const BFloat16* ptr) { +#if !defined(USE_ROCM) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + return __ldg(reinterpret_cast(ptr)); +#else + return *ptr; +#endif +} +#endif + +/// Arithmetic + +inline C10_HOST_DEVICE BFloat16 +operator+(const BFloat16& a, const BFloat16& b) { + return static_cast(a) + static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 +operator-(const BFloat16& a, const BFloat16& b) { + return static_cast(a) - static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 +operator*(const BFloat16& a, const BFloat16& b) { + return static_cast(a) * static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator/(const BFloat16& a, const BFloat16& b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator-(const BFloat16& a) { + return -static_cast(a); +} + +inline 
C10_HOST_DEVICE BFloat16& operator+=(BFloat16& a, const BFloat16& b) { + a = a + b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator-=(BFloat16& a, const BFloat16& b) { + a = a - b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator*=(BFloat16& a, const BFloat16& b) { + a = a * b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator/=(BFloat16& a, const BFloat16& b) { + a = a / b; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator|(BFloat16& a, const BFloat16& b) { + a.x = a.x | b.x; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator^(BFloat16& a, const BFloat16& b) { + a.x = a.x ^ b.x; + return a; +} + +inline C10_HOST_DEVICE BFloat16& operator&(BFloat16& a, const BFloat16& b) { + a.x = a.x & b.x; + return a; +} + +/// Arithmetic with floats + +inline C10_HOST_DEVICE float operator+(BFloat16 a, float b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE float operator-(BFloat16 a, float b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE float operator*(BFloat16 a, float b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE float operator/(BFloat16 a, float b) { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE float operator+(float a, BFloat16 b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE float operator-(float a, BFloat16 b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE float operator*(float a, BFloat16 b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE float operator/(float a, BFloat16 b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE float& operator+=(float& a, const BFloat16& b) { + return a += static_cast(b); +} +inline C10_HOST_DEVICE float& operator-=(float& a, const BFloat16& b) { + return a -= static_cast(b); +} +inline C10_HOST_DEVICE float& operator*=(float& a, const BFloat16& b) { + return a *= static_cast(b); +} +inline C10_HOST_DEVICE float& operator/=(float& a, const BFloat16& b) { + return a /= static_cast(b); +} + +/// Arithmetic with doubles + +inline C10_HOST_DEVICE double operator+(BFloat16 a, double b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE double operator-(BFloat16 a, double b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE double operator*(BFloat16 a, double b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE double operator/(BFloat16 a, double b) { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE double operator+(double a, BFloat16 b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE double operator-(double a, BFloat16 b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE double operator*(double a, BFloat16 b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE double operator/(double a, BFloat16 b) { + return a / static_cast(b); +} + +/// Arithmetic with ints + +inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator+(int a, BFloat16 b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE BFloat16 operator-(int a, BFloat16 b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE BFloat16 operator*(int a, BFloat16 b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE BFloat16 operator/(int a, 
BFloat16 b) { + return static_cast(a) / b; +} + +//// Arithmetic with int64_t + +inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int64_t b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int64_t b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int64_t b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int64_t b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE BFloat16 operator+(int64_t a, BFloat16 b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE BFloat16 operator-(int64_t a, BFloat16 b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE BFloat16 operator*(int64_t a, BFloat16 b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE BFloat16 operator/(int64_t a, BFloat16 b) { + return static_cast(a) / b; +} + +// Overloading < and > operators, because std::max and std::min use them. + +inline C10_HOST_DEVICE bool operator>(BFloat16& lhs, BFloat16& rhs) { + return float(lhs) > float(rhs); +} + +inline C10_HOST_DEVICE bool operator<(BFloat16& lhs, BFloat16& rhs) { + return float(lhs) < float(rhs); +} + +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_signed = true; + static constexpr bool is_specialized = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr auto has_denorm = numeric_limits::has_denorm; + static constexpr auto has_denorm_loss = + numeric_limits::has_denorm_loss; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 8; + static constexpr int digits10 = 2; + static constexpr int max_digits10 = 4; + static constexpr int radix = 2; + static constexpr int min_exponent = -125; + static constexpr int min_exponent10 = -37; + static constexpr int max_exponent = 128; + static constexpr int max_exponent10 = 38; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = + numeric_limits::tinyness_before; + + static constexpr c10::BFloat16 min() { + return c10::BFloat16(0x0080, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 lowest() { + return c10::BFloat16(0xFF7F, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 max() { + return c10::BFloat16(0x7F7F, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 epsilon() { + return c10::BFloat16(0x3C00, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 round_error() { + return c10::BFloat16(0x3F00, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 infinity() { + return c10::BFloat16(0x7F80, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 quiet_NaN() { + return c10::BFloat16(0x7FC0, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 signaling_NaN() { + return c10::BFloat16(0x7F80, c10::BFloat16::from_bits()); + } + static constexpr c10::BFloat16 denorm_min() { + return c10::BFloat16(0x0001, c10::BFloat16::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h 
b/parrot/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h new file mode 100644 index 0000000000000000000000000000000000000000..04ad455e331330f18be68e74a4077057a4854c74 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/CallOnce.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace c10 { + +// custom c10 call_once implementation to avoid the deadlock in std::call_once. +// The implementation here is a simplified version from folly and likely much +// much higher memory footprint. +template +inline void call_once(Flag& flag, F&& f, Args&&... args) { + if (C10_LIKELY(flag.test_once())) { + return; + } + flag.call_once_slow(std::forward(f), std::forward(args)...); +} + +class once_flag { + public: +#ifndef _WIN32 + // running into build error on MSVC. Can't seem to get a repro locally so I'm + // just avoiding constexpr + // + // C:/actions-runner/_work/pytorch/pytorch\c10/util/CallOnce.h(26): error: + // defaulted default constructor cannot be constexpr because the + // corresponding implicitly declared default constructor would not be + // constexpr 1 error detected in the compilation of + // "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/cuda/cub.cu". + constexpr +#endif + once_flag() noexcept = default; + once_flag(const once_flag&) = delete; + once_flag& operator=(const once_flag&) = delete; + + private: + template + friend void call_once(Flag& flag, F&& f, Args&&... args); + + template + void call_once_slow(F&& f, Args&&... args) { + std::lock_guard guard(mutex_); + if (init_.load(std::memory_order_relaxed)) { + return; + } + c10::guts::invoke(std::forward(f), std::forward(args)...); + init_.store(true, std::memory_order_release); + } + + bool test_once() { + return init_.load(std::memory_order_acquire); + } + + void reset_once() { + init_.store(false, std::memory_order_release); + } + + private: + std::mutex mutex_; + std::atomic init_{false}; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h new file mode 100644 index 0000000000000000000000000000000000000000..88440a0242eb4e9e87433278006863fd38c5450d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/Deprecated.h @@ -0,0 +1,102 @@ +#pragma once + +/** + * This file provides portable macros for marking declarations + * as deprecated. You should generally use C10_DEPRECATED, + * except when marking 'using' declarations as deprecated, + * in which case you should use C10_DEFINE_DEPRECATED_USING + * (due to portability concerns). + */ + +// Sample usage: +// +// C10_DEPRECATED void bad_func(); +// struct C10_DEPRECATED BadStruct { +// ... +// }; + +// NB: __cplusplus doesn't work for MSVC, so for now MSVC always uses +// the "__declspec(deprecated)" implementation and not the C++14 +// "[[deprecated]]" attribute. We tried enabling "[[deprecated]]" for C++14 on +// MSVC, but ran into issues with some older MSVC versions. +#if (defined(__cplusplus) && __cplusplus >= 201402L) +#define C10_DEPRECATED [[deprecated]] +#define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]] +#elif defined(__GNUC__) +#define C10_DEPRECATED __attribute__((deprecated)) +// TODO Is there some way to implement this? 
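// Illustrative sketch (not part of these headers): typical use of the
// c10::once_flag / c10::call_once pair from CallOnce.h above. The fast path
// is an atomic load; the slow path takes a mutex and runs the callable
// exactly once. The flag, counter, and thread count below are arbitrary
// example names.
#include <c10/util/CallOnce.h>
#include <thread>
#include <vector>

namespace {
c10::once_flag g_init_flag;
int g_answer = 0;

void ensure_initialized() {
  // The lambda runs exactly once, even if several threads race to get here.
  c10::call_once(g_init_flag, [] { g_answer = 42; });
}
} // namespace

inline void demo_call_once() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back(ensure_initialized);
  }
  for (auto& w : workers) {
    w.join();
  }
  // g_answer == 42, and the initializer ran exactly once.
}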
+#define C10_DEPRECATED_MESSAGE(message) __attribute__((deprecated)) + +#elif defined(_MSC_VER) +#define C10_DEPRECATED __declspec(deprecated) +#define C10_DEPRECATED_MESSAGE(message) __declspec(deprecated(message)) +#else +#warning "You need to implement C10_DEPRECATED for this compiler" +#define C10_DEPRECATED +#endif + +// Sample usage: +// +// C10_DEFINE_DEPRECATED_USING(BadType, int) +// +// which is the portable version of +// +// using BadType [[deprecated]] = int; + +// technically [[deprecated]] syntax is from c++14 standard, but it works in +// many compilers. +#if defined(__has_cpp_attribute) +#if __has_cpp_attribute(deprecated) && !defined(__CUDACC__) +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName [[deprecated]] = TypeThingy; +#endif +#endif + +#if defined(_MSC_VER) +#if defined(__CUDACC__) +// neither [[deprecated]] nor __declspec(deprecated) work on nvcc on Windows; +// you get the error: +// +// error: attribute does not apply to any entity +// +// So we just turn the macro off in this case. +#if defined(C10_DEFINE_DEPRECATED_USING) +#undef C10_DEFINE_DEPRECATED_USING +#endif +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = TypeThingy; +#else +// [[deprecated]] does work in windows without nvcc, though msc doesn't support +// `__has_cpp_attribute` when c++14 is supported, otherwise +// __declspec(deprecated) is used as the alternative. +#ifndef C10_DEFINE_DEPRECATED_USING +#if defined(_MSVC_LANG) && _MSVC_LANG >= 201402L +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName [[deprecated]] = TypeThingy; +#else +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = __declspec(deprecated) TypeThingy; +#endif +#endif +#endif +#endif + +#if !defined(C10_DEFINE_DEPRECATED_USING) && defined(__GNUC__) +// nvcc has a bug where it doesn't understand __attribute__((deprecated)) +// declarations even when the host compiler supports it. We'll only use this gcc +// attribute when not cuda, and when using a GCC compiler that doesn't support +// the c++14 syntax we checked for above (available in __GNUC__ >= 5) +#if !defined(__CUDACC__) +#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName __attribute__((deprecated)) = TypeThingy; +#else +// using cuda + gcc < 5, neither deprecated syntax is available so turning off. 
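// Illustrative sketch (not part of Deprecated.h): how the two macro families
// in this header are meant to be used. Compilers that support the underlying
// attribute typically emit a warning at the marked call sites; old_api and
// OldAlias are local example names.
#include <c10/util/Deprecated.h>

C10_DEPRECATED_MESSAGE("use new_api() instead")
inline int old_api() {
  return 1;
}

// Portable spelling of `using OldAlias [[deprecated]] = int;`
C10_DEFINE_DEPRECATED_USING(OldAlias, int)

inline int demo_deprecated() {
  OldAlias v = old_api(); // both uses are flagged as deprecated when compiled
  return v;
}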
+#define C10_DEFINE_DEPRECATED_USING(TypeName, TypeThingy) \ + using TypeName = TypeThingy; +#endif +#endif + +#if !defined(C10_DEFINE_DEPRECATED_USING) +#warning "You need to implement C10_DEFINE_DEPRECATED_USING for this compiler" +#define C10_DEFINE_DEPRECATED_USING +#endif diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..7800ceb29924a7a73677ecf7400107d7d11fdbc6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2-inl.h @@ -0,0 +1,283 @@ +#pragma once + +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +#define EXP_WIDTH_FP8 5 +#define MAN_WIDTH_FP8 2 +#define EXP_BIAS_FP8 15 + +namespace c10 { + +/// Constructors + +inline C10_HOST_DEVICE Float8_e5m2::Float8_e5m2(float value) + : x(detail::fp8e5m2_from_fp32_value(value)) {} + +/// Implicit conversions + +inline C10_HOST_DEVICE Float8_e5m2::operator float() const { + return detail::fp8e5m2_to_fp32_value(x); +} + +/// Special values helpers + +inline C10_HOST_DEVICE bool Float8_e5m2::isnan() const { + return (x & 0b01111111) > 0b01111100; +} + +inline C10_HOST_DEVICE bool Float8_e5m2::isinf() const { + return (x & 0b01111111) == 0b01111100; +} + +/// Arithmetic + +inline C10_HOST_DEVICE Float8_e5m2 +operator+(const Float8_e5m2& a, const Float8_e5m2& b) { + return static_cast(a) + static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e5m2 +operator-(const Float8_e5m2& a, const Float8_e5m2& b) { + return static_cast(a) - static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e5m2 +operator*(const Float8_e5m2& a, const Float8_e5m2& b) { + return static_cast(a) * static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e5m2 operator/( + const Float8_e5m2& a, + const Float8_e5m2& b) __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e5m2 operator-(const Float8_e5m2& a) { + return -static_cast(a); +} + +inline C10_HOST_DEVICE Float8_e5m2& operator+=( + Float8_e5m2& a, + const Float8_e5m2& b) { + a = a + b; + return a; +} + +inline C10_HOST_DEVICE Float8_e5m2& operator-=( + Float8_e5m2& a, + const Float8_e5m2& b) { + a = a - b; + return a; +} + +inline C10_HOST_DEVICE Float8_e5m2& operator*=( + Float8_e5m2& a, + const Float8_e5m2& b) { + a = a * b; + return a; +} + +inline C10_HOST_DEVICE Float8_e5m2& operator/=( + Float8_e5m2& a, + const Float8_e5m2& b) { + a = a / b; + return a; +} + +/// Arithmetic with floats + +inline C10_HOST_DEVICE float operator+(Float8_e5m2 a, float b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE float operator-(Float8_e5m2 a, float b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE float operator*(Float8_e5m2 a, float b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE float operator/(Float8_e5m2 a, float b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE float operator+(float a, Float8_e5m2 b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE float operator-(float a, Float8_e5m2 b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE float operator*(float a, Float8_e5m2 b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE float operator/(float a, Float8_e5m2 b) + 
__ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE float& operator+=(float& a, const Float8_e5m2& b) { + return a += static_cast(b); +} +inline C10_HOST_DEVICE float& operator-=(float& a, const Float8_e5m2& b) { + return a -= static_cast(b); +} +inline C10_HOST_DEVICE float& operator*=(float& a, const Float8_e5m2& b) { + return a *= static_cast(b); +} +inline C10_HOST_DEVICE float& operator/=(float& a, const Float8_e5m2& b) { + return a /= static_cast(b); +} + +/// Arithmetic with doubles + +inline C10_HOST_DEVICE double operator+(Float8_e5m2 a, double b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE double operator-(Float8_e5m2 a, double b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE double operator*(Float8_e5m2 a, double b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE double operator/(Float8_e5m2 a, double b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE double operator+(double a, Float8_e5m2 b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE double operator-(double a, Float8_e5m2 b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE double operator*(double a, Float8_e5m2 b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE double operator/(double a, Float8_e5m2 b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +/// Arithmetic with ints + +inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e5m2 operator+(int a, Float8_e5m2 b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Float8_e5m2 operator-(int a, Float8_e5m2 b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Float8_e5m2 operator*(int a, Float8_e5m2 b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Float8_e5m2 operator/(int a, Float8_e5m2 b) { + return static_cast(a) / b; +} + +//// Arithmetic with int64_t + +inline C10_HOST_DEVICE Float8_e5m2 operator+(Float8_e5m2 a, int64_t b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Float8_e5m2 operator-(Float8_e5m2 a, int64_t b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Float8_e5m2 operator*(Float8_e5m2 a, int64_t b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Float8_e5m2 operator/(Float8_e5m2 a, int64_t b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Float8_e5m2 operator+(int64_t a, Float8_e5m2 b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Float8_e5m2 operator-(int64_t a, Float8_e5m2 b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Float8_e5m2 operator*(int64_t a, Float8_e5m2 b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Float8_e5m2 operator/(int64_t a, Float8_e5m2 b) { + return static_cast(a) / b; +} + +/// NOTE: we do not define comparisons directly and instead rely on the implicit +/// conversion from c10::Float8_e5m2 to float. 
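// Illustrative sketch (not part of Float8_e5m2-inl.h): a standalone decoder
// for the e5m2 layout handled above (1 sign bit, 5 exponent bits, 2 mantissa
// bits, exponent bias 15). It mirrors what the isnan()/isinf() bit tests and
// the implicit conversion to float imply, but is written against a plain
// uint8_t so it compiles on its own.
#include <cmath>
#include <cstdint>
#include <limits>

inline float decode_fp8_e5m2(uint8_t bits) {
  const int sign = (bits >> 7) & 0x1;
  const int exponent = (bits >> 2) & 0x1F; // 5 exponent bits
  const int mantissa = bits & 0x3;         // 2 mantissa bits

  float magnitude;
  if (exponent == 0x1F) {
    // All-ones exponent: infinity when the mantissa is zero, NaN otherwise
    // (matching isinf(): (x & 0x7F) == 0x7C and isnan(): (x & 0x7F) > 0x7C).
    magnitude = (mantissa == 0) ? std::numeric_limits<float>::infinity()
                                : std::numeric_limits<float>::quiet_NaN();
  } else if (exponent == 0) {
    // Subnormal: no implicit leading 1, exponent fixed at 1 - 15.
    magnitude = std::ldexp(static_cast<float>(mantissa) / 4.0f, -14);
  } else {
    // Normal: implicit leading 1, biased exponent.
    magnitude = std::ldexp(1.0f + static_cast<float>(mantissa) / 4.0f,
                           exponent - 15);
  }
  return sign ? -magnitude : magnitude;
}

// decode_fp8_e5m2(0x04) == 6.103515625e-05f (smallest normal, 2^-14)
// decode_fp8_e5m2(0x7B) == 57344.0f         (largest finite value)
// decode_fp8_e5m2(0x7C) == +infinity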
+ +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_signed = true; + static constexpr bool is_integer = false; + static constexpr bool is_specialized = true; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = false; + static constexpr bool has_signaling_NaN = false; + static constexpr auto has_denorm = true; + static constexpr auto has_denorm_loss = true; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = false; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 3; + static constexpr int digits10 = 0; + static constexpr int max_digits10 = 2; + static constexpr int radix = 2; + static constexpr int min_exponent = -13; + static constexpr int min_exponent10 = -4; + static constexpr int max_exponent = 16; + static constexpr int max_exponent10 = 4; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = + numeric_limits::tinyness_before; + + static constexpr c10::Float8_e5m2 min() { + return c10::Float8_e5m2(0x4, c10::Float8_e5m2::from_bits()); + } + static constexpr c10::Float8_e5m2 max() { + return c10::Float8_e5m2(0x7B, c10::Float8_e5m2::from_bits()); + } + static constexpr c10::Float8_e5m2 lowest() { + return c10::Float8_e5m2(0xFB, c10::Float8_e5m2::from_bits()); + } + static constexpr c10::Float8_e5m2 epsilon() { + return c10::Float8_e5m2(0x34, c10::Float8_e5m2::from_bits()); + } + static constexpr c10::Float8_e5m2 round_error() { + return c10::Float8_e5m2(0x38, c10::Float8_e5m2::from_bits()); + } + static constexpr c10::Float8_e5m2 infinity() { + return c10::Float8_e5m2(0x7C, c10::Float8_e5m2::from_bits()); + } + static constexpr c10::Float8_e5m2 denorm_min() { + return c10::Float8_e5m2(0x01, c10::Float8_e5m2::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..cad9762d4469f93a5129bdc2a9d551e0c34e03b4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/Half-inl.h @@ -0,0 +1,350 @@ +#pragma once + +#include +#include + +#include +#include + +#ifdef __CUDACC__ +#include +#endif + +#ifdef __HIPCC__ +#include +#endif + +#if defined(CL_SYCL_LANGUAGE_VERSION) +#include // for SYCL 1.2.1 +#elif defined(SYCL_LANGUAGE_VERSION) +#include // for SYCL 2020 +#endif + +#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \ + !defined(__APPLE__) +#include +#endif + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +#if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +/// Constructors +inline Half::Half(float16_t value) : x(detail::fp16_to_bits(value)) {} +inline Half::operator float16_t() const { + return detail::fp16_from_bits(x); +} +#else + +inline C10_HOST_DEVICE Half::Half(float value) + : +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + x(__half_as_short(__float2half(value))) +#elif defined(__SYCL_DEVICE_ONLY__) + x(c10::bit_cast(sycl::half(value))) +#elif (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \ + !defined(__APPLE__) + 
x(at::vec::float2half_scalar(value)) +#else + x(detail::fp16_ieee_from_fp32_value(value)) +#endif +{ +} + +/// Implicit conversions + +inline C10_HOST_DEVICE Half::operator float() const { +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + return __half2float(*reinterpret_cast(&x)); +#elif defined(__SYCL_DEVICE_ONLY__) + return float(c10::bit_cast(x)); +#elif (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \ + !defined(__APPLE__) + return at::vec::half2float_scalar(x); +#elif defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) + return detail::native_fp16_to_fp32_value(x); +#else + return detail::fp16_ieee_to_fp32_value(x); +#endif +} + +#endif /* !defined(__aarch64__) || defined(C10_MOBILE) || defined(__CUDACC__) \ + */ + +#if defined(__CUDACC__) || defined(__HIPCC__) +inline C10_HOST_DEVICE Half::Half(const __half& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE Half::operator __half() const { + return *reinterpret_cast(&x); +} +#endif + +#ifdef SYCL_LANGUAGE_VERSION +inline C10_HOST_DEVICE Half::Half(const sycl::half& value) { + x = *reinterpret_cast(&value); +} +inline C10_HOST_DEVICE Half::operator sycl::half() const { + return *reinterpret_cast(&x); +} +#endif + +// CUDA intrinsics + +#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 350)) || \ + (defined(__clang__) && defined(__CUDA__)) +inline __device__ Half __ldg(const Half* ptr) { + return __ldg(reinterpret_cast(ptr)); +} +#endif + +/// Arithmetic + +inline C10_HOST_DEVICE Half operator+(const Half& a, const Half& b) { + return static_cast(a) + static_cast(b); +} + +inline C10_HOST_DEVICE Half operator-(const Half& a, const Half& b) { + return static_cast(a) - static_cast(b); +} + +inline C10_HOST_DEVICE Half operator*(const Half& a, const Half& b) { + return static_cast(a) * static_cast(b); +} + +inline C10_HOST_DEVICE Half operator/(const Half& a, const Half& b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator-(const Half& a) { +#if (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 530) || \ + defined(__HIP_DEVICE_COMPILE__) + return __hneg(a); +#elif defined(__SYCL_DEVICE_ONLY__) + return -c10::bit_cast(a); +#else + return -static_cast(a); +#endif +} + +inline C10_HOST_DEVICE Half& operator+=(Half& a, const Half& b) { + a = a + b; + return a; +} + +inline C10_HOST_DEVICE Half& operator-=(Half& a, const Half& b) { + a = a - b; + return a; +} + +inline C10_HOST_DEVICE Half& operator*=(Half& a, const Half& b) { + a = a * b; + return a; +} + +inline C10_HOST_DEVICE Half& operator/=(Half& a, const Half& b) { + a = a / b; + return a; +} + +/// Arithmetic with floats + +inline C10_HOST_DEVICE float operator+(Half a, float b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE float operator-(Half a, float b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE float operator*(Half a, float b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE float operator/(Half a, float b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE float operator+(float a, Half b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE float operator-(float a, Half b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE float operator*(float a, Half b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE float operator/(float a, Half b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE 
float& operator+=(float& a, const Half& b) { + return a += static_cast(b); +} +inline C10_HOST_DEVICE float& operator-=(float& a, const Half& b) { + return a -= static_cast(b); +} +inline C10_HOST_DEVICE float& operator*=(float& a, const Half& b) { + return a *= static_cast(b); +} +inline C10_HOST_DEVICE float& operator/=(float& a, const Half& b) { + return a /= static_cast(b); +} + +/// Arithmetic with doubles + +inline C10_HOST_DEVICE double operator+(Half a, double b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE double operator-(Half a, double b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE double operator*(Half a, double b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE double operator/(Half a, double b) + __ubsan_ignore_float_divide_by_zero__ { + return static_cast(a) / b; +} + +inline C10_HOST_DEVICE double operator+(double a, Half b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE double operator-(double a, Half b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE double operator*(double a, Half b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE double operator/(double a, Half b) + __ubsan_ignore_float_divide_by_zero__ { + return a / static_cast(b); +} + +/// Arithmetic with ints + +inline C10_HOST_DEVICE Half operator+(Half a, int b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Half operator-(Half a, int b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Half operator*(Half a, int b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Half operator/(Half a, int b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator+(int a, Half b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Half operator-(int a, Half b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Half operator*(int a, Half b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Half operator/(int a, Half b) { + return static_cast(a) / b; +} + +//// Arithmetic with int64_t + +inline C10_HOST_DEVICE Half operator+(Half a, int64_t b) { + return a + static_cast(b); +} +inline C10_HOST_DEVICE Half operator-(Half a, int64_t b) { + return a - static_cast(b); +} +inline C10_HOST_DEVICE Half operator*(Half a, int64_t b) { + return a * static_cast(b); +} +inline C10_HOST_DEVICE Half operator/(Half a, int64_t b) { + return a / static_cast(b); +} + +inline C10_HOST_DEVICE Half operator+(int64_t a, Half b) { + return static_cast(a) + b; +} +inline C10_HOST_DEVICE Half operator-(int64_t a, Half b) { + return static_cast(a) - b; +} +inline C10_HOST_DEVICE Half operator*(int64_t a, Half b) { + return static_cast(a) * b; +} +inline C10_HOST_DEVICE Half operator/(int64_t a, Half b) { + return static_cast(a) / b; +} + +/// NOTE: we do not define comparisons directly and instead rely on the implicit +/// conversion from c10::Half to float. 
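// Illustrative sketch (not part of Half-inl.h): every c10::Half operator above
// converts to float, computes in float, and (for Half results) rounds back to
// half precision. With an 11-bit significand, integers above 2048 are no
// longer exactly representable. Assumes the c10 headers are available.
#include <c10/util/Half.h>

inline void demo_half_precision() {
  c10::Half a(2048.0f);
  c10::Half b = a + 1;   // the int operand is converted to Half first
  float back = b;        // implicit conversion back to float
  // back == 2048.0f: the exact sum 2049 is rounded to the nearest
  // representable half (ties to even).

  c10::Half c(0.1f), d(0.2f);
  float sum = c + d;     // Half + Half is computed as float + float
  // sum reflects the half-precision rounding of 0.1 and 0.2, not the
  // float-precision values.
  (void)back;
  (void)sum;
}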
+ +} // namespace c10 + +namespace std { + +template <> +class numeric_limits { + public: + static constexpr bool is_specialized = true; + static constexpr bool is_signed = true; + static constexpr bool is_integer = false; + static constexpr bool is_exact = false; + static constexpr bool has_infinity = true; + static constexpr bool has_quiet_NaN = true; + static constexpr bool has_signaling_NaN = true; + static constexpr auto has_denorm = numeric_limits::has_denorm; + static constexpr auto has_denorm_loss = + numeric_limits::has_denorm_loss; + static constexpr auto round_style = numeric_limits::round_style; + static constexpr bool is_iec559 = true; + static constexpr bool is_bounded = true; + static constexpr bool is_modulo = false; + static constexpr int digits = 11; + static constexpr int digits10 = 3; + static constexpr int max_digits10 = 5; + static constexpr int radix = 2; + static constexpr int min_exponent = -13; + static constexpr int min_exponent10 = -4; + static constexpr int max_exponent = 16; + static constexpr int max_exponent10 = 4; + static constexpr auto traps = numeric_limits::traps; + static constexpr auto tinyness_before = + numeric_limits::tinyness_before; + static constexpr c10::Half min() { + return c10::Half(0x0400, c10::Half::from_bits()); + } + static constexpr c10::Half lowest() { + return c10::Half(0xFBFF, c10::Half::from_bits()); + } + static constexpr c10::Half max() { + return c10::Half(0x7BFF, c10::Half::from_bits()); + } + static constexpr c10::Half epsilon() { + return c10::Half(0x1400, c10::Half::from_bits()); + } + static constexpr c10::Half round_error() { + return c10::Half(0x3800, c10::Half::from_bits()); + } + static constexpr c10::Half infinity() { + return c10::Half(0x7C00, c10::Half::from_bits()); + } + static constexpr c10::Half quiet_NaN() { + return c10::Half(0x7E00, c10::Half::from_bits()); + } + static constexpr c10::Half signaling_NaN() { + return c10::Half(0x7D00, c10::Half::from_bits()); + } + static constexpr c10::Half denorm_min() { + return c10::Half(0x0001, c10::Half::from_bits()); + } +}; + +} // namespace std + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h new file mode 100644 index 0000000000000000000000000000000000000000..41f6d2db4acd5d98a60b5236592ab2109962bbe3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/MaybeOwned.h @@ -0,0 +1,237 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace c10 { + +/// MaybeOwnedTraits describes how to borrow from T. Here is how we +/// can implement borrowing from an arbitrary type T using a raw +/// pointer to const: +template +struct MaybeOwnedTraitsGenericImpl { + using owned_type = T; + using borrow_type = const T*; + + static borrow_type createBorrow(const owned_type& from) { + return &from; + } + + static void assignBorrow(borrow_type& lhs, borrow_type rhs) { + lhs = rhs; + } + + static void destroyBorrow(borrow_type& /*toDestroy*/) {} + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return *borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static bool debugBorrowIsValid(const borrow_type& borrow) { + return borrow != nullptr; + } +}; + +/// It is possible to eliminate the extra layer of indirection for +/// borrows for some types that we control. 
For examples, see +/// intrusive_ptr.h and TensorBody.h. + +template +struct MaybeOwnedTraits; + +// Explicitly enable MaybeOwned>, rather than allowing +// MaybeOwned to be used for any type right away. +template +struct MaybeOwnedTraits> + : public MaybeOwnedTraitsGenericImpl> {}; + +/// A smart pointer around either a borrowed or owned T. When +/// constructed with borrowed(), the caller MUST ensure that the +/// borrowed-from argument outlives this MaybeOwned. Compare to +/// Rust's std::borrow::Cow +/// (https://doc.rust-lang.org/std/borrow/enum.Cow.html), but note +/// that it is probably not suitable for general use because C++ has +/// no borrow checking. Included here to support +/// Tensor::expect_contiguous. +template +class MaybeOwned final { + using borrow_type = typename MaybeOwnedTraits::borrow_type; + using owned_type = typename MaybeOwnedTraits::owned_type; + + bool isBorrowed_; + union { + borrow_type borrow_; + owned_type own_; + }; + + /// Don't use this; use borrowed() instead. + explicit MaybeOwned(const owned_type& t) + : isBorrowed_(true), borrow_(MaybeOwnedTraits::createBorrow(t)) {} + + /// Don't use this; use owned() instead. + explicit MaybeOwned(T&& t) noexcept(std::is_nothrow_move_constructible_v) + : isBorrowed_(false), own_(std::move(t)) {} + + /// Don't use this; use owned() instead. + template + explicit MaybeOwned(std::in_place_t, Args&&... args) + : isBorrowed_(false), own_(std::forward(args)...) {} + + public: + explicit MaybeOwned() : isBorrowed_(true), borrow_() {} + + // Copying a borrow yields another borrow of the original, as with a + // T*. Copying an owned T yields another owned T for safety: no + // chains of borrowing by default! (Note you could get that behavior + // with MaybeOwned::borrowed(*rhs) if you wanted it.) 
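// Illustrative sketch (not part of MaybeOwned.h): the two factory functions
// and the lifetime contract that borrowed() imposes. at::Tensor is used here
// because MaybeOwnedTraits is specialized for it (see TensorBody.h); assumes
// a working libtorch build.
#include <ATen/ATen.h>
#include <c10/util/MaybeOwned.h>

inline void demo_maybe_owned() {
  at::Tensor long_lived = at::ones({2, 3});

  // Borrow: no ownership is taken, so `long_lived` MUST outlive `borrowed`.
  c10::MaybeOwned<at::Tensor> borrowed =
      c10::MaybeOwned<at::Tensor>::borrowed(long_lived);

  // Own: this MaybeOwned holds (and eventually destroys) its own Tensor.
  c10::MaybeOwned<at::Tensor> owned =
      c10::MaybeOwned<at::Tensor>::owned(at::zeros({2, 3}));

  // Access goes through * and ->, as with a smart pointer.
  int64_t n = borrowed->numel() + (*owned).numel();
  (void)n;
}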
+ MaybeOwned(const MaybeOwned& rhs) : isBorrowed_(rhs.isBorrowed_) { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + new (&own_) T(rhs.own_); + } + } + + MaybeOwned& operator=(const MaybeOwned& rhs) { + if (this == &rhs) { + return *this; + } + if (C10_UNLIKELY(!isBorrowed_)) { + if (rhs.isBorrowed_) { + own_.~T(); + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + isBorrowed_ = true; + } else { + own_ = rhs.own_; + } + } else { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + MaybeOwnedTraits::destroyBorrow(borrow_); + new (&own_) T(rhs.own_); + isBorrowed_ = false; + } + } + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isBorrowed_ == rhs.isBorrowed_); + return *this; + } + + MaybeOwned(MaybeOwned&& rhs) noexcept( + // NOLINTNEXTLINE(*-noexcept-move-*) + std::is_nothrow_move_constructible_v && + std::is_nothrow_move_assignable_v) + : isBorrowed_(rhs.isBorrowed_) { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + new (&own_) T(std::move(rhs.own_)); + } + } + + MaybeOwned& operator=(MaybeOwned&& rhs) noexcept( + std::is_nothrow_move_assignable_v && + std::is_nothrow_move_assignable_v && + std::is_nothrow_move_constructible_v && + // NOLINTNEXTLINE(*-noexcept-move-*) + std::is_nothrow_destructible_v && + std::is_nothrow_destructible_v) { + if (this == &rhs) { + return *this; + } + if (C10_UNLIKELY(!isBorrowed_)) { + if (rhs.isBorrowed_) { + own_.~T(); + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + isBorrowed_ = true; + } else { + own_ = std::move(rhs.own_); + } + } else { + if (C10_LIKELY(rhs.isBorrowed_)) { + MaybeOwnedTraits::assignBorrow(borrow_, rhs.borrow_); + } else { + MaybeOwnedTraits::destroyBorrow(borrow_); + new (&own_) T(std::move(rhs.own_)); + isBorrowed_ = false; + } + } + return *this; + } + + static MaybeOwned borrowed(const T& t) { + return MaybeOwned(t); + } + + static MaybeOwned owned(T&& t) noexcept( + std::is_nothrow_move_constructible_v) { + return MaybeOwned(std::move(t)); + } + + template + static MaybeOwned owned(std::in_place_t, Args&&... args) { + return MaybeOwned(std::in_place, std::forward(args)...); + } + + ~MaybeOwned() noexcept( + // NOLINTNEXTLINE(*-noexcept-destructor) + std::is_nothrow_destructible_v && + std::is_nothrow_destructible_v) { + if (C10_UNLIKELY(!isBorrowed_)) { + own_.~T(); + } else { + MaybeOwnedTraits::destroyBorrow(borrow_); + } + } + + // This is an implementation detail! You should know what you're doing + // if you are testing this. If you just want to guarantee ownership move + // this into a T + bool unsafeIsBorrowed() const { + return isBorrowed_; + } + + const T& operator*() const& { + if (isBorrowed_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + MaybeOwnedTraits::debugBorrowIsValid(borrow_)); + } + return C10_LIKELY(isBorrowed_) + ? MaybeOwnedTraits::referenceFromBorrow(borrow_) + : own_; + } + + const T* operator->() const { + if (isBorrowed_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + MaybeOwnedTraits::debugBorrowIsValid(borrow_)); + } + return C10_LIKELY(isBorrowed_) + ? MaybeOwnedTraits::pointerFromBorrow(borrow_) + : &own_; + } + + // If borrowed, copy the underlying T. If owned, move from + // it. borrowed/owned state remains the same, and either we + // reference the same borrow as before or we are an owned moved-from + // T. 
+ T operator*() && { + if (isBorrowed_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + MaybeOwnedTraits::debugBorrowIsValid(borrow_)); + return MaybeOwnedTraits::referenceFromBorrow(borrow_); + } else { + return std::move(own_); + } + } +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h new file mode 100644 index 0000000000000000000000000000000000000000..98237bba92f56d418421ccb422806c6bcef68afb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h @@ -0,0 +1,236 @@ +// This file defines OptionalArrayRef, a class that has almost the same +// exact functionality as std::optional>, except that its +// converting constructor fixes a dangling pointer issue. +// +// The implicit converting constructor of both std::optional> and +// std::optional> can cause the underlying ArrayRef to store +// a dangling pointer. OptionalArrayRef prevents this by wrapping +// a std::optional> and fixing the constructor implementation. +// +// See https://github.com/pytorch/pytorch/issues/63645 for more on this. + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace c10 { + +template +class OptionalArrayRef final { + public: + // Constructors + + constexpr OptionalArrayRef() noexcept = default; + + constexpr OptionalArrayRef(nullopt_t) noexcept {} + + OptionalArrayRef(const OptionalArrayRef& other) = default; + + OptionalArrayRef(OptionalArrayRef&& other) noexcept = default; + + constexpr OptionalArrayRef(const optional>& other) noexcept + : wrapped_opt_array_ref(other) {} + + constexpr OptionalArrayRef(optional>&& other) noexcept + : wrapped_opt_array_ref(std::move(other)) {} + + constexpr OptionalArrayRef(const T& value) noexcept + : wrapped_opt_array_ref(value) {} + + template < + typename U = ArrayRef, + std::enable_if_t< + !std::is_same_v, OptionalArrayRef> && + !std::is_same_v, std::in_place_t> && + std::is_constructible_v, U&&> && + std::is_convertible_v> && + !std::is_convertible_v, + bool> = false> + constexpr OptionalArrayRef(U&& value) noexcept( + std::is_nothrow_constructible_v, U&&>) + : wrapped_opt_array_ref(std::forward(value)) {} + + template < + typename U = ArrayRef, + std::enable_if_t< + !std::is_same_v, OptionalArrayRef> && + !std::is_same_v, std::in_place_t> && + std::is_constructible_v, U&&> && + !std::is_convertible_v>, + bool> = false> + constexpr explicit OptionalArrayRef(U&& value) noexcept( + std::is_nothrow_constructible_v, U&&>) + : wrapped_opt_array_ref(std::forward(value)) {} + + template + constexpr explicit OptionalArrayRef( + std::in_place_t ip, + Args&&... args) noexcept + : wrapped_opt_array_ref(ip, std::forward(args)...) {} + + template + constexpr explicit OptionalArrayRef( + std::in_place_t ip, + std::initializer_list il, + Args&&... args) + : wrapped_opt_array_ref(ip, il, std::forward(args)...) 
+
+  constexpr OptionalArrayRef(const std::initializer_list<T>& Vec)
+      : wrapped_opt_array_ref(ArrayRef<T>(Vec)) {}
+
+  // Destructor
+
+  ~OptionalArrayRef() = default;
+
+  // Assignment
+
+  constexpr OptionalArrayRef& operator=(nullopt_t) noexcept {
+    wrapped_opt_array_ref = c10::nullopt;
+    return *this;
+  }
+
+  OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;
+
+  OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;
+
+  constexpr OptionalArrayRef& operator=(
+      const optional<ArrayRef<T>>& other) noexcept {
+    wrapped_opt_array_ref = other;
+    return *this;
+  }
+
+  constexpr OptionalArrayRef& operator=(
+      optional<ArrayRef<T>>&& other) noexcept {
+    wrapped_opt_array_ref = std::move(other);
+    return *this;
+  }
+
+  template <
+      typename U = ArrayRef<T>,
+      typename = std::enable_if_t<
+          !std::is_same_v<std::decay_t<U>, OptionalArrayRef> &&
+          std::is_constructible_v<ArrayRef<T>, U&&> &&
+          std::is_assignable_v<ArrayRef<T>&, U&&>>>
+  constexpr OptionalArrayRef& operator=(U&& value) noexcept(
+      std::is_nothrow_constructible_v<ArrayRef<T>, U&&> &&
+      std::is_nothrow_assignable_v<ArrayRef<T>&, U&&>) {
+    wrapped_opt_array_ref = std::forward<U>(value);
+    return *this;
+  }
+
+  // Observers
+
+  constexpr ArrayRef<T>* operator->() noexcept {
+    return &wrapped_opt_array_ref.value();
+  }
+
+  constexpr const ArrayRef<T>* operator->() const noexcept {
+    return &wrapped_opt_array_ref.value();
+  }
+
+  constexpr ArrayRef<T>& operator*() & noexcept {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr const ArrayRef<T>& operator*() const& noexcept {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr ArrayRef<T>&& operator*() && noexcept {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  constexpr const ArrayRef<T>&& operator*() const&& noexcept {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  constexpr explicit operator bool() const noexcept {
+    return wrapped_opt_array_ref.has_value();
+  }
+
+  constexpr bool has_value() const noexcept {
+    return wrapped_opt_array_ref.has_value();
+  }
+
+  constexpr ArrayRef<T>& value() & {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr const ArrayRef<T>& value() const& {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr ArrayRef<T>&& value() && {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  constexpr const ArrayRef<T>&& value() const&& {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  template <typename U>
+  constexpr std::
+      enable_if_t<std::is_convertible_v<U&&, ArrayRef<T>>, ArrayRef<T>>
+      value_or(U&& default_value) const& {
+    return wrapped_opt_array_ref.value_or(std::forward<U>(default_value));
+  }
+
+  template <typename U>
+  constexpr std::
+      enable_if_t<std::is_convertible_v<U&&, ArrayRef<T>>, ArrayRef<T>>
+      value_or(U&& default_value) && {
+    return wrapped_opt_array_ref.value_or(std::forward<U>(default_value));
+  }
+
+  // Modifiers
+
+  constexpr void swap(OptionalArrayRef& other) noexcept {
+    std::swap(wrapped_opt_array_ref, other.wrapped_opt_array_ref);
+  }
+
+  constexpr void reset() noexcept {
+    wrapped_opt_array_ref.reset();
+  }
+
+  template <typename... Args>
+  constexpr std::
+      enable_if_t<std::is_constructible_v<ArrayRef<T>, Args&&...>, ArrayRef<T>&>
+      emplace(Args&&... args) noexcept(
+          std::is_nothrow_constructible_v<ArrayRef<T>, Args&&...>) {
+    return wrapped_opt_array_ref.emplace(std::forward<Args>(args)...);
+  }
+
+  template <typename... Args>
+  constexpr ArrayRef<T>& emplace(
+      std::initializer_list<T> il,
+      Args&&... args) noexcept {
+    return wrapped_opt_array_ref.emplace(il, std::forward<Args>(args)...);
+  }
+
+ private:
+  optional<ArrayRef<T>> wrapped_opt_array_ref;
+};
+
+using OptionalIntArrayRef = OptionalArrayRef<int64_t>;
+
+inline bool operator==(
+    const OptionalIntArrayRef& a1,
+    const IntArrayRef& other) {
+  if (!a1.has_value()) {
+    return false;
+  }
+  return a1.value() == other;
+}
+
+inline bool operator==(
+    const c10::IntArrayRef& a1,
+    const c10::OptionalIntArrayRef& a2) {
+  return a2 == a1;
+}
+
+} // namespace c10
diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h
new file mode 100644
index 0000000000000000000000000000000000000000..850bb5d4c42697e323b15ce52e43cc2731075c16
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/ThreadLocal.h
@@ -0,0 +1,153 @@
+#pragma once
+
+#include <c10/macros/Macros.h>
+
+/**
+ * Android versions with libgnustl incorrectly handle the thread_local C++
+ * qualifier with composite types. NDK versions up to r17 are affected.
+ *
+ * (A fix landed on Jun 4 2018:
+ * https://android-review.googlesource.com/c/toolchain/gcc/+/683601)
+ *
+ * In such cases, use the c10::ThreadLocal<T> wrapper,
+ * which is `pthread_*` based with smart pointer semantics.
+ *
+ * In addition, the convenience macro C10_DEFINE_TLS_static is available.
+ * To define a static TLS variable of type std::string, do the following:
+ * ```
+ * C10_DEFINE_TLS_static(std::string, str_tls_);
+ * ///////
+ * {
+ *   *str_tls_ = "abc";
+ *   assert(str_tls_->length(), 3);
+ * }
+ * ```
+ *
+ * (see c10/test/util/ThreadLocal_test.cpp for more examples)
+ */
+#if !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
+
+#if defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
+#define C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE
+#endif // defined(C10_ANDROID) && defined(__GLIBCXX__) && __GLIBCXX__ < 20180604
+
+#endif // !defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
+
+#if defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
+#include <c10/util/Exception.h>
+#include <errno.h>
+#include <pthread.h>
+#include <memory>
+namespace c10 {
+
+/**
+ * @brief Temporary thread_local C++ qualifier replacement for Android
+ * based on `pthread_*`.
+ * To be used with composite types that provide a default ctor.
+ */
+template <typename Type>
+class ThreadLocal {
+ public:
+  ThreadLocal() {
+    pthread_key_create(
+        &key_, [](void* buf) { delete static_cast<Type*>(buf); });
+  }
+
+  ~ThreadLocal() {
+    if (void* current = pthread_getspecific(key_)) {
+      delete static_cast<Type*>(current);
+    }
+
+    pthread_key_delete(key_);
+  }
+
+  ThreadLocal(const ThreadLocal&) = delete;
+  ThreadLocal& operator=(const ThreadLocal&) = delete;
+
+  Type& get() {
+    if (void* current = pthread_getspecific(key_)) {
+      return *static_cast<Type*>(current);
+    }
+
+    std::unique_ptr<Type> ptr = std::make_unique<Type>();
+    if (0 == pthread_setspecific(key_, ptr.get())) {
+      return *ptr.release();
+    }
+
+    int err = errno;
+    TORCH_INTERNAL_ASSERT(false, "pthread_setspecific() failed, errno = ", err);
+  }
+
+  Type& operator*() {
+    return get();
+  }
+
+  Type* operator->() {
+    return &get();
+  }
+
+ private:
+  pthread_key_t key_;
+};
+
+} // namespace c10
+
+#define C10_DEFINE_TLS_static(Type, Name) static ::c10::ThreadLocal<Type> Name
+
+#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
+  static ::c10::ThreadLocal<Type> Name
+
+#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
+  ::c10::ThreadLocal<Type> Class::Name
+
+#else // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
+
+namespace c10 {
+
+/**
+ * @brief Default thread_local implementation for non-Android cases.
+ * To be used with composite types that provide a default ctor.
+ */
+template <typename Type>
+class ThreadLocal {
+ public:
+  using Accessor = Type* (*)();
+  explicit ThreadLocal(Accessor accessor) : accessor_(accessor) {}
+
+  ThreadLocal(const ThreadLocal&) = delete;
+  ThreadLocal& operator=(const ThreadLocal&) = delete;
+
+  Type& get() {
+    return *accessor_();
+  }
+
+  Type& operator*() {
+    return get();
+  }
+
+  Type* operator->() {
+    return &get();
+  }
+
+ private:
+  Accessor accessor_;
+};
+
+} // namespace c10
+
+#define C10_DEFINE_TLS_static(Type, Name) \
+  static ::c10::ThreadLocal<Type> Name([]() { \
+    static thread_local Type var; \
+    return &var; \
+  })
+
+#define C10_DECLARE_TLS_class_static(Class, Type, Name) \
+  static ::c10::ThreadLocal<Type> Name
+
+#define C10_DEFINE_TLS_class_static(Class, Type, Name) \
+  ::c10::ThreadLocal<Type> Class::Name([]() { \
+    static thread_local Type var; \
+    return &var; \
+  })
+
+#endif // defined(C10_PREFER_CUSTOM_THREAD_LOCAL_STORAGE)
diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/copysign.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/copysign.h
new file mode 100644
index 0000000000000000000000000000000000000000..e77b7e61b0aeffb1925ef97ab39664f94aad65a8
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/copysign.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include <c10/util/BFloat16.h>
+#include <c10/util/Half.h>
+
+namespace c10 {
+
+// Note: An explicit implementation of copysign for Half and BFloat16
+// is needed to work around a g++-7/8 crash on aarch64, but it also makes
+// copysign faster for the half-precision types.
+template <typename T, typename U>
+inline auto copysign(const T& a, const U& b) {
+  return std::copysign(a, b);
+}
+
+// Implement copysign for half-precision floats using bit ops.
+// The sign is the most significant bit for both the half and bfloat16 types.
+inline c10::Half copysign(c10::Half a, c10::Half b) {
+  return c10::Half((a.x & 0x7fff) | (b.x & 0x8000), c10::Half::from_bits());
+}
+
+inline c10::BFloat16 copysign(c10::BFloat16 a, c10::BFloat16 b) {
+  return c10::BFloat16(
+      (a.x & 0x7fff) | (b.x & 0x8000), c10::BFloat16::from_bits());
+}
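+
+// Illustrative example (added commentary, not upstream code): bit 15 holds
+// the sign for both Half and BFloat16, so the masks above splice the
+// magnitude bits of `a` with the sign bit of `b`:
+//
+//   c10::Half a(2.0f), b(-1.0f);
+//   c10::Half c = c10::copysign(a, b); // c.x == (a.x & 0x7fff) | (b.x & 0x8000)
+//   // c represents -2.0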
+
+} // namespace c10
diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/logging_is_google_glog.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/logging_is_google_glog.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5470d22cecd34cff4cb8e324d743fa354d285b8
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/logging_is_google_glog.h
@@ -0,0 +1,109 @@
+#ifndef C10_UTIL_LOGGING_IS_GOOGLE_GLOG_H_
+#define C10_UTIL_LOGGING_IS_GOOGLE_GLOG_H_
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include <iomanip> // because some of the caffe2 code uses e.g. std::setw
+// Using google glog. For glog 0.3.2 versions, stl_logging.h needs to be before
+// logging.h to actually use stl_logging. Because template magic.
+// In addition, we do not do stl logging in .cu files because nvcc does not like
+// it. Some mobile platforms do not like stl_logging, so we add an
+// overload in that case as well.
+
+#ifdef __CUDACC__
+#include <cuda_runtime.h>
+#endif
+
+#if !defined(__CUDACC__) && !defined(C10_USE_MINIMAL_GLOG)
+#include <glog/stl_logging.h>
+
+// Old versions of glog don't declare this using declaration, so help
+// them out. Fortunately, C++ won't complain if you declare the same
+// using declaration multiple times.
+namespace std {
+using ::operator<<;
+}
+
+#else // !defined(__CUDACC__) && !defined(C10_USE_MINIMAL_GLOG)
+
+// In the cudacc compiler scenario, we will simply ignore the container
+// printout feature. Basically we need to register a fake overload for
+// vector/string - here, we just ignore the entries in the logs.
+
+namespace std {
+#define INSTANTIATE_FOR_CONTAINER(container) \
+  template <class... Types> \
+  ostream& operator<<(ostream& out, const container<Types...>&) { \
+    return out; \
+  }
+
+INSTANTIATE_FOR_CONTAINER(vector)
+INSTANTIATE_FOR_CONTAINER(map)
+INSTANTIATE_FOR_CONTAINER(set)
+#undef INSTANTIATE_FOR_CONTAINER
+} // namespace std
+
+#endif
+
+#include <glog/logging.h>
+
+// Additional macros on top of glog
+#define TORCH_CHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
+#define TORCH_CHECK_NE(val1, val2) CHECK_NE(val1, val2)
+#define TORCH_CHECK_LE(val1, val2) CHECK_LE(val1, val2)
+#define TORCH_CHECK_LT(val1, val2) CHECK_LT(val1, val2)
+#define TORCH_CHECK_GE(val1, val2) CHECK_GE(val1, val2)
+#define TORCH_CHECK_GT(val1, val2) CHECK_GT(val1, val2)
+
+#ifndef NDEBUG
+#define TORCH_DCHECK_EQ(val1, val2) DCHECK_EQ(val1, val2)
+#define TORCH_DCHECK_NE(val1, val2) DCHECK_NE(val1, val2)
+#define TORCH_DCHECK_LE(val1, val2) DCHECK_LE(val1, val2)
+#define TORCH_DCHECK_LT(val1, val2) DCHECK_LT(val1, val2)
+#define TORCH_DCHECK_GE(val1, val2) DCHECK_GE(val1, val2)
+#define TORCH_DCHECK_GT(val1, val2) DCHECK_GT(val1, val2)
+#else // !NDEBUG
+// These versions generate no code in optimized mode.
+#define TORCH_DCHECK_EQ(val1, val2) \
+  while (false) \
+  DCHECK_EQ(val1, val2)
+#define TORCH_DCHECK_NE(val1, val2) \
+  while (false) \
+  DCHECK_NE(val1, val2)
+#define TORCH_DCHECK_LE(val1, val2) \
+  while (false) \
+  DCHECK_LE(val1, val2)
+#define TORCH_DCHECK_LT(val1, val2) \
+  while (false) \
+  DCHECK_LT(val1, val2)
+#define TORCH_DCHECK_GE(val1, val2) \
+  while (false) \
+  DCHECK_GE(val1, val2)
+#define TORCH_DCHECK_GT(val1, val2) \
+  while (false) \
+  DCHECK_GT(val1, val2)
+#endif // NDEBUG
+
+// Check that a pointer is not null.
+#define TORCH_CHECK_NOTNULL(val) CHECK_NOTNULL(val)
+
+#ifndef NDEBUG
+// Debug-only version of TORCH_CHECK_NOTNULL
+#define TORCH_DCHECK_NOTNULL(val) DCHECK_NOTNULL(val)
+#else // !NDEBUG
+// Optimized version - generates no code.
+#define TORCH_DCHECK_NOTNULL(val) \
+  while (false) \
+  DCHECK_NOTNULL(val)
+#endif // NDEBUG
+
+// Log with source location information override (to be used in generic
+// warning/error handlers implemented as functions, not macros)
+//
+// Note, we don't respect GOOGLE_STRIP_LOG here for simplicity
+#define LOG_AT_FILE_LINE(n, file, line) \
+  ::google::LogMessage(file, line, ::google::GLOG_##n).stream()
+
+#endif // C10_UTIL_LOGGING_IS_GOOGLE_GLOG_H_
diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/util/thread_name.h b/parrot/lib/python3.10/site-packages/torch/include/c10/util/thread_name.h
new file mode 100644
index 0000000000000000000000000000000000000000..2420abf7ad06f726998e94439066fc1040c32455
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/c10/util/thread_name.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <string>
+
+#include <c10/macros/Macros.h>
+
+namespace c10 {
+
+C10_API void setThreadName(std::string name);
+
+} // namespace c10