diff --git a/.gitattributes b/.gitattributes
index 75417c83af3d005b553ffae17b28ea447fcf5830..256df1104e44041ba506f5dd8699a58a87550447 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -674,3 +674,11 @@ llava_video/lib/python3.10/site-packages/matplotlib/backends/_tkagg.cpython-310-
 llava_video/lib/python3.10/site-packages/matplotlib/__pycache__/widgets.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 llava_video/lib/python3.10/site-packages/matplotlib/__pycache__/colors.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 moondream/bin/python filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/compiler_compat/ld filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/libreadline.so.8.2 filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/libstdc++.so.6 filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/libz.so filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/libreadline.a filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/libbz2.a filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/libsqlite3.so filter=lfs diff=lfs merge=lfs -text
+mplug_owl2/lib/libquadmath.so.0 filter=lfs diff=lfs merge=lfs -text
diff --git a/mplug_owl2/compiler_compat/ld b/mplug_owl2/compiler_compat/ld
new file mode 100644
index 0000000000000000000000000000000000000000..5d9795f7d0788693c2421b25013239554ebf54b6
--- /dev/null
+++ b/mplug_owl2/compiler_compat/ld
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aaaab6b3200c6f71e5f2970b01a074c958d5af546e5f43c011192307f69d9cac
+size 2195376
diff --git a/mplug_owl2/lib/libbz2.a b/mplug_owl2/lib/libbz2.a
new file mode 100644
index 0000000000000000000000000000000000000000..996c44d47e73d90bf5155c58e7121f8c6aeccb3e
--- /dev/null
+++ b/mplug_owl2/lib/libbz2.a
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4377dc3d8f7542568b6365cd6bb06970b53c20e9a71b7d54271874f7868be500
+size 264138
diff --git a/mplug_owl2/lib/libffi.so.8 b/mplug_owl2/lib/libffi.so.8
new file mode 100644
index 0000000000000000000000000000000000000000..dd23bfd7f0f937627caf212c56095fd6b257f4ea
Binary files /dev/null and b/mplug_owl2/lib/libffi.so.8 differ
diff --git a/mplug_owl2/lib/libformw.so.6 b/mplug_owl2/lib/libformw.so.6
new file mode 100644
index 0000000000000000000000000000000000000000..7baed3c80a7b6c4b6af47d0ab7730cfd8e6a2367
Binary files /dev/null and b/mplug_owl2/lib/libformw.so.6 differ
diff --git a/mplug_owl2/lib/libpanelw.so b/mplug_owl2/lib/libpanelw.so
new file mode 100644
index 0000000000000000000000000000000000000000..0abaf7359565d5b177893357615e4d1e779068e5
Binary files /dev/null and b/mplug_owl2/lib/libpanelw.so differ
diff --git a/mplug_owl2/lib/libpanelw.so.6 b/mplug_owl2/lib/libpanelw.so.6
new file mode 100644
index 0000000000000000000000000000000000000000..0abaf7359565d5b177893357615e4d1e779068e5
Binary files /dev/null and b/mplug_owl2/lib/libpanelw.so.6 differ
diff --git a/mplug_owl2/lib/libquadmath.so.0 b/mplug_owl2/lib/libquadmath.so.0
new file mode 100644
index 0000000000000000000000000000000000000000..cd082c15339041642f25b85e32bd65b9e0393619
--- /dev/null
+++ b/mplug_owl2/lib/libquadmath.so.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10c6fadba4c2f6d77e836a50aadbd92e95b137a85eb01b1ca183b50d8f39a2c6
+size 1009408
diff --git a/mplug_owl2/lib/libreadline.a b/mplug_owl2/lib/libreadline.a
new file mode 100644
index 0000000000000000000000000000000000000000..ddfa236a3a6b4ac6383a3befb5ea24fbd7edaa47
--- /dev/null
+++ b/mplug_owl2/lib/libreadline.a
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34edb0aaf24f86fa37e869bb46389534179d560e141a744b15d854497148663a
+size 749782
diff --git a/mplug_owl2/lib/libreadline.so.8.2 b/mplug_owl2/lib/libreadline.so.8.2
new file mode 100644
index 0000000000000000000000000000000000000000..3d1ca162d3248c9d4e76c84be87174ab49829fe1
--- /dev/null
+++ b/mplug_owl2/lib/libreadline.so.8.2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b6a9d66a73115c872272bb479c8f29df1bf7d9b16e5913e083d64b7753ce1c6
+size 411464
diff --git a/mplug_owl2/lib/libsqlite3.so b/mplug_owl2/lib/libsqlite3.so
new file mode 100644
index 0000000000000000000000000000000000000000..531fb86e0309a27d33fb4bc03e4442023e5cd590
--- /dev/null
+++ b/mplug_owl2/lib/libsqlite3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71932eb5bf89092fbd2c900601fc9f24aa184d65038aaec2445fd11b1d923327
+size 1543808
diff --git a/mplug_owl2/lib/libstdc++.so.6 b/mplug_owl2/lib/libstdc++.so.6
new file mode 100644
index 0000000000000000000000000000000000000000..577831ce0f1dc31bc63f9d1060ba2c774fd05303
--- /dev/null
+++ b/mplug_owl2/lib/libstdc++.so.6
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f045231ff3a95c2fbfde450575f0ef45d23e95be15193c8729b521fc363ece4
+size 17981480
diff --git a/mplug_owl2/lib/libtclstub8.6.a b/mplug_owl2/lib/libtclstub8.6.a
new file mode 100644
index 0000000000000000000000000000000000000000..9d28825e8872d4a8f2b8d1929bf27804c7c58fcd
Binary files /dev/null and b/mplug_owl2/lib/libtclstub8.6.a differ
diff --git a/mplug_owl2/lib/libz.so b/mplug_owl2/lib/libz.so
new file mode 100644
index 0000000000000000000000000000000000000000..64cd6a309bad00dc38040a577191f6681e18bf89
--- /dev/null
+++ b/mplug_owl2/lib/libz.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0e682a9dc7fd4895a6783288f851b793dc89633f28714027974fa4d66f3914
+size 124744
diff --git a/mplug_owl2/lib/tcl8.6/auto.tcl b/mplug_owl2/lib/tcl8.6/auto.tcl
new file mode 100644
index 0000000000000000000000000000000000000000..f293a38f0433f426e30a168392075f52140dc678
--- /dev/null
+++ b/mplug_owl2/lib/tcl8.6/auto.tcl
@@ -0,0 +1,648 @@
+# auto.tcl --
+#
+# utility procs formerly in init.tcl dealing with auto execution of commands
+# and can be auto loaded themselves.
+#
+# Copyright (c) 1991-1993 The Regents of the University of California.
+# Copyright (c) 1994-1998 Sun Microsystems, Inc.
+#
+# See the file "license.terms" for information on usage and redistribution of
+# this file, and for a DISCLAIMER OF ALL WARRANTIES.
+#
+
+# auto_reset --
+#
+# Destroy all cached information for auto-loading and auto-execution, so that
+# the information gets recomputed the next time it's needed. Also delete any
+# commands that are listed in the auto-load index.
+#
+# Arguments:
+# None.
+
+proc auto_reset {} {
+    global auto_execs auto_index auto_path
+    if {[array exists auto_index]} {
+        foreach cmdName [array names auto_index] {
+            set fqcn [namespace which $cmdName]
+            if {$fqcn eq ""} {
+                continue
+            }
+            rename $fqcn {}
+        }
+    }
+    unset -nocomplain auto_execs auto_index ::tcl::auto_oldpath
+    if {[catch {llength $auto_path}]} {
+        set auto_path [list [info library]]
+    } elseif {[info library] ni $auto_path} {
+        lappend auto_path [info library]
+    }
+}
+
+# tcl_findLibrary --
+#
+# This is a utility for extensions that searches for a library directory
+# using a canonical searching algorithm. A side effect is to source the
+# initialization script and set a global library variable.
+# +# Arguments: +# basename Prefix of the directory name, (e.g., "tk") +# version Version number of the package, (e.g., "8.0") +# patch Patchlevel of the package, (e.g., "8.0.3") +# initScript Initialization script to source (e.g., tk.tcl) +# enVarName environment variable to honor (e.g., TK_LIBRARY) +# varName Global variable to set when done (e.g., tk_library) + +proc tcl_findLibrary {basename version patch initScript enVarName varName} { + upvar #0 $varName the_library + global auto_path env tcl_platform + + set dirs {} + set errors {} + + # The C application may have hardwired a path, which we honor + + if {[info exists the_library] && $the_library ne ""} { + lappend dirs $the_library + } else { + # Do the canonical search + + # 1. From an environment variable, if it exists. Placing this first + # gives the end-user ultimate control to work-around any bugs, or + # to customize. + + if {[info exists env($enVarName)]} { + lappend dirs $env($enVarName) + } + + # 2. In the package script directory registered within the + # configuration of the package itself. + + catch { + lappend dirs [::${basename}::pkgconfig get scriptdir,runtime] + } + + # 3. Relative to auto_path directories. This checks relative to the + # Tcl library as well as allowing loading of libraries added to the + # auto_path that is not relative to the core library or binary paths. + foreach d $auto_path { + lappend dirs [file join $d $basename$version] + if {$tcl_platform(platform) eq "unix" + && $tcl_platform(os) eq "Darwin"} { + # 4. On MacOSX, check the Resources/Scripts subdir too + lappend dirs [file join $d $basename$version Resources Scripts] + } + } + + # 3. Various locations relative to the executable + # ../lib/foo1.0 (From bin directory in install hierarchy) + # ../../lib/foo1.0 (From bin/arch directory in install hierarchy) + # ../library (From unix directory in build hierarchy) + # + # Remaining locations are out of date (when relevant, they ought to be + # covered by the $::auto_path seach above) and disabled. + # + # ../../library (From unix/arch directory in build hierarchy) + # ../../foo1.0.1/library + # (From unix directory in parallel build hierarchy) + # ../../../foo1.0.1/library + # (From unix/arch directory in parallel build hierarchy) + + set parentDir [file dirname [file dirname [info nameofexecutable]]] + set grandParentDir [file dirname $parentDir] + lappend dirs [file join $parentDir lib $basename$version] + lappend dirs [file join $grandParentDir lib $basename$version] + lappend dirs [file join $parentDir library] + if {0} { + lappend dirs [file join $grandParentDir library] + lappend dirs [file join $grandParentDir $basename$patch library] + lappend dirs [file join [file dirname $grandParentDir] \ + $basename$patch library] + } + } + # make $dirs unique, preserving order + array set seen {} + foreach i $dirs { + # Make sure $i is unique under normalization. Avoid repeated [source]. + if {[interp issafe]} { + # Safe interps have no [file normalize]. 
+ set norm $i + } else { + set norm [file normalize $i] + } + if {[info exists seen($norm)]} { + continue + } + set seen($norm) {} + + set the_library $i + set file [file join $i $initScript] + + # source everything when in a safe interpreter because we have a + # source command, but no file exists command + + if {[interp issafe] || [file exists $file]} { + if {![catch {uplevel #0 [list source $file]} msg opts]} { + return + } + append errors "$file: $msg\n" + append errors [dict get $opts -errorinfo]\n + } + } + unset -nocomplain the_library + set msg "Can't find a usable $initScript in the following directories: \n" + append msg " $dirs\n\n" + append msg "$errors\n\n" + append msg "This probably means that $basename wasn't installed properly.\n" + error $msg +} + + +# ---------------------------------------------------------------------- +# auto_mkindex +# ---------------------------------------------------------------------- +# The following procedures are used to generate the tclIndex file from Tcl +# source files. They use a special safe interpreter to parse Tcl source +# files, writing out index entries as "proc" commands are encountered. This +# implementation won't work in a safe interpreter, since a safe interpreter +# can't create the special parser and mess with its commands. + +if {[interp issafe]} { + return ;# Stop sourcing the file here +} + +# auto_mkindex -- +# Regenerate a tclIndex file from Tcl source files. Takes as argument the +# name of the directory in which the tclIndex file is to be placed, followed +# by any number of glob patterns to use in that directory to locate all of the +# relevant files. +# +# Arguments: +# dir - Name of the directory in which to create an index. + +# args - Any number of additional arguments giving the names of files +# within dir. If no additional are given auto_mkindex will look +# for *.tcl. + +proc auto_mkindex {dir args} { + if {[interp issafe]} { + error "can't generate index within safe interpreter" + } + + set oldDir [pwd] + cd $dir + + append index "# Tcl autoload index file, version 2.0\n" + append index "# This file is generated by the \"auto_mkindex\" command\n" + append index "# and sourced to set up indexing information for one or\n" + append index "# more commands. Typically each line is a command that\n" + append index "# sets an element in the auto_index array, where the\n" + append index "# element name is the name of a command and the value is\n" + append index "# a script that loads the command.\n\n" + if {![llength $args]} { + set args *.tcl + } + + auto_mkindex_parser::init + foreach file [lsort [glob -- {*}$args]] { + try { + append index [auto_mkindex_parser::mkindex $file] + } on error {msg opts} { + cd $oldDir + return -options $opts $msg + } + } + auto_mkindex_parser::cleanup + + set fid [open "tclIndex" w] + puts -nonewline $fid $index + close $fid + cd $oldDir +} + +# Original version of auto_mkindex that just searches the source code for +# "proc" at the beginning of the line. + +proc auto_mkindex_old {dir args} { + set oldDir [pwd] + cd $dir + set dir [pwd] + append index "# Tcl autoload index file, version 2.0\n" + append index "# This file is generated by the \"auto_mkindex\" command\n" + append index "# and sourced to set up indexing information for one or\n" + append index "# more commands. 
Typically each line is a command that\n" + append index "# sets an element in the auto_index array, where the\n" + append index "# element name is the name of a command and the value is\n" + append index "# a script that loads the command.\n\n" + if {![llength $args]} { + set args *.tcl + } + foreach file [lsort [glob -- {*}$args]] { + set f "" + set error [catch { + set f [open $file] + fconfigure $f -eofchar "\x1A {}" + while {[gets $f line] >= 0} { + if {[regexp {^proc[ ]+([^ ]*)} $line match procName]} { + set procName [lindex [auto_qualify $procName "::"] 0] + append index "set [list auto_index($procName)]" + append index " \[list source \[file join \$dir [list $file]\]\]\n" + } + } + close $f + } msg opts] + if {$error} { + catch {close $f} + cd $oldDir + return -options $opts $msg + } + } + set f "" + set error [catch { + set f [open tclIndex w] + puts -nonewline $f $index + close $f + cd $oldDir + } msg opts] + if {$error} { + catch {close $f} + cd $oldDir + error $msg $info $code + return -options $opts $msg + } +} + +# Create a safe interpreter that can be used to parse Tcl source files +# generate a tclIndex file for autoloading. This interp contains commands for +# things that need index entries. Each time a command is executed, it writes +# an entry out to the index file. + +namespace eval auto_mkindex_parser { + variable parser "" ;# parser used to build index + variable index "" ;# maintains index as it is built + variable scriptFile "" ;# name of file being processed + variable contextStack "" ;# stack of namespace scopes + variable imports "" ;# keeps track of all imported cmds + variable initCommands ;# list of commands that create aliases + if {![info exists initCommands]} { + set initCommands [list] + } + + proc init {} { + variable parser + variable initCommands + + if {![interp issafe]} { + set parser [interp create -safe] + $parser hide info + $parser hide rename + $parser hide proc + $parser hide namespace + $parser hide eval + $parser hide puts + foreach ns [$parser invokehidden namespace children ::] { + # MUST NOT DELETE "::tcl" OR BAD THINGS HAPPEN! + if {$ns eq "::tcl"} continue + $parser invokehidden namespace delete $ns + } + foreach cmd [$parser invokehidden info commands ::*] { + $parser invokehidden rename $cmd {} + } + $parser invokehidden proc unknown {args} {} + + # We'll need access to the "namespace" command within the + # interp. Put it back, but move it out of the way. + + $parser expose namespace + $parser invokehidden rename namespace _%@namespace + $parser expose eval + $parser invokehidden rename eval _%@eval + + # Install all the registered pseudo-command implementations + + foreach cmd $initCommands { + eval $cmd + } + } + } + proc cleanup {} { + variable parser + interp delete $parser + unset parser + } +} + +# auto_mkindex_parser::mkindex -- +# +# Used by the "auto_mkindex" command to create a "tclIndex" file for the given +# Tcl source file. Executes the commands in the file, and handles things like +# the "proc" command by adding an entry for the index file. Returns a string +# that represents the index file. +# +# Arguments: +# file Name of Tcl source file to be indexed. 
+ +proc auto_mkindex_parser::mkindex {file} { + variable parser + variable index + variable scriptFile + variable contextStack + variable imports + + set scriptFile $file + + set fid [open $file] + fconfigure $fid -eofchar "\x1A {}" + set contents [read $fid] + close $fid + + # There is one problem with sourcing files into the safe interpreter: + # references like "$x" will fail since code is not really being executed + # and variables do not really exist. To avoid this, we replace all $ with + # \0 (literally, the null char) later, when getting proc names we will + # have to reverse this replacement, in case there were any $ in the proc + # name. This will cause a problem if somebody actually tries to have a \0 + # in their proc name. Too bad for them. + set contents [string map [list \$ \0] $contents] + + set index "" + set contextStack "" + set imports "" + + $parser eval $contents + + foreach name $imports { + catch {$parser eval [list _%@namespace forget $name]} + } + return $index +} + +# auto_mkindex_parser::hook command +# +# Registers a Tcl command to evaluate when initializing the child interpreter +# used by the mkindex parser. The command is evaluated in the parent +# interpreter, and can use the variable auto_mkindex_parser::parser to get to +# the child + +proc auto_mkindex_parser::hook {cmd} { + variable initCommands + + lappend initCommands $cmd +} + +# auto_mkindex_parser::slavehook command +# +# Registers a Tcl command to evaluate when initializing the child interpreter +# used by the mkindex parser. The command is evaluated in the child +# interpreter. + +proc auto_mkindex_parser::slavehook {cmd} { + variable initCommands + + # The $parser variable is defined to be the name of the child interpreter + # when this command is used later. + + lappend initCommands "\$parser eval [list $cmd]" +} + +# auto_mkindex_parser::command -- +# +# Registers a new command with the "auto_mkindex_parser" interpreter that +# parses Tcl files. These commands are fake versions of things like the +# "proc" command. When you execute them, they simply write out an entry to a +# "tclIndex" file for auto-loading. +# +# This procedure allows extensions to register their own commands with the +# auto_mkindex facility. For example, a package like [incr Tcl] might +# register a "class" command so that class definitions could be added to a +# "tclIndex" file for auto-loading. +# +# Arguments: +# name Name of command recognized in Tcl files. +# arglist Argument list for command. +# body Implementation of command to handle indexing. + +proc auto_mkindex_parser::command {name arglist body} { + hook [list auto_mkindex_parser::commandInit $name $arglist $body] +} + +# auto_mkindex_parser::commandInit -- +# +# This does the actual work set up by auto_mkindex_parser::command. This is +# called when the interpreter used by the parser is created. +# +# Arguments: +# name Name of command recognized in Tcl files. +# arglist Argument list for command. +# body Implementation of command to handle indexing. + +proc auto_mkindex_parser::commandInit {name arglist body} { + variable parser + + set ns [namespace qualifiers $name] + set tail [namespace tail $name] + if {$ns eq ""} { + set fakeName [namespace current]::_%@fake_$tail + } else { + set fakeName [namespace current]::[string map {:: _} _%@fake_$name] + } + proc $fakeName $arglist $body + + # YUK! Tcl won't let us alias fully qualified command names, so we can't + # handle names like "::itcl::class". 
Instead, we have to build procs with + # the fully qualified names, and have the procs point to the aliases. + + if {[string match *::* $name]} { + set exportCmd [list _%@namespace export [namespace tail $name]] + $parser eval [list _%@namespace eval $ns $exportCmd] + + # The following proc definition does not work if you want to tolerate + # space or something else diabolical in the procedure name, (i.e., + # space in $alias). The following does not work: + # "_%@eval {$alias} \$args" + # because $alias gets concat'ed to $args. The following does not work + # because $cmd is somehow undefined + # "set cmd {$alias} \; _%@eval {\$cmd} \$args" + # A gold star to someone that can make test autoMkindex-3.3 work + # properly + + set alias [namespace tail $fakeName] + $parser invokehidden proc $name {args} "_%@eval {$alias} \$args" + $parser alias $alias $fakeName + } else { + $parser alias $name $fakeName + } + return +} + +# auto_mkindex_parser::fullname -- +# +# Used by commands like "proc" within the auto_mkindex parser. Returns the +# qualified namespace name for the "name" argument. If the "name" does not +# start with "::", elements are added from the current namespace stack to +# produce a qualified name. Then, the name is examined to see whether or not +# it should really be qualified. If the name has more than the leading "::", +# it is returned as a fully qualified name. Otherwise, it is returned as a +# simple name. That way, the Tcl autoloader will recognize it properly. +# +# Arguments: +# name - Name that is being added to index. + +proc auto_mkindex_parser::fullname {name} { + variable contextStack + + if {![string match ::* $name]} { + foreach ns $contextStack { + set name "${ns}::$name" + if {[string match ::* $name]} { + break + } + } + } + + if {[namespace qualifiers $name] eq ""} { + set name [namespace tail $name] + } elseif {![string match ::* $name]} { + set name "::$name" + } + + # Earlier, mkindex replaced all $'s with \0. Now, we have to reverse that + # replacement. + return [string map [list \0 \$] $name] +} + +# auto_mkindex_parser::indexEntry -- +# +# Used by commands like "proc" within the auto_mkindex parser to add a +# correctly-quoted entry to the index. This is shared code so it is done +# *right*, in one place. +# +# Arguments: +# name - Name that is being added to index. + +proc auto_mkindex_parser::indexEntry {name} { + variable index + variable scriptFile + + # We convert all metacharacters to their backslashed form, and pre-split + # the file name that we know about (which will be a proper list, and so + # correctly quoted). + + set name [string range [list \}[fullname $name]] 2 end] + set filenameParts [file split $scriptFile] + + append index [format \ + {set auto_index(%s) [list source [file join $dir %s]]%s} \ + $name $filenameParts \n] + return +} + +if {[llength $::auto_mkindex_parser::initCommands]} { + return +} + +# Register all of the procedures for the auto_mkindex parser that will build +# the "tclIndex" file. + +# AUTO MKINDEX: proc name arglist body +# Adds an entry to the auto index list for the given procedure name. + +auto_mkindex_parser::command proc {name args} { + indexEntry $name +} + +# Conditionally add support for Tcl byte code files. There are some tricky +# details here. First, we need to get the tbcload library initialized in the +# current interpreter. We cannot load tbcload into the child until we have +# done so because it needs access to the tcl_patchLevel variable. 
Second, +# because the package index file may defer loading the library until we invoke +# a command, we need to explicitly invoke auto_load to force it to be loaded. +# This should be a noop if the package has already been loaded + +auto_mkindex_parser::hook { + try { + package require tbcload + } on error {} { + # OK, don't have it so do nothing + } on ok {} { + if {[namespace which -command tbcload::bcproc] eq ""} { + auto_load tbcload::bcproc + } + load {} tbcload $auto_mkindex_parser::parser + + # AUTO MKINDEX: tbcload::bcproc name arglist body + # Adds an entry to the auto index list for the given precompiled + # procedure name. + + auto_mkindex_parser::commandInit tbcload::bcproc {name args} { + indexEntry $name + } + } +} + +# AUTO MKINDEX: namespace eval name command ?arg arg...? +# Adds the namespace name onto the context stack and evaluates the associated +# body of commands. +# +# AUTO MKINDEX: namespace import ?-force? pattern ?pattern...? +# Performs the "import" action in the parser interpreter. This is important +# for any commands contained in a namespace that affect the index. For +# example, a script may say "itcl::class ...", or it may import "itcl::*" and +# then say "class ...". This procedure does the import operation, but keeps +# track of imported patterns so we can remove the imports later. + +auto_mkindex_parser::command namespace {op args} { + switch -- $op { + eval { + variable parser + variable contextStack + + set name [lindex $args 0] + set args [lrange $args 1 end] + + set contextStack [linsert $contextStack 0 $name] + $parser eval [list _%@namespace eval $name] $args + set contextStack [lrange $contextStack 1 end] + } + import { + variable parser + variable imports + foreach pattern $args { + if {$pattern ne "-force"} { + lappend imports $pattern + } + } + catch {$parser eval "_%@namespace import $args"} + } + ensemble { + variable parser + variable contextStack + if {[lindex $args 0] eq "create"} { + set name ::[join [lreverse $contextStack] ::] + catch { + set name [dict get [lrange $args 1 end] -command] + if {![string match ::* $name]} { + set name ::[join [lreverse $contextStack] ::]$name + } + regsub -all ::+ $name :: name + } + # create artificial proc to force an entry in the tclIndex + $parser eval [list ::proc $name {} {}] + } + } + } +} + +# AUTO MKINDEX: oo::class create name ?definition? +# Adds an entry to the auto index list for the given class name. 
+auto_mkindex_parser::command oo::class {op name {body ""}} { + if {$op eq "create"} { + indexEntry $name + } +} +auto_mkindex_parser::command class {op name {body ""}} { + if {$op eq "create"} { + indexEntry $name + } +} + +return diff --git a/mplug_owl2/lib/tcl8.6/encoding/cp1252.enc b/mplug_owl2/lib/tcl8.6/encoding/cp1252.enc new file mode 100644 index 0000000000000000000000000000000000000000..dd525ea4c58e6abd0f75552907e051b51bd49145 --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/encoding/cp1252.enc @@ -0,0 +1,20 @@ +# Encoding file: cp1252, single-byte +S +003F 0 1 +00 +0000000100020003000400050006000700080009000A000B000C000D000E000F +0010001100120013001400150016001700180019001A001B001C001D001E001F +0020002100220023002400250026002700280029002A002B002C002D002E002F +0030003100320033003400350036003700380039003A003B003C003D003E003F +0040004100420043004400450046004700480049004A004B004C004D004E004F +0050005100520053005400550056005700580059005A005B005C005D005E005F +0060006100620063006400650066006700680069006A006B006C006D006E006F +0070007100720073007400750076007700780079007A007B007C007D007E007F +20AC0081201A0192201E20262020202102C62030016020390152008D017D008F +009020182019201C201D20222013201402DC21220161203A0153009D017E0178 +00A000A100A200A300A400A500A600A700A800A900AA00AB00AC00AD00AE00AF +00B000B100B200B300B400B500B600B700B800B900BA00BB00BC00BD00BE00BF +00C000C100C200C300C400C500C600C700C800C900CA00CB00CC00CD00CE00CF +00D000D100D200D300D400D500D600D700D800D900DA00DB00DC00DD00DE00DF +00E000E100E200E300E400E500E600E700E800E900EA00EB00EC00ED00EE00EF +00F000F100F200F300F400F500F600F700F800F900FA00FB00FC00FD00FE00FF diff --git a/mplug_owl2/lib/tcl8.6/encoding/cp860.enc b/mplug_owl2/lib/tcl8.6/encoding/cp860.enc new file mode 100644 index 0000000000000000000000000000000000000000..871943b3718441d70bb405ff6f20afa94539997f --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/encoding/cp860.enc @@ -0,0 +1,20 @@ +# Encoding file: cp860, single-byte +S +003F 0 1 +00 +0000000100020003000400050006000700080009000A000B000C000D000E000F +0010001100120013001400150016001700180019001A001B001C001D001E001F +0020002100220023002400250026002700280029002A002B002C002D002E002F +0030003100320033003400350036003700380039003A003B003C003D003E003F +0040004100420043004400450046004700480049004A004B004C004D004E004F +0050005100520053005400550056005700580059005A005B005C005D005E005F +0060006100620063006400650066006700680069006A006B006C006D006E006F +0070007100720073007400750076007700780079007A007B007C007D007E007F +00C700FC00E900E200E300E000C100E700EA00CA00E800CD00D400EC00C300C2 +00C900C000C800F400F500F200DA00F900CC00D500DC00A200A300D920A700D3 +00E100ED00F300FA00F100D100AA00BA00BF00D200AC00BD00BC00A100AB00BB +259125922593250225242561256225562555256325512557255D255C255B2510 +25142534252C251C2500253C255E255F255A25542569256625602550256C2567 +2568256425652559255825522553256B256A2518250C25882584258C25902580 +03B100DF039303C003A303C300B503C403A6039803A903B4221E03C603B52229 +226100B1226522642320232100F7224800B0221900B7221A207F00B225A000A0 diff --git a/mplug_owl2/lib/tcl8.6/encoding/euc-kr.enc b/mplug_owl2/lib/tcl8.6/encoding/euc-kr.enc new file mode 100644 index 0000000000000000000000000000000000000000..5e9bb93bbb0ae96e1d995570e4a336a1bcb00e42 --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/encoding/euc-kr.enc @@ -0,0 +1,1533 @@ +# Encoding file: euc-kr, multi-byte +M +003F 0 90 +00 +0000000100020003000400050006000700080009000A000B000C000D000E000F +0010001100120013001400150016001700180019001A001B001C001D001E001F 
+0020002100220023002400250026002700280029002A002B002C002D002E002F +0030003100320033003400350036003700380039003A003B003C003D003E003F +0040004100420043004400450046004700480049004A004B004C004D004E004F +0050005100520053005400550056005700580059005A005B005C005D005E005F +0060006100620063006400650066006700680069006A006B006C006D006E006F +0070007100720073007400750076007700780079007A007B007C007D007E007F +0080008100820083008400850086008700880089008A008B008C008D008E008F +0090009100920093009400950096009700980099009A009B009C009D009E009F +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +A1 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000030003001300200B72025202600A8300300AD20152225FF3C223C20182019 +201C201D3014301530083009300A300B300C300D300E300F3010301100B100D7 +00F7226022642265221E223400B0203220332103212BFFE0FFE1FFE526422640 +222022A52312220222072261225200A7203B2606260525CB25CF25CE25C725C6 +25A125A025B325B225BD25BC219221902191219321943013226A226B221A223D +221D2235222B222C2208220B2286228722822283222A222922272228FFE20000 +A2 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000021D221D42200220300B4FF5E02C702D802DD02DA02D900B802DB00A100BF +02D0222E2211220F00A42109203025C125C025B725B626642660266126652667 +2663229925C825A325D025D1259225A425A525A825A725A625A92668260F260E +261C261E00B62020202121952197219921962198266D2669266A266C327F321C +211633C7212233C233D821210000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +A3 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000FF01FF02FF03FF04FF05FF06FF07FF08FF09FF0AFF0BFF0CFF0DFF0EFF0F +FF10FF11FF12FF13FF14FF15FF16FF17FF18FF19FF1AFF1BFF1CFF1DFF1EFF1F +FF20FF21FF22FF23FF24FF25FF26FF27FF28FF29FF2AFF2BFF2CFF2DFF2EFF2F +FF30FF31FF32FF33FF34FF35FF36FF37FF38FF39FF3AFF3BFFE6FF3DFF3EFF3F +FF40FF41FF42FF43FF44FF45FF46FF47FF48FF49FF4AFF4BFF4CFF4DFF4EFF4F +FF50FF51FF52FF53FF54FF55FF56FF57FF58FF59FF5AFF5BFF5CFF5DFFE30000 +A4 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000313131323133313431353136313731383139313A313B313C313D313E313F +3140314131423143314431453146314731483149314A314B314C314D314E314F +3150315131523153315431553156315731583159315A315B315C315D315E315F +3160316131623163316431653166316731683169316A316B316C316D316E316F +3170317131723173317431753176317731783179317A317B317C317D317E317F +3180318131823183318431853186318731883189318A318B318C318D318E0000 +A5 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000217021712172217321742175217621772178217900000000000000000000 +2160216121622163216421652166216721682169000000000000000000000000 +0000039103920393039403950396039703980399039A039B039C039D039E039F +03A003A103A303A403A503A603A703A803A90000000000000000000000000000 +000003B103B203B303B403B503B603B703B803B903BA03BB03BC03BD03BE03BF +03C003C103C303C403C503C603C703C803C90000000000000000000000000000 +A6 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000025002502250C251025182514251C252C25242534253C25012503250F2513 +251B251725232533252B253B254B2520252F25282537253F251D253025252538 
+254225122511251A251925162515250E250D251E251F25212522252625272529 +252A252D252E25312532253525362539253A253D253E25402541254325442545 +2546254725482549254A00000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +A7 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00003395339633972113339833C433A333A433A533A63399339A339B339C339D +339E339F33A033A133A233CA338D338E338F33CF3388338933C833A733A833B0 +33B133B233B333B433B533B633B733B833B93380338133823383338433BA33BB +33BC33BD33BE33BF33903391339233933394212633C033C1338A338B338C33D6 +33C533AD33AE33AF33DB33A933AA33AB33AC33DD33D033D333C333C933DC33C6 +0000000000000000000000000000000000000000000000000000000000000000 +A8 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000000C600D000AA0126000001320000013F014100D8015200BA00DE0166014A +00003260326132623263326432653266326732683269326A326B326C326D326E +326F3270327132723273327432753276327732783279327A327B24D024D124D2 +24D324D424D524D624D724D824D924DA24DB24DC24DD24DE24DF24E024E124E2 +24E324E424E524E624E724E824E9246024612462246324642465246624672468 +2469246A246B246C246D246E00BD2153215400BC00BE215B215C215D215E0000 +A9 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000000E6011100F001270131013301380140014200F8015300DF00FE0167014B +01493200320132023203320432053206320732083209320A320B320C320D320E +320F3210321132123213321432153216321732183219321A321B249C249D249E +249F24A024A124A224A324A424A524A624A724A824A924AA24AB24AC24AD24AE +24AF24B024B124B224B324B424B5247424752476247724782479247A247B247C +247D247E247F24802481248200B900B200B32074207F20812082208320840000 +AA +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000304130423043304430453046304730483049304A304B304C304D304E304F +3050305130523053305430553056305730583059305A305B305C305D305E305F +3060306130623063306430653066306730683069306A306B306C306D306E306F +3070307130723073307430753076307730783079307A307B307C307D307E307F +3080308130823083308430853086308730883089308A308B308C308D308E308F +3090309130923093000000000000000000000000000000000000000000000000 +AB +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000030A130A230A330A430A530A630A730A830A930AA30AB30AC30AD30AE30AF +30B030B130B230B330B430B530B630B730B830B930BA30BB30BC30BD30BE30BF +30C030C130C230C330C430C530C630C730C830C930CA30CB30CC30CD30CE30CF +30D030D130D230D330D430D530D630D730D830D930DA30DB30DC30DD30DE30DF +30E030E130E230E330E430E530E630E730E830E930EA30EB30EC30ED30EE30EF +30F030F130F230F330F430F530F6000000000000000000000000000000000000 +AC +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000004100411041204130414041504010416041704180419041A041B041C041D +041E041F0420042104220423042404250426042704280429042A042B042C042D +042E042F00000000000000000000000000000000000000000000000000000000 +000004300431043204330434043504510436043704380439043A043B043C043D +043E043F0440044104420443044404450446044704480449044A044B044C044D +044E044F00000000000000000000000000000000000000000000000000000000 +B0 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000AC00AC01AC04AC07AC08AC09AC0AAC10AC11AC12AC13AC14AC15AC16AC17 +AC19AC1AAC1BAC1CAC1DAC20AC24AC2CAC2DAC2FAC30AC31AC38AC39AC3CAC40 +AC4BAC4DAC54AC58AC5CAC70AC71AC74AC77AC78AC7AAC80AC81AC83AC84AC85 +AC86AC89AC8AAC8BAC8CAC90AC94AC9CAC9DAC9FACA0ACA1ACA8ACA9ACAAACAC +ACAFACB0ACB8ACB9ACBBACBCACBDACC1ACC4ACC8ACCCACD5ACD7ACE0ACE1ACE4 +ACE7ACE8ACEAACECACEFACF0ACF1ACF3ACF5ACF6ACFCACFDAD00AD04AD060000 +B1 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000AD0CAD0DAD0FAD11AD18AD1CAD20AD29AD2CAD2DAD34AD35AD38AD3CAD44 +AD45AD47AD49AD50AD54AD58AD61AD63AD6CAD6DAD70AD73AD74AD75AD76AD7B +AD7CAD7DAD7FAD81AD82AD88AD89AD8CAD90AD9CAD9DADA4ADB7ADC0ADC1ADC4 +ADC8ADD0ADD1ADD3ADDCADE0ADE4ADF8ADF9ADFCADFFAE00AE01AE08AE09AE0B +AE0DAE14AE30AE31AE34AE37AE38AE3AAE40AE41AE43AE45AE46AE4AAE4CAE4D +AE4EAE50AE54AE56AE5CAE5DAE5FAE60AE61AE65AE68AE69AE6CAE70AE780000 +B2 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000AE79AE7BAE7CAE7DAE84AE85AE8CAEBCAEBDAEBEAEC0AEC4AECCAECDAECF +AED0AED1AED8AED9AEDCAEE8AEEBAEEDAEF4AEF8AEFCAF07AF08AF0DAF10AF2C +AF2DAF30AF32AF34AF3CAF3DAF3FAF41AF42AF43AF48AF49AF50AF5CAF5DAF64 +AF65AF79AF80AF84AF88AF90AF91AF95AF9CAFB8AFB9AFBCAFC0AFC7AFC8AFC9 +AFCBAFCDAFCEAFD4AFDCAFE8AFE9AFF0AFF1AFF4AFF8B000B001B004B00CB010 +B014B01CB01DB028B044B045B048B04AB04CB04EB053B054B055B057B0590000 +B3 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000B05DB07CB07DB080B084B08CB08DB08FB091B098B099B09AB09CB09FB0A0 
+B0A1B0A2B0A8B0A9B0ABB0ACB0ADB0AEB0AFB0B1B0B3B0B4B0B5B0B8B0BCB0C4 +B0C5B0C7B0C8B0C9B0D0B0D1B0D4B0D8B0E0B0E5B108B109B10BB10CB110B112 +B113B118B119B11BB11CB11DB123B124B125B128B12CB134B135B137B138B139 +B140B141B144B148B150B151B154B155B158B15CB160B178B179B17CB180B182 +B188B189B18BB18DB192B193B194B198B19CB1A8B1CCB1D0B1D4B1DCB1DD0000 +B4 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000B1DFB1E8B1E9B1ECB1F0B1F9B1FBB1FDB204B205B208B20BB20CB214B215 +B217B219B220B234B23CB258B25CB260B268B269B274B275B27CB284B285B289 +B290B291B294B298B299B29AB2A0B2A1B2A3B2A5B2A6B2AAB2ACB2B0B2B4B2C8 +B2C9B2CCB2D0B2D2B2D8B2D9B2DBB2DDB2E2B2E4B2E5B2E6B2E8B2EBB2ECB2ED +B2EEB2EFB2F3B2F4B2F5B2F7B2F8B2F9B2FAB2FBB2FFB300B301B304B308B310 +B311B313B314B315B31CB354B355B356B358B35BB35CB35EB35FB364B3650000 +B5 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000B367B369B36BB36EB370B371B374B378B380B381B383B384B385B38CB390 +B394B3A0B3A1B3A8B3ACB3C4B3C5B3C8B3CBB3CCB3CEB3D0B3D4B3D5B3D7B3D9 +B3DBB3DDB3E0B3E4B3E8B3FCB410B418B41CB420B428B429B42BB434B450B451 +B454B458B460B461B463B465B46CB480B488B49DB4A4B4A8B4ACB4B5B4B7B4B9 +B4C0B4C4B4C8B4D0B4D5B4DCB4DDB4E0B4E3B4E4B4E6B4ECB4EDB4EFB4F1B4F8 +B514B515B518B51BB51CB524B525B527B528B529B52AB530B531B534B5380000 +B6 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000B540B541B543B544B545B54BB54CB54DB550B554B55CB55DB55FB560B561 +B5A0B5A1B5A4B5A8B5AAB5ABB5B0B5B1B5B3B5B4B5B5B5BBB5BCB5BDB5C0B5C4 +B5CCB5CDB5CFB5D0B5D1B5D8B5ECB610B611B614B618B625B62CB634B648B664 +B668B69CB69DB6A0B6A4B6ABB6ACB6B1B6D4B6F0B6F4B6F8B700B701B705B728 +B729B72CB72FB730B738B739B73BB744B748B74CB754B755B760B764B768B770 +B771B773B775B77CB77DB780B784B78CB78DB78FB790B791B792B796B7970000 +B7 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000B798B799B79CB7A0B7A8B7A9B7ABB7ACB7ADB7B4B7B5B7B8B7C7B7C9B7EC +B7EDB7F0B7F4B7FCB7FDB7FFB800B801B807B808B809B80CB810B818B819B81B +B81DB824B825B828B82CB834B835B837B838B839B840B844B851B853B85CB85D +B860B864B86CB86DB86FB871B878B87CB88DB8A8B8B0B8B4B8B8B8C0B8C1B8C3 +B8C5B8CCB8D0B8D4B8DDB8DFB8E1B8E8B8E9B8ECB8F0B8F8B8F9B8FBB8FDB904 +B918B920B93CB93DB940B944B94CB94FB951B958B959B95CB960B968B9690000 +B8 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000B96BB96DB974B975B978B97CB984B985B987B989B98AB98DB98EB9ACB9AD +B9B0B9B4B9BCB9BDB9BFB9C1B9C8B9C9B9CCB9CEB9CFB9D0B9D1B9D2B9D8B9D9 +B9DBB9DDB9DEB9E1B9E3B9E4B9E5B9E8B9ECB9F4B9F5B9F7B9F8B9F9B9FABA00 +BA01BA08BA15BA38BA39BA3CBA40BA42BA48BA49BA4BBA4DBA4EBA53BA54BA55 +BA58BA5CBA64BA65BA67BA68BA69BA70BA71BA74BA78BA83BA84BA85BA87BA8C +BAA8BAA9BAABBAACBAB0BAB2BAB8BAB9BABBBABDBAC4BAC8BAD8BAD9BAFC0000 +B9 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000BB00BB04BB0DBB0FBB11BB18BB1CBB20BB29BB2BBB34BB35BB36BB38BB3B +BB3CBB3DBB3EBB44BB45BB47BB49BB4DBB4FBB50BB54BB58BB61BB63BB6CBB88 +BB8CBB90BBA4BBA8BBACBBB4BBB7BBC0BBC4BBC8BBD0BBD3BBF8BBF9BBFCBBFF +BC00BC02BC08BC09BC0BBC0CBC0DBC0FBC11BC14BC15BC16BC17BC18BC1BBC1C +BC1DBC1EBC1FBC24BC25BC27BC29BC2DBC30BC31BC34BC38BC40BC41BC43BC44 +BC45BC49BC4CBC4DBC50BC5DBC84BC85BC88BC8BBC8CBC8EBC94BC95BC970000 +BA +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000BC99BC9ABCA0BCA1BCA4BCA7BCA8BCB0BCB1BCB3BCB4BCB5BCBCBCBDBCC0 +BCC4BCCDBCCFBCD0BCD1BCD5BCD8BCDCBCF4BCF5BCF6BCF8BCFCBD04BD05BD07 +BD09BD10BD14BD24BD2CBD40BD48BD49BD4CBD50BD58BD59BD64BD68BD80BD81 +BD84BD87BD88BD89BD8ABD90BD91BD93BD95BD99BD9ABD9CBDA4BDB0BDB8BDD4 +BDD5BDD8BDDCBDE9BDF0BDF4BDF8BE00BE03BE05BE0CBE0DBE10BE14BE1CBE1D +BE1FBE44BE45BE48BE4CBE4EBE54BE55BE57BE59BE5ABE5BBE60BE61BE640000 +BB +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000BE68BE6ABE70BE71BE73BE74BE75BE7BBE7CBE7DBE80BE84BE8CBE8DBE8F +BE90BE91BE98BE99BEA8BED0BED1BED4BED7BED8BEE0BEE3BEE4BEE5BEECBF01 +BF08BF09BF18BF19BF1BBF1CBF1DBF40BF41BF44BF48BF50BF51BF55BF94BFB0 +BFC5BFCCBFCDBFD0BFD4BFDCBFDFBFE1C03CC051C058C05CC060C068C069C090 +C091C094C098C0A0C0A1C0A3C0A5C0ACC0ADC0AFC0B0C0B3C0B4C0B5C0B6C0BC +C0BDC0BFC0C0C0C1C0C5C0C8C0C9C0CCC0D0C0D8C0D9C0DBC0DCC0DDC0E40000 +BC +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000C0E5C0E8C0ECC0F4C0F5C0F7C0F9C100C104C108C110C115C11CC11DC11E +C11FC120C123C124C126C127C12CC12DC12FC130C131C136C138C139C13CC140 +C148C149C14BC14CC14DC154C155C158C15CC164C165C167C168C169C170C174 +C178C185C18CC18DC18EC190C194C196C19CC19DC19FC1A1C1A5C1A8C1A9C1AC +C1B0C1BDC1C4C1C8C1CCC1D4C1D7C1D8C1E0C1E4C1E8C1F0C1F1C1F3C1FCC1FD +C200C204C20CC20DC20FC211C218C219C21CC21FC220C228C229C22BC22D0000 +BD +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000C22FC231C232C234C248C250C251C254C258C260C265C26CC26DC270C274 +C27CC27DC27FC281C288C289C290C298C29BC29DC2A4C2A5C2A8C2ACC2ADC2B4 +C2B5C2B7C2B9C2DCC2DDC2E0C2E3C2E4C2EBC2ECC2EDC2EFC2F1C2F6C2F8C2F9 +C2FBC2FCC300C308C309C30CC30DC313C314C315C318C31CC324C325C328C329 +C345C368C369C36CC370C372C378C379C37CC37DC384C388C38CC3C0C3D8C3D9 +C3DCC3DFC3E0C3E2C3E8C3E9C3EDC3F4C3F5C3F8C408C410C424C42CC4300000 +BE +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000C434C43CC43DC448C464C465C468C46CC474C475C479C480C494C49CC4B8 +C4BCC4E9C4F0C4F1C4F4C4F8C4FAC4FFC500C501C50CC510C514C51CC528C529 +C52CC530C538C539C53BC53DC544C545C548C549C54AC54CC54DC54EC553C554 +C555C557C558C559C55DC55EC560C561C564C568C570C571C573C574C575C57C +C57DC580C584C587C58CC58DC58FC591C595C597C598C59CC5A0C5A9C5B4C5B5 +C5B8C5B9C5BBC5BCC5BDC5BEC5C4C5C5C5C6C5C7C5C8C5C9C5CAC5CCC5CE0000 +BF +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000C5D0C5D1C5D4C5D8C5E0C5E1C5E3C5E5C5ECC5EDC5EEC5F0C5F4C5F6C5F7 +C5FCC5FDC5FEC5FFC600C601C605C606C607C608C60CC610C618C619C61BC61C +C624C625C628C62CC62DC62EC630C633C634C635C637C639C63BC640C641C644 +C648C650C651C653C654C655C65CC65DC660C66CC66FC671C678C679C67CC680 +C688C689C68BC68DC694C695C698C69CC6A4C6A5C6A7C6A9C6B0C6B1C6B4C6B8 +C6B9C6BAC6C0C6C1C6C3C6C5C6CCC6CDC6D0C6D4C6DCC6DDC6E0C6E1C6E80000 +C0 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000C6E9C6ECC6F0C6F8C6F9C6FDC704C705C708C70CC714C715C717C719C720 +C721C724C728C730C731C733C735C737C73CC73DC740C744C74AC74CC74DC74F +C751C752C753C754C755C756C757C758C75CC760C768C76BC774C775C778C77C +C77DC77EC783C784C785C787C788C789C78AC78EC790C791C794C796C797C798 +C79AC7A0C7A1C7A3C7A4C7A5C7A6C7ACC7ADC7B0C7B4C7BCC7BDC7BFC7C0C7C1 
+C7C8C7C9C7CCC7CEC7D0C7D8C7DDC7E4C7E8C7ECC800C801C804C808C80A0000 +C1 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000C810C811C813C815C816C81CC81DC820C824C82CC82DC82FC831C838C83C +C840C848C849C84CC84DC854C870C871C874C878C87AC880C881C883C885C886 +C887C88BC88CC88DC894C89DC89FC8A1C8A8C8BCC8BDC8C4C8C8C8CCC8D4C8D5 +C8D7C8D9C8E0C8E1C8E4C8F5C8FCC8FDC900C904C905C906C90CC90DC90FC911 +C918C92CC934C950C951C954C958C960C961C963C96CC970C974C97CC988C989 +C98CC990C998C999C99BC99DC9C0C9C1C9C4C9C7C9C8C9CAC9D0C9D1C9D30000 +C2 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000C9D5C9D6C9D9C9DAC9DCC9DDC9E0C9E2C9E4C9E7C9ECC9EDC9EFC9F0C9F1 +C9F8C9F9C9FCCA00CA08CA09CA0BCA0CCA0DCA14CA18CA29CA4CCA4DCA50CA54 +CA5CCA5DCA5FCA60CA61CA68CA7DCA84CA98CABCCABDCAC0CAC4CACCCACDCACF +CAD1CAD3CAD8CAD9CAE0CAECCAF4CB08CB10CB14CB18CB20CB21CB41CB48CB49 +CB4CCB50CB58CB59CB5DCB64CB78CB79CB9CCBB8CBD4CBE4CBE7CBE9CC0CCC0D +CC10CC14CC1CCC1DCC21CC22CC27CC28CC29CC2CCC2ECC30CC38CC39CC3B0000 +C3 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000CC3CCC3DCC3ECC44CC45CC48CC4CCC54CC55CC57CC58CC59CC60CC64CC66 +CC68CC70CC75CC98CC99CC9CCCA0CCA8CCA9CCABCCACCCADCCB4CCB5CCB8CCBC +CCC4CCC5CCC7CCC9CCD0CCD4CCE4CCECCCF0CD01CD08CD09CD0CCD10CD18CD19 +CD1BCD1DCD24CD28CD2CCD39CD5CCD60CD64CD6CCD6DCD6FCD71CD78CD88CD94 +CD95CD98CD9CCDA4CDA5CDA7CDA9CDB0CDC4CDCCCDD0CDE8CDECCDF0CDF8CDF9 +CDFBCDFDCE04CE08CE0CCE14CE19CE20CE21CE24CE28CE30CE31CE33CE350000 +C4 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000CE58CE59CE5CCE5FCE60CE61CE68CE69CE6BCE6DCE74CE75CE78CE7CCE84 +CE85CE87CE89CE90CE91CE94CE98CEA0CEA1CEA3CEA4CEA5CEACCEADCEC1CEE4 +CEE5CEE8CEEBCEECCEF4CEF5CEF7CEF8CEF9CF00CF01CF04CF08CF10CF11CF13 +CF15CF1CCF20CF24CF2CCF2DCF2FCF30CF31CF38CF54CF55CF58CF5CCF64CF65 +CF67CF69CF70CF71CF74CF78CF80CF85CF8CCFA1CFA8CFB0CFC4CFE0CFE1CFE4 +CFE8CFF0CFF1CFF3CFF5CFFCD000D004D011D018D02DD034D035D038D03C0000 +C5 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000D044D045D047D049D050D054D058D060D06CD06DD070D074D07CD07DD081 +D0A4D0A5D0A8D0ACD0B4D0B5D0B7D0B9D0C0D0C1D0C4D0C8D0C9D0D0D0D1D0D3 +D0D4D0D5D0DCD0DDD0E0D0E4D0ECD0EDD0EFD0F0D0F1D0F8D10DD130D131D134 +D138D13AD140D141D143D144D145D14CD14DD150D154D15CD15DD15FD161D168 +D16CD17CD184D188D1A0D1A1D1A4D1A8D1B0D1B1D1B3D1B5D1BAD1BCD1C0D1D8 +D1F4D1F8D207D209D210D22CD22DD230D234D23CD23DD23FD241D248D25C0000 +C6 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000D264D280D281D284D288D290D291D295D29CD2A0D2A4D2ACD2B1D2B8D2B9 +D2BCD2BFD2C0D2C2D2C8D2C9D2CBD2D4D2D8D2DCD2E4D2E5D2F0D2F1D2F4D2F8 +D300D301D303D305D30CD30DD30ED310D314D316D31CD31DD31FD320D321D325 +D328D329D32CD330D338D339D33BD33CD33DD344D345D37CD37DD380D384D38C +D38DD38FD390D391D398D399D39CD3A0D3A8D3A9D3ABD3ADD3B4D3B8D3BCD3C4 +D3C5D3C8D3C9D3D0D3D8D3E1D3E3D3ECD3EDD3F0D3F4D3FCD3FDD3FFD4010000 +C7 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000D408D41DD440D444D45CD460D464D46DD46FD478D479D47CD47FD480D482 +D488D489D48BD48DD494D4A9D4CCD4D0D4D4D4DCD4DFD4E8D4ECD4F0D4F8D4FB +D4FDD504D508D50CD514D515D517D53CD53DD540D544D54CD54DD54FD551D558 +D559D55CD560D565D568D569D56BD56DD574D575D578D57CD584D585D587D588 +D589D590D5A5D5C8D5C9D5CCD5D0D5D2D5D8D5D9D5DBD5DDD5E4D5E5D5E8D5EC +D5F4D5F5D5F7D5F9D600D601D604D608D610D611D613D614D615D61CD6200000 +C8 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000D624D62DD638D639D63CD640D645D648D649D64BD64DD651D654D655D658 +D65CD667D669D670D671D674D683D685D68CD68DD690D694D69DD69FD6A1D6A8 +D6ACD6B0D6B9D6BBD6C4D6C5D6C8D6CCD6D1D6D4D6D7D6D9D6E0D6E4D6E8D6F0 +D6F5D6FCD6FDD700D704D711D718D719D71CD720D728D729D72BD72DD734D735 +D738D73CD744D747D749D750D751D754D756D757D758D759D760D761D763D765 +D769D76CD770D774D77CD77DD781D788D789D78CD790D798D799D79BD79D0000 +CA +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00004F3D4F73504750F952A053EF547554E556095AC15BB6668767B667B767EF +6B4C73C275C27A3C82DB8304885788888A368CC88DCF8EFB8FE699D5523B5374 +5404606A61646BBC73CF811A89BA89D295A34F83520A58BE597859E65E725E79 +61C763C0674667EC687F6F97764E770B78F57A087AFF7C21809D826E82718AEB +95934E6B559D66F76E3478A37AED845B8910874E97A852D8574E582A5D4C611F +61BE6221656267D16A446E1B751875B376E377B07D3A90AF945194529F950000 +CB +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000053235CAC753280DB92409598525B580859DC5CA15D175EB75F3A5F4A6177 +6C5F757A75867CE07D737DB17F8C81548221859189418B1B92FC964D9C474ECB +4EF7500B51F1584F6137613E6168653969EA6F1175A5768676D67B8782A584CB +F90093A7958B55805BA25751F9017CB37FB991B5502853BB5C455DE862D2636E 
+64DA64E76E2070AC795B8DDD8E1EF902907D924592F84E7E4EF650655DFE5EFA +61066957817186548E4793759A2B4E5E5091677068405109528D52926AA20000 +CC +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000077BC92109ED452AB602F8FF2504861A963ED64CA683C6A846FC0818889A1 +96945805727D72AC75047D797E6D80A9898B8B7490639D5162896C7A6F547D50 +7F3A8A23517C614A7B9D8B199257938C4EAC4FD3501E50BE510652C152CD537F +577058835E9A5F91617661AC64CE656C666F66BB66F468976D87708570F1749F +74A574CA75D9786C78EC7ADF7AF67D457D938015803F811B83968B668F159015 +93E1980398389A5A9BE84FC25553583A59515B635C4660B86212684268B00000 +CD +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000068E86EAA754C767878CE7A3D7CFB7E6B7E7C8A088AA18C3F968E9DC453E4 +53E9544A547156FA59D15B645C3B5EAB62F765376545657266A067AF69C16CBD +75FC7690777E7A3F7F94800380A1818F82E682FD83F085C1883188B48AA5F903 +8F9C932E96C798679AD89F1354ED659B66F2688F7A408C379D6056F057645D11 +660668B168CD6EFE7428889E9BE46C68F9049AA84F9B516C5171529F5B545DE5 +6050606D62F163A7653B73D97A7A86A38CA2978F4E325BE16208679C74DC0000 +CE +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000079D183D38A878AB28DE8904E934B98465ED369E885FF90EDF90551A05B98 +5BEC616368FA6B3E704C742F74D87BA17F5083C589C08CAB95DC9928522E605D +62EC90024F8A5149532158D95EE366E06D38709A72C273D67B5080F1945B5366 +639B7F6B4E565080584A58DE602A612762D069D09B415B8F7D1880B18F5F4EA4 +50D154AC55AC5B0C5DA05DE7652A654E68216A4B72E1768E77EF7D5E7FF981A0 +854E86DF8F038F4E90CA99039A559BAB4E184E454E5D4EC74FF1517752FE0000 +CF +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000534053E353E5548E5614577557A25BC75D875ED061FC62D8655167B867E9 +69CB6B506BC66BEC6C426E9D707872D77396740377BF77E97A767D7F800981FC +8205820A82DF88628B338CFC8EC0901190B1926492B699D29A459CE99DD79F9C +570B5C4083CA97A097AB9EB4541B7A987FA488D98ECD90E158005C4863987A9F +5BAE5F137A797AAE828E8EAC5026523852F85377570862F363726B0A6DC37737 +53A5735785688E7695D5673A6AC36F708A6D8ECC994BF90666776B788CB40000 +D0 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00009B3CF90753EB572D594E63C669FB73EA78457ABA7AC57CFE8475898F8D73 +903595A852FB574775477B6083CC921EF9086A58514B524B5287621F68D86975 +969950C552A452E461C365A4683969FF747E7B4B82B983EB89B28B398FD19949 +F9094ECA599764D266116A8E7434798179BD82A9887E887F895FF90A93264F0B +53CA602562716C727D1A7D664E98516277DC80AF4F014F0E5176518055DC5668 +573B57FA57FC5914594759935BC45C905D0E5DF15E7E5FCC628065D765E30000 +D1 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000671E671F675E68CB68C46A5F6B3A6C236C7D6C826DC773987426742A7482 +74A37578757F788178EF794179477948797A7B957D007DBA7F888006802D808C +8A188B4F8C488D779321932498E299519A0E9A0F9A659E927DCA4F76540962EE +685491D155AB513AF90BF90C5A1C61E6F90D62CF62FFF90EF90FF910F911F912 +F91390A3F914F915F916F917F9188AFEF919F91AF91BF91C6696F91D7156F91E +F91F96E3F920634F637A5357F921678F69606E73F9227537F923F924F9250000 +D2 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00007D0DF926F927887256CA5A18F928F929F92AF92BF92C4E43F92D51675948 +67F08010F92E59735E74649A79CA5FF5606C62C8637B5BE75BD752AAF92F5974 +5F296012F930F931F9327459F933F934F935F936F937F93899D1F939F93AF93B +F93CF93DF93EF93FF940F941F942F9436FC3F944F94581BF8FB260F1F946F947 +8166F948F9495C3FF94AF94BF94CF94DF94EF94FF950F9515AE98A25677B7D10 +F952F953F954F955F956F95780FDF958F9595C3C6CE5533F6EBA591A83360000 +D3 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00004E394EB64F4655AE571858C75F5665B765E66A806BB56E4D77ED7AEF7C1E +7DDE86CB88929132935B64BB6FBE737A75B890545556574D61BA64D466C76DE1 +6E5B6F6D6FB975F0804381BD854189838AC78B5A931F6C9375537B548E0F905D +5510580258585E626207649E68E075767CD687B39EE84EE35788576E59275C0D +5CB15E365F85623464E173B381FA888B8CB8968A9EDB5B855FB760B350125200 +52305716583558575C0E5C605CF65D8B5EA65F9260BC63116389641768430000 +D4 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000068F96AC26DD86E216ED46FE471FE76DC777979B17A3B840489A98CED8DF3 +8E4890039014905390FD934D967697DC6BD27006725872A27368776379BF7BE4 +7E9B8B8058A960C7656665FD66BE6C8C711E71C98C5A98134E6D7A814EDD51AC +51CD52D5540C61A76771685068DF6D1E6F7C75BC77B37AE580F484639285515C +6597675C679375D87AC78373F95A8C469017982D5C6F81C0829A9041906F920D +5F975D9D6A5971C8767B7B4985E48B0491279A30558761F6F95B76697F850000 +D5 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000863F87BA88F8908FF95C6D1B70D973DE7D61843DF95D916A99F1F95E4E82 +53756B046B12703E721B862D9E1E524C8FA35D5064E5652C6B166FEB7C437E9C +85CD896489BD62C981D8881F5ECA67176D6A72FC7405746F878290DE4F865D0D 
+5FA0840A51B763A075654EAE5006516951C968816A117CAE7CB17CE7826F8AD2 +8F1B91CF4FB6513752F554425EEC616E623E65C56ADA6FFE792A85DC882395AD +9A629A6A9E979ECE529B66C66B77701D792B8F6297426190620065236F230000 +D6 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000714974897DF4806F84EE8F269023934A51BD521752A36D0C70C888C25EC9 +65826BAE6FC27C3E73754EE44F3656F9F95F5CBA5DBA601C73B27B2D7F9A7FCE +8046901E923496F6974898189F614F8B6FA779AE91B496B752DEF960648864C4 +6AD36F5E7018721076E780018606865C8DEF8F0597329B6F9DFA9E75788C797F +7DA083C993049E7F9E938AD658DF5F046727702774CF7C60807E512170287262 +78CA8CC28CDA8CF496F74E8650DA5BEE5ED6659971CE764277AD804A84FC0000 +D7 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000907C9B279F8D58D85A415C626A136DDA6F0F763B7D2F7E37851E893893E4 +964B528965D267F369B46D416E9C700F7409746075597624786B8B2C985E516D +622E96784F96502B5D196DEA7DB88F2A5F8B61446817F961968652D2808B51DC +51CC695E7A1C7DBE83F196754FDA52295398540F550E5C6560A7674E68A86D6C +728172F874067483F96275E27C6C7F797FB8838988CF88E191CC91D096E29BC9 +541D6F7E71D0749885FA8EAA96A39C579E9F67976DCB743381E89716782C0000 +D8 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00007ACB7B207C926469746A75F278BC78E899AC9B549EBB5BDE5E556F20819C +83AB90884E07534D5A295DD25F4E6162633D666966FC6EFF6F2B7063779E842C +8513883B8F1399459C3B551C62B9672B6CAB8309896A977A4EA159845FD85FD9 +671B7DB27F548292832B83BD8F1E909957CB59B95A925BD06627679A68856BCF +71647F758CB78CE390819B4581088C8A964C9A409EA55B5F6C13731B76F276DF +840C51AA8993514D519552C968C96C94770477207DBF7DEC97629EB56EC50000 +D9 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000851151A5540D547D660E669D69276E9F76BF7791831784C2879F91699298 +9CF488824FAE519252DF59C65E3D61556478647966AE67D06A216BCD6BDB725F +72617441773877DB801782BC83058B008B288C8C67286C90726776EE77667A46 +9DA96B7F6C92592267268499536F589359995EDF63CF663467736E3A732B7AD7 +82D7932852D95DEB61AE61CB620A62C764AB65E069596B666BCB712173F7755D +7E46821E8302856A8AA38CBF97279D6158A89ED85011520E543B554F65870000 +DA +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00006C767D0A7D0B805E868A958096EF52FF6C95726954735A9A5C3E5D4B5F4C +5FAE672A68B669636E3C6E4477097C737F8E85878B0E8FF797619EF45CB760B6 +610D61AB654F65FB65FC6C116CEF739F73C97DE195945BC6871C8B10525D535A +62CD640F64B267346A386CCA73C0749E7B947C957E1B818A823685848FEB96F9 +99C14F34534A53CD53DB62CC642C6500659169C36CEE6F5873ED7554762276E4 +76FC78D078FB792C7D46822C87E08FD4981298EF52C362D464A56E246F510000 +DB +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000767C8DCB91B192629AEE9B435023508D574A59A85C285E475F77623F653E +65B965C16609678B699C6EC278C57D2180AA8180822B82B384A1868C8A2A8B17 +90A696329F90500D4FF3F96357F95F9862DC6392676F6E43711976C380CC80DA +88F488F589198CE08F29914D966A4F2F4F705E1B67CF6822767D767E9B445E61 +6A0A716971D4756AF9647E41854385E998DC4F107B4F7F7095A551E15E0668B5 +6C3E6C4E6CDB72AF7BC483036CD5743A50FB528858C164D86A9774A776560000 +DC +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000078A7861795E29739F965535E5F018B8A8FA88FAF908A522577A59C499F08 +4E19500251755C5B5E77661E663A67C468C570B3750175C579C97ADD8F279920 +9A084FDD582158315BF6666E6B656D116E7A6F7D73E4752B83E988DC89138B5C +8F144F0F50D55310535C5B935FA9670D798F8179832F8514890789868F398F3B +99A59C12672C4E764FF859495C015CEF5CF0636768D270FD71A2742B7E2B84EC +8702902292D29CF34E0D4ED84FEF50855256526F5426549057E0592B5A660000 +DD +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00005B5A5B755BCC5E9CF9666276657765A76D6E6EA572367B267C3F7F368150 +8151819A8240829983A98A038CA08CE68CFB8D748DBA90E891DC961C964499D9 +9CE7531752065429567458B35954596E5FFF61A4626E66106C7E711A76C67C89 +7CDE7D1B82AC8CC196F0F9674F5B5F175F7F62C25D29670B68DA787C7E439D6C +4E1550995315532A535159835A625E8760B2618A624962796590678769A76BD4 +6BD66BD76BD86CB8F968743575FA7812789179D579D87C837DCB7FE180A50000 +DE +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000813E81C283F2871A88E88AB98B6C8CBB9119975E98DB9F3B56AC5B2A5F6C +658C6AB36BAF6D5C6FF17015725D73AD8CA78CD3983B61916C3780589A014E4D +4E8B4E9B4ED54F3A4F3C4F7F4FDF50FF53F253F8550655E356DB58EB59625A11 +5BEB5BFA5C045DF35E2B5F99601D6368659C65AF67F667FB68AD6B7B6C996CD7 +6E23700973457802793E7940796079C17BE97D177D728086820D838E84D186C7 +88DF8A508A5E8B1D8CDC8D668FAD90AA98FC99DF9E9D524AF9696714F96A0000 +DF +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00005098522A5C7165636C5573CA7523759D7B97849C917897304E7764926BBA +715E85A94E09F96B674968EE6E17829F8518886B63F76F81921298AF4E0A50B7 
+50CF511F554655AA56175B405C195CE05E385E8A5EA05EC260F368516A616E58 +723D724072C076F879657BB17FD488F389F48A738C618CDE971C585E74BD8CFD +55C7F96C7A617D2282727272751F7525F96D7B19588558FB5DBC5E8F5EB65F90 +60556292637F654D669166D966F8681668F27280745E7B6E7D6E7DD67F720000 +E0 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000080E5821285AF897F8A93901D92E49ECD9F205915596D5E2D60DC66146673 +67906C506DC56F5F77F378A984C691CB932B4ED950CA514855845B0B5BA36247 +657E65CB6E32717D74017444748774BF766C79AA7DDA7E557FA8817A81B38239 +861A87EC8A758DE3907892919425994D9BAE53685C5169546CC46D296E2B820C +859B893B8A2D8AAA96EA9F67526166B96BB27E9687FE8D0D9583965D651D6D89 +71EEF96E57CE59D35BAC602760FA6210661F665F732973F976DB77017B6C0000 +E1 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00008056807281658AA091924E1652E26B726D177A057B397D30F96F8CB053EC +562F58515BB55C0F5C115DE2624063836414662D68B36CBC6D886EAF701F70A4 +71D27526758F758E76197B117BE07C2B7D207D39852C856D86078A34900D9061 +90B592B797F69A374FD75C6C675F6D917C9F7E8C8B168D16901F5B6B5DFD640D +84C0905C98E173875B8B609A677E6DDE8A1F8AA69001980C5237F9707051788E +9396887091D74FEE53D755FD56DA578258FD5AC25B885CAB5CC05E2561010000 +E2 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000620D624B6388641C653665786A396B8A6C346D196F3171E772E973787407 +74B27626776179C07A577AEA7CB97D8F7DAC7E617F9E81298331849084DA85EA +88968AB08B908F3890429083916C929692B9968B96A796A896D6970098089996 +9AD39B1A53D4587E59195B705BBF6DD16F5A719F742174B9808583FD5DE15F87 +5FAA604265EC6812696F6A536B896D356DF373E376FE77AC7B4D7D148123821C +834084F485638A628AC49187931E980699B4620C88538FF092655D075D270000 +E3 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00005D69745F819D87686FD562FE7FD2893689724E1E4E5850E752DD5347627F +66077E698805965E4F8D5319563659CB5AA45C385C4E5C4D5E025F11604365BD +662F664267BE67F4731C77E2793A7FC5849484CD89968A668A698AE18C558C7A +57F45BD45F0F606F62ED690D6B966E5C71847BD287558B588EFE98DF98FE4F38 +4F814FE1547B5A205BB8613C65B0666871FC7533795E7D33814E81E3839885AA +85CE87038A0A8EAB8F9BF9718FC559315BA45BE660895BE95C0B5FC36C810000 +E4 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000F9726DF1700B751A82AF8AF64EC05341F97396D96C0F4E9E4FC45152555E +5A255CE86211725982BD83AA86FE88598A1D963F96C599139D099D5D580A5CB3 +5DBD5E4460E1611563E16A026E2591029354984E9C109F775B895CB86309664F +6848773C96C1978D98549B9F65A18B018ECB95BC55355CA95DD65EB56697764C +83F495C758D362BC72CE9D284EF0592E600F663B6B8379E79D26539354C057C3 +5D16611B66D66DAF788D827E969897445384627C63966DB27E0A814B984D0000 +E5 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00006AFB7F4C9DAF9E1A4E5F503B51B6591C60F963F66930723A8036F97491CE +5F31F975F9767D0482E5846F84BB85E58E8DF9774F6FF978F97958E45B436059 +63DA6518656D6698F97A694A6A236D0B7001716C75D2760D79B37A70F97B7F8A +F97C8944F97D8B9391C0967DF97E990A57045FA165BC6F01760079A68A9E99AD +9B5A9F6C510461B662916A8D81C6504358305F6671098A008AFA5B7C86164FFA +513C56B4594463A96DF95DAA696D51864E884F59F97FF980F9815982F9820000 +E6 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000F9836B5F6C5DF98474B57916F9858207824583398F3F8F5DF9869918F987 +F988F9894EA6F98A57DF5F796613F98BF98C75AB7E798B6FF98D90069A5B56A5 +582759F85A1F5BB4F98E5EF6F98FF9906350633BF991693D6C876CBF6D8E6D93 +6DF56F14F99270DF71367159F99371C371D5F994784F786FF9957B757DE3F996 +7E2FF997884D8EDFF998F999F99A925BF99B9CF6F99CF99DF99E60856D85F99F +71B1F9A0F9A195B153ADF9A2F9A3F9A467D3F9A5708E71307430827682D20000 +E7 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000F9A695BB9AE59E7D66C4F9A771C18449F9A8F9A9584BF9AAF9AB5DB85F71 +F9AC6620668E697969AE6C386CF36E366F416FDA701B702F715071DF7370F9AD +745BF9AE74D476C87A4E7E93F9AFF9B082F18A608FCEF9B19348F9B29719F9B3 +F9B44E42502AF9B5520853E166F36C6D6FCA730A777F7A6282AE85DD8602F9B6 +88D48A638B7D8C6BF9B792B3F9B8971398104E944F0D4FC950B25348543E5433 +55DA586258BA59675A1B5BE4609FF9B961CA655665FF666468A76C5A6FB30000 +E8 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000070CF71AC73527B7D87088AA49C329F075C4B6C8373447389923A6EAB7465 +761F7A697E15860A514058C564C174EE751576707FC1909596CD99546E2674E6 +7AA97AAA81E586D987788A1B5A495B8C5B9B68A169006D6373A97413742C7897 +7DE97FEB81188155839E8C4C962E981166F05F8065FA67896C6A738B502D5A03 +6B6A77EE59165D6C5DCD7325754FF9BAF9BB50E551F9582F592D599659DA5BE5 +F9BCF9BD5DA262D76416649364FEF9BE66DCF9BF6A48F9C071FF7464F9C10000 +E9 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00007A887AAF7E477E5E80008170F9C287EF89818B209059F9C390809952617E 
+6B326D747E1F89258FB14FD150AD519752C757C758895BB95EB8614269956D8C +6E676EB6719474627528752C8073833884C98E0A939493DEF9C44E8E4F515076 +512A53C853CB53F35B875BD35C24611A618265F4725B7397744076C279507991 +79B97D067FBD828B85D5865E8FC2904790F591EA968596E896E952D65F6765ED +6631682F715C7A3690C1980A4E91F9C56A526B9E6F907189801882B885530000 +EA +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000904B969596F297FB851A9B314E90718A96C45143539F54E15713571257A3 +5A9B5AC45BC36028613F63F46C856D396E726E907230733F745782D188818F45 +9060F9C6966298589D1B67088D8A925E4F4D504950DE5371570D59D45A015C09 +617066906E2D7232744B7DEF80C3840E8466853F875F885B89188B02905597CB +9B4F4E734F915112516AF9C7552F55A95B7A5BA55E7C5E7D5EBE60A060DF6108 +610963C465386709F9C867D467DAF9C9696169626CB96D27F9CA6E38F9CB0000 +EB +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00006FE173367337F9CC745C7531F9CD7652F9CEF9CF7DAD81FE843888D58A98 +8ADB8AED8E308E42904A903E907A914991C9936EF9D0F9D15809F9D26BD38089 +80B2F9D3F9D45141596B5C39F9D5F9D66F6473A780E48D07F9D79217958FF9D8 +F9D9F9DAF9DB807F620E701C7D68878DF9DC57A0606961476BB78ABE928096B1 +4E59541F6DEB852D967097F398EE63D66CE3909151DD61C981BA9DF94F9D501A +51005B9C610F61FF64EC69056BC5759177E37FA98264858F87FB88638ABC0000 +EC +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00008B7091AB4E8C4EE54F0AF9DDF9DE593759E8F9DF5DF25F1B5F5B6021F9E0 +F9E1F9E2F9E3723E73E5F9E4757075CDF9E579FBF9E6800C8033808482E18351 +F9E7F9E88CBD8CB39087F9E9F9EA98F4990CF9EBF9EC703776CA7FCA7FCC7FFC +8B1A4EBA4EC152035370F9ED54BD56E059FB5BC55F155FCD6E6EF9EEF9EF7D6A +8335F9F086938A8DF9F1976D9777F9F2F9F34E004F5A4F7E58F965E56EA29038 +93B099B94EFB58EC598A59D96041F9F4F9F57A14F9F6834F8CC3516553440000 +ED 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000F9F7F9F8F9F94ECD52695B5582BF4ED4523A54A859C959FF5B505B575B5C +606361486ECB7099716E738674F775B578C17D2B800581EA8328851785C98AEE +8CC796CC4F5C52FA56BC65AB6628707C70B872357DBD828D914C96C09D725B71 +68E76B986F7A76DE5C9166AB6F5B7BB47C2A883696DC4E084ED75320583458BB +58EF596C5C075E335E845F35638C66B267566A1F6AA36B0C6F3F7246F9FA7350 +748B7AE07CA7817881DF81E7838A846C8523859485CF88DD8D1391AC95770000 +EE +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000969C518D54C957285BB0624D6750683D68936E3D6ED3707D7E2188C18CA1 +8F099F4B9F4E722D7B8F8ACD931A4F474F4E5132548059D05E9562B56775696E +6A176CAE6E1A72D9732A75BD7BB87D3582E783F9845785F78A5B8CAF8E879019 +90B896CE9F5F52E3540A5AE15BC2645865756EF472C4F9FB76847A4D7B1B7C4D +7E3E7FDF837B8B2B8CCA8D648DE18E5F8FEA8FF9906993D14F434F7A50B35168 +5178524D526A5861587C59605C085C555EDB609B623068136BBF6C086FB10000 +EF +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000714E742075307538755176727B4C7B8B7BAD7BC67E8F8A6E8F3E8F49923F +92939322942B96FB985A986B991E5207622A62986D5976647ACA7BC07D765360 +5CBE5E976F3870B97C9897119B8E9EDE63A5647A87764E014E954EAD505C5075 +544859C35B9A5E405EAD5EF75F8160C5633A653F657465CC6676667867FE6968 +6A896B636C406DC06DE86E1F6E5E701E70A1738E73FD753A775B7887798E7A0B +7A7D7CBE7D8E82478A028AEA8C9E912D914A91D8926692CC9320970697560000 +F0 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000975C98029F0E52365291557C58245E1D5F1F608C63D068AF6FDF796D7B2C +81CD85BA88FD8AF88E44918D9664969B973D984C9F4A4FCE514651CB52A95632 +5F145F6B63AA64CD65E9664166FA66F9671D689D68D769FD6F156F6E716771E5 +722A74AA773A7956795A79DF7A207A957C977CDF7D447E70808785FB86A48A54 +8ABF8D998E819020906D91E3963B96D59CE565CF7C078DB393C35B585C0A5352 +62D9731D50275B975F9E60B0616B68D56DD9742E7A2E7D427D9C7E31816B0000 +F1 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00008E2A8E35937E94184F5057505DE65EA7632B7F6A4E3B4F4F4F8F505A59DD +80C4546A546855FE594F5B995DDE5EDA665D673167F1682A6CE86D326E4A6F8D +70B773E075877C4C7D027D2C7DA2821F86DB8A3B8A858D708E8A8F339031914E +9152944499D07AF97CA54FCA510151C657C85BEF5CFB66596A3D6D5A6E966FEC +710C756F7AE388229021907596CB99FF83014E2D4EF2884691CD537D6ADB696B +6C41847A589E618E66FE62EF70DD751175C77E5284B88B498D084E4B53EA0000 +F2 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000054AB573057405FD763016307646F652F65E8667A679D67B36B626C606C9A +6F2C77E57825794979577D1980A2810281F3829D82B787188A8CF9FC8D048DBE +907276F47A197A377E548077550755D45875632F64226649664B686D699B6B84 +6D256EB173CD746874A1755B75B976E1771E778B79E67E097E1D81FB852F8897 +8A3A8CD18EEB8FB0903293AD9663967397074F8453F159EA5AC95E19684E74C6 +75BE79E97A9281A386ED8CEA8DCC8FED659F6715F9FD57F76F577DDD8F2F0000 +F3 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+000093F696C65FB561F26F844E144F98501F53C955DF5D6F5DEE6B216B6478CB +7B9AF9FE8E498ECA906E6349643E77407A84932F947F9F6A64B06FAF71E674A8 +74DA7AC47C127E827CB27E988B9A8D0A947D9910994C52395BDF64E6672D7D2E +50ED53C358796158615961FA65AC7AD98B928B9650095021527555315A3C5EE0 +5F706134655E660C663666A269CD6EC46F32731676217A938139825983D684BC +50B557F05BC05BE85F6963A178267DB583DC852191C791F5518A67F57B560000 +F4 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00008CAC51C459BB60BD8655501CF9FF52545C3A617D621A62D364F265A56ECC +7620810A8E60965F96BB4EDF5343559859295DDD64C56CC96DFA73947A7F821B +85A68CE48E10907791E795E1962197C651F854F255865FB964A46F887DB48F1F +8F4D943550C95C166CBE6DFB751B77BB7C3D7C648A798AC2581E59BE5E166377 +7252758A776B8ADC8CBC8F125EF366746DF8807D83C18ACB97519BD6FA005243 +66FF6D956EEF7DE08AE6902E905E9AD4521D527F54E86194628462DB68A20000 +F5 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00006912695A6A3570927126785D7901790E79D27A0D8096827882D583498549 +8C828D859162918B91AE4FC356D171ED77D7870089F85BF85FD6675190A853E2 +585A5BF560A4618164607E3D80708525928364AE50AC5D146700589C62BD63A8 +690E69786A1E6E6B76BA79CB82BB84298ACF8DA88FFD9112914B919C93109318 +939A96DB9A369C0D4E11755C795D7AFA7B517BC97E2E84C48E598E748EF89010 +6625693F744351FA672E9EDC51455FE06C9687F2885D887760B481B584030000 +F6 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00008D0553D6543956345A365C31708A7FE0805A810681ED8DA391899A5F9DF2 +50744EC453A060FB6E2C5C644F88502455E45CD95E5F606568946CBB6DC471BE +75D475F476617A1A7A497DC77DFB7F6E81F486A98F1C96C999B39F52524752C5 +98ED89AA4E0367D26F064FB55BE267956C886D78741B782791DD937C87C479E4 +7A315FEB4ED654A4553E58AE59A560F0625362D6673669558235964099B199DD 
+502C53535544577CFA016258FA0264E2666B67DD6FC16FEF742274388A170000 +F7 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000094385451560657665F48619A6B4E705870AD7DBB8A95596A812B63A27708 +803D8CAA5854642D69BB5B955E116E6FFA038569514C53F0592A6020614B6B86 +6C706CF07B1E80CE82D48DC690B098B1FA0464C76FA464916504514E5410571F +8A0E615F6876FA0575DB7B527D71901A580669CC817F892A9000983950785957 +59AC6295900F9B2A615D727995D657615A465DF4628A64AD64FA67776CE26D3E +722C743678347F7782AD8DDB981752245742677F724874E38CA98FA692110000 +F8 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000962A516B53ED634C4F695504609665576C9B6D7F724C72FD7A1789878C9D +5F6D6F8E70F981A8610E4FBF504F624172477BC77DE87FE9904D97AD9A198CB6 +576A5E7367B0840D8A5554205B165E635EE25F0A658380BA853D9589965B4F48 +5305530D530F548654FA57035E036016629B62B16355FA066CE16D6675B17832 +80DE812F82DE846184B2888D8912900B92EA98FD9B915E4566B466DD70117206 +FA074FF5527D5F6A615367536A196F0274E2796888688C7998C798C49A430000 +F9 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +000054C17A1F69538AF78C4A98A899AE5F7C62AB75B276AE88AB907F96425339 +5F3C5FC56CCC73CC7562758B7B4682FE999D4E4F903C4E0B4F5553A6590F5EC8 +66306CB37455837787668CC09050971E9C1558D15B7886508B149DB45BD26068 +608D65F16C576F226FA3701A7F557FF095919592965097D352728F4451FD542B +54B85563558A6ABB6DB57DD88266929C96779E79540854C876D286E495A495D4 +965C4EA24F0959EE5AE65DF760526297676D68416C866E2F7F38809B822A0000 +FA +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000FA08FA0998054EA5505554B35793595A5B695BB361C869776D77702387F9 +89E38A728AE7908299ED9AB852BE683850165E78674F8347884C4EAB541156AE +73E6911597FF9909995799995653589F865B8A3161B26AF6737B8ED26B4796AA +9A57595572008D6B97694FD45CF45F2661F8665B6CEB70AB738473B973FE7729 +774D7D437D627E2382378852FA0A8CE29249986F5B517A74884098015ACC4FE0 +5354593E5CFD633E6D7972F98105810783A292CF98304EA851445211578B0000 +FB +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +00005F626CC26ECE7005705070AF719273E97469834A87A28861900890A293A3 +99A8516E5F5760E0616766B385598E4A91AF978B4E4E4E92547C58D558FA597D +5CB55F2762366248660A66676BEB6D696DCF6E566EF86F946FE06FE9705D72D0 +7425745A74E07693795C7CCA7E1E80E182A6846B84BF864E865F87748B778C6A +93AC9800986560D1621691775A5A660F6DF76E3E743F9B425FFD60DA7B0F54C4 +5F186C5E6CD36D2A70D87D0586798A0C9D3B5316548C5B056A3A706B75750000 +FC +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000798D79BE82B183EF8A718B418CA89774FA0B64F4652B78BA78BB7A6B4E38 +559A59505BA65E7B60A363DB6B61666568536E19716574B07D0890849A699C25 +6D3B6ED1733E8C4195CA51F05E4C5FA8604D60F66130614C6643664469A56CC1 +6E5F6EC96F62714C749C76877BC17C27835287579051968D9EC3532F56DE5EFB +5F8A6062609461F7666667036A9C6DEE6FAE7070736A7E6A81BE833486D48AA8 +8CC4528373725B966A6B940454EE56865B5D6548658566C9689F6D8D6DC60000 +FD +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 
+0000000000000000000000000000000000000000000000000000000000000000 +0000723B80B491759A4D4FAF5019539A540E543C558955C55E3F5F8C673D7166 +73DD900552DB52F3586458CE7104718F71FB85B08A13668885A855A76684714A +8431534955996BC15F595FBD63EE668971478AF18F1D9EBE4F11643A70CB7566 +866760648B4E9DF8514751F653086D3680F89ED166156B23709875D554035C79 +7D078A166B206B3D6B46543860706D3D7FD5820850D651DE559C566B56CD59EC +5B095E0C619961986231665E66E6719971B971BA72A779A77A007FB28A700000 diff --git a/mplug_owl2/lib/tcl8.6/encoding/macCroatian.enc b/mplug_owl2/lib/tcl8.6/encoding/macCroatian.enc new file mode 100644 index 0000000000000000000000000000000000000000..c23d0f0b77230435ea4dce64004ee77f3983c47b --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/encoding/macCroatian.enc @@ -0,0 +1,20 @@ +# Encoding file: macCroatian, single-byte +S +003F 0 1 +00 +0000000100020003000400050006000700080009000A000B000C000D000E000F +0010001100120013001400150016001700180019001A001B001C001D001E001F +0020002100220023002400250026002700280029002A002B002C002D002E002F +0030003100320033003400350036003700380039003A003B003C003D003E003F +0040004100420043004400450046004700480049004A004B004C004D004E004F +0050005100520053005400550056005700580059005A005B005C005D005E005F +0060006100620063006400650066006700680069006A006B006C006D006E006F +0070007100720073007400750076007700780079007A007B007C007D007E007F +00C400C500C700C900D100D600DC00E100E000E200E400E300E500E700E900E8 +00EA00EB00ED00EC00EE00EF00F100F300F200F400F600F500FA00F900FB00FC +202000B000A200A300A7202200B600DF00AE0160212200B400A82260017D00D8 +221E00B122642265220600B522022211220F0161222B00AA00BA03A9017E00F8 +00BF00A100AC221A01922248010600AB010C202600A000C000C300D501520153 +01102014201C201D2018201900F725CAF8FF00A9204420AC2039203A00C600BB +201300B7201A201E203000C2010700C1010D00C800CD00CE00CF00CC00D300D4 +011100D200DA00DB00D9013102C602DC00AF03C000CB02DA00B800CA00E602C7 diff --git a/mplug_owl2/lib/tcl8.6/http1.0/http.tcl b/mplug_owl2/lib/tcl8.6/http1.0/http.tcl new file mode 100644 index 0000000000000000000000000000000000000000..8329de4144d837f24328efcb1391e7b4e7ff5ba1 --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/http1.0/http.tcl @@ -0,0 +1,377 @@ +# http.tcl +# Client-side HTTP for GET, POST, and HEAD commands. +# These routines can be used in untrusted code that uses the Safesock +# security policy. +# These procedures use a callback interface to avoid using vwait, +# which is not defined in the safe base. 
+# +# See the http.n man page for documentation + +package provide http 1.0 + +array set http { + -accept */* + -proxyhost {} + -proxyport {} + -useragent {Tcl http client package 1.0} + -proxyfilter httpProxyRequired +} +proc http_config {args} { + global http + set options [lsort [array names http -*]] + set usage [join $options ", "] + if {[llength $args] == 0} { + set result {} + foreach name $options { + lappend result $name $http($name) + } + return $result + } + regsub -all -- - $options {} options + set pat ^-([join $options |])$ + if {[llength $args] == 1} { + set flag [lindex $args 0] + if {[regexp -- $pat $flag]} { + return $http($flag) + } else { + return -code error "Unknown option $flag, must be: $usage" + } + } else { + foreach {flag value} $args { + if {[regexp -- $pat $flag]} { + set http($flag) $value + } else { + return -code error "Unknown option $flag, must be: $usage" + } + } + } +} + + proc httpFinish { token {errormsg ""} } { + upvar #0 $token state + global errorInfo errorCode + if {[string length $errormsg] != 0} { + set state(error) [list $errormsg $errorInfo $errorCode] + set state(status) error + } + catch {close $state(sock)} + catch {after cancel $state(after)} + if {[info exists state(-command)]} { + if {[catch {eval $state(-command) {$token}} err]} { + if {[string length $errormsg] == 0} { + set state(error) [list $err $errorInfo $errorCode] + set state(status) error + } + } + unset state(-command) + } +} +proc http_reset { token {why reset} } { + upvar #0 $token state + set state(status) $why + catch {fileevent $state(sock) readable {}} + httpFinish $token + if {[info exists state(error)]} { + set errorlist $state(error) + unset state(error) + eval error $errorlist + } +} +proc http_get { url args } { + global http + if {![info exists http(uid)]} { + set http(uid) 0 + } + set token http#[incr http(uid)] + upvar #0 $token state + http_reset $token + array set state { + -blocksize 8192 + -validate 0 + -headers {} + -timeout 0 + state header + meta {} + currentsize 0 + totalsize 0 + type text/html + body {} + status "" + } + set options {-blocksize -channel -command -handler -headers \ + -progress -query -validate -timeout} + set usage [join $options ", "] + regsub -all -- - $options {} options + set pat ^-([join $options |])$ + foreach {flag value} $args { + if {[regexp $pat $flag]} { + # Validate numbers + if {[info exists state($flag)] && \ + [regexp {^[0-9]+$} $state($flag)] && \ + ![regexp {^[0-9]+$} $value]} { + return -code error "Bad value for $flag ($value), must be integer" + } + set state($flag) $value + } else { + return -code error "Unknown option $flag, can be: $usage" + } + } + if {! 
[regexp -nocase {^(http://)?([^/:]+)(:([0-9]+))?(/.*)?$} $url \ + x proto host y port srvurl]} { + error "Unsupported URL: $url" + } + if {[string length $port] == 0} { + set port 80 + } + if {[string length $srvurl] == 0} { + set srvurl / + } + if {[string length $proto] == 0} { + set url http://$url + } + set state(url) $url + if {![catch {$http(-proxyfilter) $host} proxy]} { + set phost [lindex $proxy 0] + set pport [lindex $proxy 1] + } + if {$state(-timeout) > 0} { + set state(after) [after $state(-timeout) [list http_reset $token timeout]] + } + if {[info exists phost] && [string length $phost]} { + set srvurl $url + set s [socket $phost $pport] + } else { + set s [socket $host $port] + } + set state(sock) $s + + # Send data in cr-lf format, but accept any line terminators + + fconfigure $s -translation {auto crlf} -buffersize $state(-blocksize) + + # The following is disallowed in safe interpreters, but the socket + # is already in non-blocking mode in that case. + + catch {fconfigure $s -blocking off} + set len 0 + set how GET + if {[info exists state(-query)]} { + set len [string length $state(-query)] + if {$len > 0} { + set how POST + } + } elseif {$state(-validate)} { + set how HEAD + } + puts $s "$how $srvurl HTTP/1.0" + puts $s "Accept: $http(-accept)" + puts $s "Host: $host" + puts $s "User-Agent: $http(-useragent)" + foreach {key value} $state(-headers) { + regsub -all \[\n\r\] $value {} value + set key [string trim $key] + if {[string length $key]} { + puts $s "$key: $value" + } + } + if {$len > 0} { + puts $s "Content-Length: $len" + puts $s "Content-Type: application/x-www-form-urlencoded" + puts $s "" + fconfigure $s -translation {auto binary} + puts -nonewline $s $state(-query) + } else { + puts $s "" + } + flush $s + fileevent $s readable [list httpEvent $token] + if {! 
[info exists state(-command)]} { + http_wait $token + } + return $token +} +proc http_data {token} { + upvar #0 $token state + return $state(body) +} +proc http_status {token} { + upvar #0 $token state + return $state(status) +} +proc http_code {token} { + upvar #0 $token state + return $state(http) +} +proc http_size {token} { + upvar #0 $token state + return $state(currentsize) +} + + proc httpEvent {token} { + upvar #0 $token state + set s $state(sock) + + if {[eof $s]} { + httpEof $token + return + } + if {$state(state) == "header"} { + set n [gets $s line] + if {$n == 0} { + set state(state) body + if {![regexp -nocase ^text $state(type)]} { + # Turn off conversions for non-text data + fconfigure $s -translation binary + if {[info exists state(-channel)]} { + fconfigure $state(-channel) -translation binary + } + } + if {[info exists state(-channel)] && + ![info exists state(-handler)]} { + # Initiate a sequence of background fcopies + fileevent $s readable {} + httpCopyStart $s $token + } + } elseif {$n > 0} { + if {[regexp -nocase {^content-type:(.+)$} $line x type]} { + set state(type) [string trim $type] + } + if {[regexp -nocase {^content-length:(.+)$} $line x length]} { + set state(totalsize) [string trim $length] + } + if {[regexp -nocase {^([^:]+):(.+)$} $line x key value]} { + lappend state(meta) $key $value + } elseif {[regexp ^HTTP $line]} { + set state(http) $line + } + } + } else { + if {[catch { + if {[info exists state(-handler)]} { + set n [eval $state(-handler) {$s $token}] + } else { + set block [read $s $state(-blocksize)] + set n [string length $block] + if {$n >= 0} { + append state(body) $block + } + } + if {$n >= 0} { + incr state(currentsize) $n + } + } err]} { + httpFinish $token $err + } else { + if {[info exists state(-progress)]} { + eval $state(-progress) {$token $state(totalsize) $state(currentsize)} + } + } + } +} + proc httpCopyStart {s token} { + upvar #0 $token state + if {[catch { + fcopy $s $state(-channel) -size $state(-blocksize) -command \ + [list httpCopyDone $token] + } err]} { + httpFinish $token $err + } +} + proc httpCopyDone {token count {error {}}} { + upvar #0 $token state + set s $state(sock) + incr state(currentsize) $count + if {[info exists state(-progress)]} { + eval $state(-progress) {$token $state(totalsize) $state(currentsize)} + } + if {([string length $error] != 0)} { + httpFinish $token $error + } elseif {[eof $s]} { + httpEof $token + } else { + httpCopyStart $s $token + } +} + proc httpEof {token} { + upvar #0 $token state + if {$state(state) == "header"} { + # Premature eof + set state(status) eof + } else { + set state(status) ok + } + set state(state) eof + httpFinish $token +} +proc http_wait {token} { + upvar #0 $token state + if {![info exists state(status)] || [string length $state(status)] == 0} { + vwait $token\(status) + } + if {[info exists state(error)]} { + set errorlist $state(error) + unset state(error) + eval error $errorlist + } + return $state(status) +} + +# Call http_formatQuery with an even number of arguments, where the first is +# a name, the second is a value, the third is another name, and so on. 
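+# Editorial usage sketch (not part of the upstream http 1.0 package; names and
+# values are hypothetical): the proc defined below percent-encodes each name
+# and value and joins the pairs with "=" and "&", e.g.:
+if {0} {
+    set query [http_formatQuery name "Tcl user" lang tcl]
+    # $query is now "name=Tcl+user&lang=tcl" (spaces map to "+")
+}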
+ +proc http_formatQuery {args} { + set result "" + set sep "" + foreach i $args { + append result $sep [httpMapReply $i] + if {$sep != "="} { + set sep = + } else { + set sep & + } + } + return $result +} + +# do x-www-urlencoded character mapping +# The spec says: "non-alphanumeric characters are replaced by '%HH'" +# 1 leave alphanumerics characters alone +# 2 Convert every other character to an array lookup +# 3 Escape constructs that are "special" to the tcl parser +# 4 "subst" the result, doing all the array substitutions + + proc httpMapReply {string} { + global httpFormMap + set alphanumeric a-zA-Z0-9 + if {![info exists httpFormMap]} { + + for {set i 1} {$i <= 256} {incr i} { + set c [format %c $i] + if {![string match \[$alphanumeric\] $c]} { + set httpFormMap($c) %[format %.2x $i] + } + } + # These are handled specially + array set httpFormMap { + " " + \n %0d%0a + } + } + regsub -all \[^$alphanumeric\] $string {$httpFormMap(&)} string + regsub -all \n $string {\\n} string + regsub -all \t $string {\\t} string + regsub -all {[][{})\\]\)} $string {\\&} string + return [subst $string] +} + +# Default proxy filter. + proc httpProxyRequired {host} { + global http + if {[info exists http(-proxyhost)] && [string length $http(-proxyhost)]} { + if {![info exists http(-proxyport)] || ![string length $http(-proxyport)]} { + set http(-proxyport) 8080 + } + return [list $http(-proxyhost) $http(-proxyport)] + } else { + return {} + } +} diff --git a/mplug_owl2/lib/tcl8.6/http1.0/pkgIndex.tcl b/mplug_owl2/lib/tcl8.6/http1.0/pkgIndex.tcl new file mode 100644 index 0000000000000000000000000000000000000000..ab6170f7f623e269c442ceef76e21b1cc4fb8c6f --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/http1.0/pkgIndex.tcl @@ -0,0 +1,11 @@ +# Tcl package index file, version 1.0 +# This file is generated by the "pkg_mkIndex" command +# and sourced either when an application starts up or +# by a "package unknown" script. It invokes the +# "package ifneeded" command to set up package-related +# information so that packages will be loaded automatically +# in response to "package require" commands. When this +# script is sourced, the variable $dir must contain the +# full path name of this file's directory. + +package ifneeded http 1.0 [list tclPkgSetup $dir http 1.0 {{http.tcl source {httpCopyDone httpCopyStart httpEof httpEvent httpFinish httpMapReply httpProxyRequired http_code http_config http_data http_formatQuery http_get http_reset http_size http_status http_wait}}}] diff --git a/mplug_owl2/lib/tcl8.6/package.tcl b/mplug_owl2/lib/tcl8.6/package.tcl new file mode 100644 index 0000000000000000000000000000000000000000..4ccc20a50432c227ffe8e7c4b2e4a07f9218474f --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/package.tcl @@ -0,0 +1,751 @@ +# package.tcl -- +# +# utility procs formerly in init.tcl which can be loaded on demand +# for package management. +# +# Copyright (c) 1991-1993 The Regents of the University of California. +# Copyright (c) 1994-1998 Sun Microsystems, Inc. +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. +# + +namespace eval tcl::Pkg {} + +# ::tcl::Pkg::CompareExtension -- +# +# Used internally by pkg_mkIndex to compare the extension of a file to a given +# extension. On Windows, it uses a case-insensitive comparison because the +# file system can be file insensitive. 
+# +# Arguments: +# fileName name of a file whose extension is compared +# ext (optional) The extension to compare against; you must +# provide the starting dot. +# Defaults to [info sharedlibextension] +# +# Results: +# Returns 1 if the extension matches, 0 otherwise + +proc tcl::Pkg::CompareExtension {fileName {ext {}}} { + global tcl_platform + if {$ext eq ""} {set ext [info sharedlibextension]} + if {$tcl_platform(platform) eq "windows"} { + return [string equal -nocase [file extension $fileName] $ext] + } else { + # Some unices add trailing numbers after the .so, so + # we could have something like '.so.1.2'. + set root $fileName + while {1} { + set currExt [file extension $root] + if {$currExt eq $ext} { + return 1 + } + + # The current extension does not match; if it is not a numeric + # value, quit, as we are only looking to ignore version number + # extensions. Otherwise we might return 1 in this case: + # tcl::Pkg::CompareExtension foo.so.bar .so + # which should not match. + + if {![string is integer -strict [string range $currExt 1 end]]} { + return 0 + } + set root [file rootname $root] + } + } +} + +# pkg_mkIndex -- +# This procedure creates a package index in a given directory. The package +# index consists of a "pkgIndex.tcl" file whose contents are a Tcl script that +# sets up package information with "package require" commands. The commands +# describe all of the packages defined by the files given as arguments. +# +# Arguments: +# -direct (optional) If this flag is present, the generated +# code in pkgMkIndex.tcl will cause the package to be +# loaded when "package require" is executed, rather +# than lazily when the first reference to an exported +# procedure in the package is made. +# -verbose (optional) Verbose output; the name of each file that +# was successfully processed is printed out. Additionally, +# if processing of a file failed a message is printed. +# -load pat (optional) Preload any packages whose names match +# the pattern. Used to handle DLLs that depend on +# other packages during their Init procedure. +# dir - Name of the directory in which to create the index. +# args - Any number of additional arguments, each giving +# a glob pattern that matches the names of one or +# more shared libraries or Tcl script files in +# dir. + +proc pkg_mkIndex {args} { + set usage {"pkg_mkIndex ?-direct? ?-lazy? ?-load pattern? ?-verbose? ?--? dir ?pattern ...?"} + + set argCount [llength $args] + if {$argCount < 1} { + return -code error "wrong # args: should be\n$usage" + } + + set more "" + set direct 1 + set doVerbose 0 + set loadPat "" + for {set idx 0} {$idx < $argCount} {incr idx} { + set flag [lindex $args $idx] + switch -glob -- $flag { + -- { + # done with the flags + incr idx + break + } + -verbose { + set doVerbose 1 + } + -lazy { + set direct 0 + append more " -lazy" + } + -direct { + append more " -direct" + } + -load { + incr idx + set loadPat [lindex $args $idx] + append more " -load $loadPat" + } + -* { + return -code error "unknown flag $flag: should be\n$usage" + } + default { + # done with the flags + break + } + } + } + + set dir [lindex $args $idx] + set patternList [lrange $args [expr {$idx + 1}] end] + if {![llength $patternList]} { + set patternList [list "*.tcl" "*[info sharedlibextension]"] + } + + try { + set fileList [glob -directory $dir -tails -types {r f} -- \ + {*}$patternList] + } on error {msg opt} { + return -options $opt $msg + } + foreach file $fileList { + # For each file, figure out what commands and packages it provides. 
+ # To do this, create a child interpreter, load the file into the + # interpreter, and get a list of the new commands and packages that + # are defined. + + if {$file eq "pkgIndex.tcl"} { + continue + } + + set c [interp create] + + # Load into the child any packages currently loaded in the parent + # interpreter that match the -load pattern. + + if {$loadPat ne ""} { + if {$doVerbose} { + tclLog "currently loaded packages: '[info loaded]'" + tclLog "trying to load all packages matching $loadPat" + } + if {![llength [info loaded]]} { + tclLog "warning: no packages are currently loaded, nothing" + tclLog "can possibly match '$loadPat'" + } + } + foreach pkg [info loaded] { + if {![string match -nocase $loadPat [lindex $pkg 1]]} { + continue + } + if {$doVerbose} { + tclLog "package [lindex $pkg 1] matches '$loadPat'" + } + try { + load [lindex $pkg 0] [lindex $pkg 1] $c + } on error err { + if {$doVerbose} { + tclLog "warning: load [lindex $pkg 0]\ + [lindex $pkg 1]\nfailed with: $err" + } + } on ok {} { + if {$doVerbose} { + tclLog "loaded [lindex $pkg 0] [lindex $pkg 1]" + } + } + if {[lindex $pkg 1] eq "Tk"} { + # Withdraw . if Tk was loaded, to avoid showing a window. + $c eval [list wm withdraw .] + } + } + + $c eval { + # Stub out the package command so packages can require other + # packages. + + rename package __package_orig + proc package {what args} { + switch -- $what { + require { + return; # Ignore transitive requires + } + default { + __package_orig $what {*}$args + } + } + } + proc tclPkgUnknown args {} + package unknown tclPkgUnknown + + # Stub out the unknown command so package can call into each other + # during their initialization. + + proc unknown {args} {} + + # Stub out the auto_import mechanism + + proc auto_import {args} {} + + # reserve the ::tcl namespace for support procs and temporary + # variables. This might make it awkward to generate a + # pkgIndex.tcl file for the ::tcl namespace. + + namespace eval ::tcl { + variable dir ;# Current directory being processed + variable file ;# Current file being processed + variable direct ;# -direct flag value + variable x ;# Loop variable + variable debug ;# For debugging + variable type ;# "load" or "source", for -direct + variable namespaces ;# Existing namespaces (e.g., ::tcl) + variable packages ;# Existing packages (e.g., Tcl) + variable origCmds ;# Existing commands + variable newCmds ;# Newly created commands + variable newPkgs {} ;# Newly created packages + } + } + + $c eval [list set ::tcl::dir $dir] + $c eval [list set ::tcl::file $file] + $c eval [list set ::tcl::direct $direct] + + # Download needed procedures into the child because we've just deleted + # the unknown procedure. This doesn't handle procedures with default + # arguments. + + foreach p {::tcl::Pkg::CompareExtension} { + $c eval [list namespace eval [namespace qualifiers $p] {}] + $c eval [list proc $p [info args $p] [info body $p]] + } + + try { + $c eval { + set ::tcl::debug "loading or sourcing" + + # we need to track command defined by each package even in the + # -direct case, because they are needed internally by the + # "partial pkgIndex.tcl" step above. 
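+                # Editorial sketch (hypothetical, not part of the upstream
+                # package.tcl): the tracking below amounts to a before/after
+                # set difference over [info commands], conceptually:
+                if {0} {
+                    set before [info commands]
+                    source somePackageFile.tcl        ;# hypothetical file name
+                    foreach c [info commands] {
+                        if {$c ni $before} { puts "new command: $c" }
+                    }
+                }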
+ + proc ::tcl::GetAllNamespaces {{root ::}} { + set list $root + foreach ns [namespace children $root] { + lappend list {*}[::tcl::GetAllNamespaces $ns] + } + return $list + } + + # init the list of existing namespaces, packages, commands + + foreach ::tcl::x [::tcl::GetAllNamespaces] { + set ::tcl::namespaces($::tcl::x) 1 + } + foreach ::tcl::x [package names] { + if {[package provide $::tcl::x] ne ""} { + set ::tcl::packages($::tcl::x) 1 + } + } + set ::tcl::origCmds [info commands] + + # Try to load the file if it has the shared library extension, + # otherwise source it. It's important not to try to load + # files that aren't shared libraries, because on some systems + # (like SunOS) the loader will abort the whole application + # when it gets an error. + + if {[::tcl::Pkg::CompareExtension $::tcl::file [info sharedlibextension]]} { + # The "file join ." command below is necessary. Without + # it, if the file name has no \'s and we're on UNIX, the + # load command will invoke the LD_LIBRARY_PATH search + # mechanism, which could cause the wrong file to be used. + + set ::tcl::debug loading + load [file join $::tcl::dir $::tcl::file] + set ::tcl::type load + } else { + set ::tcl::debug sourcing + source [file join $::tcl::dir $::tcl::file] + set ::tcl::type source + } + + # As a performance optimization, if we are creating direct + # load packages, don't bother figuring out the set of commands + # created by the new packages. We only need that list for + # setting up the autoloading used in the non-direct case. + if {!$::tcl::direct} { + # See what new namespaces appeared, and import commands + # from them. Only exported commands go into the index. + + foreach ::tcl::x [::tcl::GetAllNamespaces] { + if {![info exists ::tcl::namespaces($::tcl::x)]} { + namespace import -force ${::tcl::x}::* + } + + # Figure out what commands appeared + + foreach ::tcl::x [info commands] { + set ::tcl::newCmds($::tcl::x) 1 + } + foreach ::tcl::x $::tcl::origCmds { + unset -nocomplain ::tcl::newCmds($::tcl::x) + } + foreach ::tcl::x [array names ::tcl::newCmds] { + # determine which namespace a command comes from + + set ::tcl::abs [namespace origin $::tcl::x] + + # special case so that global names have no + # leading ::, this is required by the unknown + # command + + set ::tcl::abs \ + [lindex [auto_qualify $::tcl::abs ::] 0] + + if {$::tcl::x ne $::tcl::abs} { + # Name changed during qualification + + set ::tcl::newCmds($::tcl::abs) 1 + unset ::tcl::newCmds($::tcl::x) + } + } + } + } + + # Look through the packages that appeared, and if there is a + # version provided, then record it + + foreach ::tcl::x [package names] { + if {[package provide $::tcl::x] ne "" + && ![info exists ::tcl::packages($::tcl::x)]} { + lappend ::tcl::newPkgs \ + [list $::tcl::x [package provide $::tcl::x]] + } + } + } + } on error msg { + set what [$c eval set ::tcl::debug] + if {$doVerbose} { + tclLog "warning: error while $what $file: $msg" + } + } on ok {} { + set what [$c eval set ::tcl::debug] + if {$doVerbose} { + tclLog "successful $what of $file" + } + set type [$c eval set ::tcl::type] + set cmds [lsort [$c eval array names ::tcl::newCmds]] + set pkgs [$c eval set ::tcl::newPkgs] + if {$doVerbose} { + if {!$direct} { + tclLog "commands provided were $cmds" + } + tclLog "packages provided were $pkgs" + } + if {[llength $pkgs] > 1} { + tclLog "warning: \"$file\" provides more than one package ($pkgs)" + } + foreach pkg $pkgs { + # cmds is empty/not used in the direct case + lappend files($pkg) [list $file $type $cmds] 
+ } + + if {$doVerbose} { + tclLog "processed $file" + } + } + interp delete $c + } + + append index "# Tcl package index file, version 1.1\n" + append index "# This file is generated by the \"pkg_mkIndex$more\" command\n" + append index "# and sourced either when an application starts up or\n" + append index "# by a \"package unknown\" script. It invokes the\n" + append index "# \"package ifneeded\" command to set up package-related\n" + append index "# information so that packages will be loaded automatically\n" + append index "# in response to \"package require\" commands. When this\n" + append index "# script is sourced, the variable \$dir must contain the\n" + append index "# full path name of this file's directory.\n" + + foreach pkg [lsort [array names files]] { + set cmd {} + lassign $pkg name version + lappend cmd ::tcl::Pkg::Create -name $name -version $version + foreach spec [lsort -index 0 $files($pkg)] { + foreach {file type procs} $spec { + if {$direct} { + set procs {} + } + lappend cmd "-$type" [list $file $procs] + } + } + append index "\n[eval $cmd]" + } + + set f [open [file join $dir pkgIndex.tcl] w] + puts $f $index + close $f +} + +# tclPkgSetup -- +# This is a utility procedure use by pkgIndex.tcl files. It is invoked as +# part of a "package ifneeded" script. It calls "package provide" to indicate +# that a package is available, then sets entries in the auto_index array so +# that the package's files will be auto-loaded when the commands are used. +# +# Arguments: +# dir - Directory containing all the files for this package. +# pkg - Name of the package (no version number). +# version - Version number for the package, such as 2.1.3. +# files - List of files that constitute the package. Each +# element is a sub-list with three elements. The first +# is the name of a file relative to $dir, the second is +# "load" or "source", indicating whether the file is a +# loadable binary or a script to source, and the third +# is a list of commands defined by this file. + +proc tclPkgSetup {dir pkg version files} { + global auto_index + + package provide $pkg $version + foreach fileInfo $files { + set f [lindex $fileInfo 0] + set type [lindex $fileInfo 1] + foreach cmd [lindex $fileInfo 2] { + if {$type eq "load"} { + set auto_index($cmd) [list load [file join $dir $f] $pkg] + } else { + set auto_index($cmd) [list source [file join $dir $f]] + } + } + } +} + +# tclPkgUnknown -- +# This procedure provides the default for the "package unknown" function. It +# is invoked when a package that's needed can't be found. It scans the +# auto_path directories and their immediate children looking for pkgIndex.tcl +# files and sources any such files that are found to setup the package +# database. As it searches, it will recognize changes to the auto_path and +# scan any new directories. +# +# Arguments: +# name - Name of desired package. Not used. +# version - Version of desired package. Not used. +# exact - Either "-exact" or omitted. Not used. + +proc tclPkgUnknown {name args} { + global auto_path env + + if {![info exists auto_path]} { + return + } + # Cache the auto_path, because it may change while we run through the + # first set of pkgIndex.tcl files + set old_path [set use_path $auto_path] + while {[llength $use_path]} { + set dir [lindex $use_path end] + + # Make sure we only scan each directory one time. 
+ if {[info exists tclSeenPath($dir)]} { + set use_path [lrange $use_path 0 end-1] + continue + } + set tclSeenPath($dir) 1 + + # Get the pkgIndex.tcl files in subdirectories of auto_path directories. + # - Safe Base interpreters have a restricted "glob" command that + # works in this case. + # - The "catch" was essential when there was no safe glob and every + # call in a safe interp failed; it is retained only for corner + # cases in which the eventual call to glob returns an error. + catch { + foreach file [glob -directory $dir -join -nocomplain \ + * pkgIndex.tcl] { + set dir [file dirname $file] + if {![info exists procdDirs($dir)]} { + try { + source $file + } trap {POSIX EACCES} {} { + # $file was not readable; silently ignore + continue + } on error msg { + tclLog "error reading package index file $file: $msg" + } on ok {} { + set procdDirs($dir) 1 + } + } + } + } + set dir [lindex $use_path end] + if {![info exists procdDirs($dir)]} { + set file [file join $dir pkgIndex.tcl] + # safe interps usually don't have "file exists", + if {([interp issafe] || [file exists $file])} { + try { + source $file + } trap {POSIX EACCES} {} { + # $file was not readable; silently ignore + continue + } on error msg { + tclLog "error reading package index file $file: $msg" + } on ok {} { + set procdDirs($dir) 1 + } + } + } + + set use_path [lrange $use_path 0 end-1] + + # Check whether any of the index scripts we [source]d above set a new + # value for $::auto_path. If so, then find any new directories on the + # $::auto_path, and lappend them to the $use_path we are working from. + # This gives index scripts the (arguably unwise) power to expand the + # index script search path while the search is in progress. + set index 0 + if {[llength $old_path] == [llength $auto_path]} { + foreach dir $auto_path old $old_path { + if {$dir ne $old} { + # This entry in $::auto_path has changed. + break + } + incr index + } + } + + # $index now points to the first element of $auto_path that has + # changed, or the beginning if $auto_path has changed length Scan the + # new elements of $auto_path for directories to add to $use_path. + # Don't add directories we've already seen, or ones already on the + # $use_path. + foreach dir [lrange $auto_path $index end] { + if {![info exists tclSeenPath($dir)] && ($dir ni $use_path)} { + lappend use_path $dir + } + } + set old_path $auto_path + } +} + +# tcl::MacOSXPkgUnknown -- +# This procedure extends the "package unknown" function for MacOSX. It scans +# the Resources/Scripts directories of the immediate children of the auto_path +# directories for pkgIndex files. +# +# Arguments: +# original - original [package unknown] procedure +# name - Name of desired package. Not used. +# version - Version of desired package. Not used. +# exact - Either "-exact" or omitted. Not used. + +proc tcl::MacOSXPkgUnknown {original name args} { + # First do the cross-platform default search + uplevel 1 $original [linsert $args 0 $name] + + # Now do MacOSX specific searching + global auto_path + + if {![info exists auto_path]} { + return + } + # Cache the auto_path, because it may change while we run through the + # first set of pkgIndex.tcl files + set old_path [set use_path $auto_path] + while {[llength $use_path]} { + set dir [lindex $use_path end] + + # Make sure we only scan each directory one time. 
+ if {[info exists tclSeenPath($dir)]} { + set use_path [lrange $use_path 0 end-1] + continue + } + set tclSeenPath($dir) 1 + + # get the pkgIndex files out of the subdirectories + # Safe interpreters do not use tcl::MacOSXPkgUnknown - see init.tcl. + foreach file [glob -directory $dir -join -nocomplain \ + * Resources Scripts pkgIndex.tcl] { + set dir [file dirname $file] + if {![info exists procdDirs($dir)]} { + try { + source $file + } trap {POSIX EACCES} {} { + # $file was not readable; silently ignore + continue + } on error msg { + tclLog "error reading package index file $file: $msg" + } on ok {} { + set procdDirs($dir) 1 + } + } + } + set use_path [lrange $use_path 0 end-1] + + # Check whether any of the index scripts we [source]d above set a new + # value for $::auto_path. If so, then find any new directories on the + # $::auto_path, and lappend them to the $use_path we are working from. + # This gives index scripts the (arguably unwise) power to expand the + # index script search path while the search is in progress. + set index 0 + if {[llength $old_path] == [llength $auto_path]} { + foreach dir $auto_path old $old_path { + if {$dir ne $old} { + # This entry in $::auto_path has changed. + break + } + incr index + } + } + + # $index now points to the first element of $auto_path that has + # changed, or the beginning if $auto_path has changed length Scan the + # new elements of $auto_path for directories to add to $use_path. + # Don't add directories we've already seen, or ones already on the + # $use_path. + foreach dir [lrange $auto_path $index end] { + if {![info exists tclSeenPath($dir)] && ($dir ni $use_path)} { + lappend use_path $dir + } + } + set old_path $auto_path + } +} + +# ::tcl::Pkg::Create -- +# +# Given a package specification generate a "package ifneeded" statement +# for the package, suitable for inclusion in a pkgIndex.tcl file. +# +# Arguments: +# args arguments used by the Create function: +# -name packageName +# -version packageVersion +# -load {filename ?{procs}?} +# ... +# -source {filename ?{procs}?} +# ... +# +# Any number of -load and -source parameters may be +# specified, so long as there is at least one -load or +# -source parameter. If the procs component of a module +# specifier is left off, that module will be set up for +# direct loading; otherwise, it will be set up for lazy +# loading. If both -source and -load are specified, the +# -load'ed files will be loaded first, followed by the +# -source'd files. +# +# Results: +# An appropriate "package ifneeded" statement for the package. + +proc ::tcl::Pkg::Create {args} { + append err(usage) "[lindex [info level 0] 0] " + append err(usage) "-name packageName -version packageVersion" + append err(usage) "?-load {filename ?{procs}?}? ... " + append err(usage) "?-source {filename ?{procs}?}? ..." 
+ + set err(wrongNumArgs) "wrong # args: should be \"$err(usage)\"" + set err(valueMissing) "value for \"%s\" missing: should be \"$err(usage)\"" + set err(unknownOpt) "unknown option \"%s\": should be \"$err(usage)\"" + set err(noLoadOrSource) "at least one of -load and -source must be given" + + # process arguments + set len [llength $args] + if {$len < 6} { + error $err(wrongNumArgs) + } + + # Initialize parameters + array set opts {-name {} -version {} -source {} -load {}} + + # process parameters + for {set i 0} {$i < $len} {incr i} { + set flag [lindex $args $i] + incr i + switch -glob -- $flag { + "-name" - + "-version" { + if {$i >= $len} { + error [format $err(valueMissing) $flag] + } + set opts($flag) [lindex $args $i] + } + "-source" - + "-load" { + if {$i >= $len} { + error [format $err(valueMissing) $flag] + } + lappend opts($flag) [lindex $args $i] + } + default { + error [format $err(unknownOpt) [lindex $args $i]] + } + } + } + + # Validate the parameters + if {![llength $opts(-name)]} { + error [format $err(valueMissing) "-name"] + } + if {![llength $opts(-version)]} { + error [format $err(valueMissing) "-version"] + } + + if {!([llength $opts(-source)] || [llength $opts(-load)])} { + error $err(noLoadOrSource) + } + + # OK, now everything is good. Generate the package ifneeded statement. + set cmdline "package ifneeded $opts(-name) $opts(-version) " + + set cmdList {} + set lazyFileList {} + + # Handle -load and -source specs + foreach key {load source} { + foreach filespec $opts(-$key) { + lassign $filespec filename proclist + + if { [llength $proclist] == 0 } { + set cmd "\[list $key \[file join \$dir [list $filename]\]\]" + lappend cmdList $cmd + } else { + lappend lazyFileList [list $filename $key $proclist] + } + } + } + + if {[llength $lazyFileList]} { + lappend cmdList "\[list tclPkgSetup \$dir $opts(-name)\ + $opts(-version) [list $lazyFileList]\]" + } + append cmdline [join $cmdList "\\n"] + return $cmdline +} + +interp alias {} ::pkg::create {} ::tcl::Pkg::Create diff --git a/mplug_owl2/lib/tcl8.6/safe.tcl b/mplug_owl2/lib/tcl8.6/safe.tcl new file mode 100644 index 0000000000000000000000000000000000000000..8c79abdccbc23e372f1bf81d407fe1c0a64033f1 --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/safe.tcl @@ -0,0 +1,1289 @@ +# safe.tcl -- +# +# This file provide a safe loading/sourcing mechanism for safe interpreters. +# It implements a virtual path mechanism to hide the real pathnames from the +# child. It runs in a parent interpreter and sets up data structure and +# aliases that will be invoked when used from a child interpreter. +# +# See the safe.n man page for details. +# +# Copyright (c) 1996-1997 Sun Microsystems, Inc. +# +# See the file "license.terms" for information on usage and redistribution of +# this file, and for a DISCLAIMER OF ALL WARRANTIES. + +# +# The implementation is based on namespaces. These naming conventions are +# followed: +# Private procs starts with uppercase. 
+# Public procs are exported and starts with lowercase +# + +# Needed utilities package +package require opt 0.4.8 + +# Create the safe namespace +namespace eval ::safe { + # Exported API: + namespace export interpCreate interpInit interpConfigure interpDelete \ + interpAddToAccessPath interpFindInAccessPath setLogCmd +} + +# Helper function to resolve the dual way of specifying staticsok (either +# by -noStatics or -statics 0) +proc ::safe::InterpStatics {} { + foreach v {Args statics noStatics} { + upvar $v $v + } + set flag [::tcl::OptProcArgGiven -noStatics] + if {$flag && (!$noStatics == !$statics) + && ([::tcl::OptProcArgGiven -statics])} { + return -code error\ + "conflicting values given for -statics and -noStatics" + } + if {$flag} { + return [expr {!$noStatics}] + } else { + return $statics + } +} + +# Helper function to resolve the dual way of specifying nested loading +# (either by -nestedLoadOk or -nested 1) +proc ::safe::InterpNested {} { + foreach v {Args nested nestedLoadOk} { + upvar $v $v + } + set flag [::tcl::OptProcArgGiven -nestedLoadOk] + # note that the test here is the opposite of the "InterpStatics" one + # (it is not -noNested... because of the wanted default value) + if {$flag && (!$nestedLoadOk != !$nested) + && ([::tcl::OptProcArgGiven -nested])} { + return -code error\ + "conflicting values given for -nested and -nestedLoadOk" + } + if {$flag} { + # another difference with "InterpStatics" + return $nestedLoadOk + } else { + return $nested + } +} + +#### +# +# API entry points that needs argument parsing : +# +#### + +# Interface/entry point function and front end for "Create" +proc ::safe::interpCreate {args} { + set Args [::tcl::OptKeyParse ::safe::interpCreate $args] + RejectExcessColons $slave + InterpCreate $slave $accessPath \ + [InterpStatics] [InterpNested] $deleteHook +} + +proc ::safe::interpInit {args} { + set Args [::tcl::OptKeyParse ::safe::interpIC $args] + if {![::interp exists $slave]} { + return -code error "\"$slave\" is not an interpreter" + } + RejectExcessColons $slave + InterpInit $slave $accessPath \ + [InterpStatics] [InterpNested] $deleteHook +} + +# Check that the given child is "one of us" +proc ::safe::CheckInterp {child} { + namespace upvar ::safe [VarName $child] state + if {![info exists state] || ![::interp exists $child]} { + return -code error \ + "\"$child\" is not an interpreter managed by ::safe::" + } +} + +# Interface/entry point function and front end for "Configure". This code +# is awfully pedestrian because it would need more coupling and support +# between the way we store the configuration values in safe::interp's and +# the Opt package. Obviously we would like an OptConfigure to avoid +# duplicating all this code everywhere. +# -> TODO (the app should share or access easily the program/value stored +# by opt) + +# This is even more complicated by the boolean flags with no values that +# we had the bad idea to support for the sake of user simplicity in +# create/init but which makes life hard in configure... +# So this will be hopefully written and some integrated with opt1.0 +# (hopefully for tcl8.1 ?) +proc ::safe::interpConfigure {args} { + switch [llength $args] { + 1 { + # If we have exactly 1 argument the semantic is to return all + # the current configuration. We still call OptKeyParse though + # we know that "child" is our given argument because it also + # checks for the "-help" option. 
+ set Args [::tcl::OptKeyParse ::safe::interpIC $args] + CheckInterp $slave + namespace upvar ::safe [VarName $slave] state + + return [join [list \ + [list -accessPath $state(access_path)] \ + [list -statics $state(staticsok)] \ + [list -nested $state(nestedok)] \ + [list -deleteHook $state(cleanupHook)]]] + } + 2 { + # If we have exactly 2 arguments the semantic is a "configure + # get" + lassign $args slave arg + + # get the flag sub program (we 'know' about Opt's internal + # representation of data) + set desc [lindex [::tcl::OptKeyGetDesc ::safe::interpIC] 2] + set hits [::tcl::OptHits desc $arg] + if {$hits > 1} { + return -code error [::tcl::OptAmbigous $desc $arg] + } elseif {$hits == 0} { + return -code error [::tcl::OptFlagUsage $desc $arg] + } + CheckInterp $slave + namespace upvar ::safe [VarName $slave] state + + set item [::tcl::OptCurDesc $desc] + set name [::tcl::OptName $item] + switch -exact -- $name { + -accessPath { + return [list -accessPath $state(access_path)] + } + -statics { + return [list -statics $state(staticsok)] + } + -nested { + return [list -nested $state(nestedok)] + } + -deleteHook { + return [list -deleteHook $state(cleanupHook)] + } + -noStatics { + # it is most probably a set in fact but we would need + # then to jump to the set part and it is not *sure* + # that it is a set action that the user want, so force + # it to use the unambiguous -statics ?value? instead: + return -code error\ + "ambigous query (get or set -noStatics ?)\ + use -statics instead" + } + -nestedLoadOk { + return -code error\ + "ambigous query (get or set -nestedLoadOk ?)\ + use -nested instead" + } + default { + return -code error "unknown flag $name (bug)" + } + } + } + default { + # Otherwise we want to parse the arguments like init and + # create did + set Args [::tcl::OptKeyParse ::safe::interpIC $args] + CheckInterp $slave + namespace upvar ::safe [VarName $slave] state + + # Get the current (and not the default) values of whatever has + # not been given: + if {![::tcl::OptProcArgGiven -accessPath]} { + set doreset 0 + set accessPath $state(access_path) + } else { + set doreset 1 + } + if { + ![::tcl::OptProcArgGiven -statics] + && ![::tcl::OptProcArgGiven -noStatics] + } then { + set statics $state(staticsok) + } else { + set statics [InterpStatics] + } + if { + [::tcl::OptProcArgGiven -nested] || + [::tcl::OptProcArgGiven -nestedLoadOk] + } then { + set nested [InterpNested] + } else { + set nested $state(nestedok) + } + if {![::tcl::OptProcArgGiven -deleteHook]} { + set deleteHook $state(cleanupHook) + } + # we can now reconfigure : + InterpSetConfig $slave $accessPath $statics $nested $deleteHook + # auto_reset the child (to completely synch the new access_path) + if {$doreset} { + if {[catch {::interp eval $slave {auto_reset}} msg]} { + Log $slave "auto_reset failed: $msg" + } else { + Log $slave "successful auto_reset" NOTICE + } + + # Sync the paths used to search for Tcl modules. + ::interp eval $slave {tcl::tm::path remove {*}[tcl::tm::list]} + if {[llength $state(tm_path_slave)] > 0} { + ::interp eval $slave [list \ + ::tcl::tm::add {*}[lreverse $state(tm_path_slave)]] + } + + # Remove stale "package ifneeded" data for non-loaded packages. + # - Not for loaded packages, because "package forget" erases + # data from "package provide" as well as "package ifneeded". + # - This is OK because the script cannot reload any version of + # the package unless it first does "package forget". 
+ foreach pkg [::interp eval $slave {package names}] { + if {[::interp eval $slave [list package provide $pkg]] eq ""} { + ::interp eval $slave [list package forget $pkg] + } + } + } + return + } + } +} + +#### +# +# Functions that actually implements the exported APIs +# +#### + +# +# safe::InterpCreate : doing the real job +# +# This procedure creates a safe interpreter and initializes it with the safe +# base aliases. +# NB: child name must be simple alphanumeric string, no spaces, no (), no +# {},... {because the state array is stored as part of the name} +# +# Returns the child name. +# +# Optional Arguments : +# + child name : if empty, generated name will be used +# + access_path: path list controlling where load/source can occur, +# if empty: the parent auto_path will be used. +# + staticsok : flag, if 0 :no static package can be loaded (load {} Xxx) +# if 1 :static packages are ok. +# + nestedok: flag, if 0 :no loading to sub-sub interps (load xx xx sub) +# if 1 : multiple levels are ok. + +# use the full name and no indent so auto_mkIndex can find us +proc ::safe::InterpCreate { + child + access_path + staticsok + nestedok + deletehook + } { + # Create the child. + # If evaluated in ::safe, the interpreter command for foo is ::foo; + # but for foo::bar is safe::foo::bar. So evaluate in :: instead. + if {$child ne ""} { + namespace eval :: [list ::interp create -safe $child] + } else { + # empty argument: generate child name + set child [::interp create -safe] + } + Log $child "Created" NOTICE + + # Initialize it. (returns child name) + InterpInit $child $access_path $staticsok $nestedok $deletehook +} + +# +# InterpSetConfig (was setAccessPath) : +# Sets up child virtual auto_path and corresponding structure within +# the parent. Also sets the tcl_library in the child to be the first +# directory in the path. +# NB: If you change the path after the child has been initialized you +# probably need to call "auto_reset" in the child in order that it gets +# the right auto_index() array values. + +proc ::safe::InterpSetConfig {child access_path staticsok nestedok deletehook} { + global auto_path + + # determine and store the access path if empty + if {$access_path eq ""} { + set access_path $auto_path + + # Make sure that tcl_library is in auto_path and at the first + # position (needed by setAccessPath) + set where [lsearch -exact $access_path [info library]] + if {$where < 0} { + # not found, add it. + set access_path [linsert $access_path 0 [info library]] + Log $child "tcl_library was not in auto_path,\ + added it to slave's access_path" NOTICE + } elseif {$where != 0} { + # not first, move it first + set access_path [linsert \ + [lreplace $access_path $where $where] \ + 0 [info library]] + Log $child "tcl_libray was not in first in auto_path,\ + moved it to front of slave's access_path" NOTICE + } + + # Add 1st level subdirs (will searched by auto loading from tcl + # code in the child using glob and thus fail, so we add them here + # so by default it works the same). + set access_path [AddSubDirs $access_path] + } + + Log $child "Setting accessPath=($access_path) staticsok=$staticsok\ + nestedok=$nestedok deletehook=($deletehook)" NOTICE + + namespace upvar ::safe [VarName $child] state + + # clear old autopath if it existed + # build new one + # Extend the access list with the paths used to look for Tcl Modules. + # We save the virtual form separately as well, as syncing it with the + # child has to be deferred until the necessary commands are present for + # setup. 
+ + set norm_access_path {} + set slave_access_path {} + set map_access_path {} + set remap_access_path {} + set slave_tm_path {} + + set i 0 + foreach dir $access_path { + set token [PathToken $i] + lappend slave_access_path $token + lappend map_access_path $token $dir + lappend remap_access_path $dir $token + lappend norm_access_path [file normalize $dir] + incr i + } + + set morepaths [::tcl::tm::list] + set firstpass 1 + while {[llength $morepaths]} { + set addpaths $morepaths + set morepaths {} + + foreach dir $addpaths { + # Prevent the addition of dirs on the tm list to the + # result if they are already known. + if {[dict exists $remap_access_path $dir]} { + if {$firstpass} { + # $dir is in [::tcl::tm::list] and belongs in the slave_tm_path. + # Later passes handle subdirectories, which belong in the + # access path but not in the module path. + lappend slave_tm_path [dict get $remap_access_path $dir] + } + continue + } + + set token [PathToken $i] + lappend access_path $dir + lappend slave_access_path $token + lappend map_access_path $token $dir + lappend remap_access_path $dir $token + lappend norm_access_path [file normalize $dir] + if {$firstpass} { + # $dir is in [::tcl::tm::list] and belongs in the slave_tm_path. + # Later passes handle subdirectories, which belong in the + # access path but not in the module path. + lappend slave_tm_path $token + } + incr i + + # [Bug 2854929] + # Recursively find deeper paths which may contain + # modules. Required to handle modules with names like + # 'platform::shell', which translate into + # 'platform/shell-X.tm', i.e arbitrarily deep + # subdirectories. + lappend morepaths {*}[glob -nocomplain -directory $dir -type d *] + } + set firstpass 0 + } + + set state(access_path) $access_path + set state(access_path,map) $map_access_path + set state(access_path,remap) $remap_access_path + set state(access_path,norm) $norm_access_path + set state(access_path,slave) $slave_access_path + set state(tm_path_slave) $slave_tm_path + set state(staticsok) $staticsok + set state(nestedok) $nestedok + set state(cleanupHook) $deletehook + + SyncAccessPath $child + return +} + +# +# +# FindInAccessPath: +# Search for a real directory and returns its virtual Id (including the +# "$") +proc ::safe::interpFindInAccessPath {child path} { + CheckInterp $child + namespace upvar ::safe [VarName $child] state + + if {![dict exists $state(access_path,remap) $path]} { + return -code error "$path not found in access path" + } + + return [dict get $state(access_path,remap) $path] +} + +# +# addToAccessPath: +# add (if needed) a real directory to access path and return its +# virtual token (including the "$"). +proc ::safe::interpAddToAccessPath {child path} { + # first check if the directory is already in there + # (inlined interpFindInAccessPath). + CheckInterp $child + namespace upvar ::safe [VarName $child] state + + if {[dict exists $state(access_path,remap) $path]} { + return [dict get $state(access_path,remap) $path] + } + + # new one, add it: + set token [PathToken [llength $state(access_path)]] + + lappend state(access_path) $path + lappend state(access_path,slave) $token + lappend state(access_path,map) $token $path + lappend state(access_path,remap) $path $token + lappend state(access_path,norm) [file normalize $path] + + SyncAccessPath $child + return $token +} + +# This procedure applies the initializations to an already existing +# interpreter. It is useful when you want to install the safe base aliases +# into a preexisting safe interpreter. 
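+# Editorial usage sketch (hypothetical, not part of the upstream safe.tcl):
+# callers normally go through the public wrappers rather than InterpInit
+# itself, along the lines of:
+if {0} {
+    set child [safe::interpCreate]
+    ::interp eval $child {package require http}  ;# resolved via the access path
+    safe::interpDelete $child
+}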
+proc ::safe::InterpInit { + child + access_path + staticsok + nestedok + deletehook + } { + # Configure will generate an access_path when access_path is empty. + InterpSetConfig $child $access_path $staticsok $nestedok $deletehook + + # NB we need to add [namespace current], aliases are always absolute + # paths. + + # These aliases let the child load files to define new commands + # This alias lets the child use the encoding names, convertfrom, + # convertto, and system, but not "encoding system " to set the + # system encoding. + # Handling Tcl Modules, we need a restricted form of Glob. + # This alias interposes on the 'exit' command and cleanly terminates + # the child. + + foreach {command alias} { + source AliasSource + load AliasLoad + encoding AliasEncoding + exit interpDelete + glob AliasGlob + } { + ::interp alias $child $command {} [namespace current]::$alias $child + } + + # This alias lets the child have access to a subset of the 'file' + # command functionality. + + ::interp expose $child file + foreach subcommand {dirname extension rootname tail} { + ::interp alias $child ::tcl::file::$subcommand {} \ + ::safe::AliasFileSubcommand $child $subcommand + } + foreach subcommand { + atime attributes copy delete executable exists isdirectory isfile + link lstat mtime mkdir nativename normalize owned readable readlink + rename size stat tempfile type volumes writable + } { + ::interp alias $child ::tcl::file::$subcommand {} \ + ::safe::BadSubcommand $child file $subcommand + } + + # Subcommands of info + foreach {subcommand alias} { + nameofexecutable AliasExeName + } { + ::interp alias $child ::tcl::info::$subcommand \ + {} [namespace current]::$alias $child + } + + # The allowed child variables already have been set by Tcl_MakeSafe(3) + + # Source init.tcl and tm.tcl into the child, to get auto_load and + # other procedures defined: + + if {[catch {::interp eval $child { + source [file join $tcl_library init.tcl] + }} msg opt]} { + Log $child "can't source init.tcl ($msg)" + return -options $opt "can't source init.tcl into slave $child ($msg)" + } + + if {[catch {::interp eval $child { + source [file join $tcl_library tm.tcl] + }} msg opt]} { + Log $child "can't source tm.tcl ($msg)" + return -options $opt "can't source tm.tcl into slave $child ($msg)" + } + + # Sync the paths used to search for Tcl modules. This can be done only + # now, after tm.tcl was loaded. + namespace upvar ::safe [VarName $child] state + if {[llength $state(tm_path_slave)] > 0} { + ::interp eval $child [list \ + ::tcl::tm::add {*}[lreverse $state(tm_path_slave)]] + } + return $child +} + +# Add (only if needed, avoid duplicates) 1 level of sub directories to an +# existing path list. Also removes non directories from the returned +# list. +proc ::safe::AddSubDirs {pathList} { + set res {} + foreach dir $pathList { + if {[file isdirectory $dir]} { + # check that we don't have it yet as a children of a previous + # dir + if {$dir ni $res} { + lappend res $dir + } + foreach sub [glob -directory $dir -nocomplain *] { + if {[file isdirectory $sub] && ($sub ni $res)} { + # new sub dir, add it ! + lappend res $sub + } + } + } + } + return $res +} + +# This procedure deletes a safe interpreter managed by Safe Tcl and cleans up +# associated state. +# - The command will also delete non-Safe-Base interpreters. +# - This is regrettable, but to avoid breaking existing code this should be +# amended at the next major revision by uncommenting "CheckInterp". 
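+# Minimal sketch (the name "banker" is hypothetical):
+#     ::safe::interpCreate banker
+#     ...
+#     ::safe::interpDelete banker   ;# runs any -deleteHook, discards state, deletes interp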
+ +proc ::safe::interpDelete {child} { + Log $child "About to delete" NOTICE + + # CheckInterp $child + namespace upvar ::safe [VarName $child] state + + # When an interpreter is deleted with [interp delete], any sub-interpreters + # are deleted automatically, but this leaves behind their data in the Safe + # Base. To clean up properly, we call safe::interpDelete recursively on each + # Safe Base sub-interpreter, so each one is deleted cleanly and not by + # the automatic mechanism built into [interp delete]. + foreach sub [interp children $child] { + if {[info exists ::safe::[VarName [list $child $sub]]]} { + ::safe::interpDelete [list $child $sub] + } + } + + # If the child has a cleanup hook registered, call it. Check the + # existence because we might be called to delete an interp which has + # not been registered with us at all + + if {[info exists state(cleanupHook)]} { + set hook $state(cleanupHook) + if {[llength $hook]} { + # remove the hook now, otherwise if the hook calls us somehow, + # we'll loop + unset state(cleanupHook) + try { + {*}$hook $child + } on error err { + Log $child "Delete hook error ($err)" + } + } + } + + # Discard the global array of state associated with the child, and + # delete the interpreter. + + if {[info exists state]} { + unset state + } + + # if we have been called twice, the interp might have been deleted + # already + if {[::interp exists $child]} { + ::interp delete $child + Log $child "Deleted" NOTICE + } + + return +} + +# Set (or get) the logging mechanism + +proc ::safe::setLogCmd {args} { + variable Log + set la [llength $args] + if {$la == 0} { + return $Log + } elseif {$la == 1} { + set Log [lindex $args 0] + } else { + set Log $args + } + + if {$Log eq ""} { + # Disable logging completely. Calls to it will be compiled out + # of all users. + proc ::safe::Log {args} {} + } else { + # Activate logging, define proper command. + + proc ::safe::Log {child msg {type ERROR}} { + variable Log + {*}$Log "$type for slave $child : $msg" + return + } + } +} + +# ------------------- END OF PUBLIC METHODS ------------ + +# +# Sets the child auto_path to the parent recorded value. Also sets +# tcl_library to the first token of the virtual path. +# +proc ::safe::SyncAccessPath {child} { + namespace upvar ::safe [VarName $child] state + + set slave_access_path $state(access_path,slave) + ::interp eval $child [list set auto_path $slave_access_path] + + Log $child "auto_path in $child has been set to $slave_access_path"\ + NOTICE + + # This code assumes that info library is the first element in the + # list of auto_path's. See -> InterpSetConfig for the code which + # ensures this condition. + + ::interp eval $child [list \ + set tcl_library [lindex $slave_access_path 0]] +} + +# Returns the virtual token for directory number N. +proc ::safe::PathToken {n} { + # We need to have a ":" in the token string so [file join] on the + # mac won't turn it into a relative path. + return "\$p(:$n:)" ;# Form tested by case 7.2 +} + +# +# translate virtual path into real path +# +proc ::safe::TranslatePath {child path} { + namespace upvar ::safe [VarName $child] state + + # somehow strip the namespaces 'functionality' out (the danger is that + # we would strip valid macintosh "../" queries... : + if {[string match "*::*" $path] || [string match "*..*" $path]} { + return -code error "invalid characters in path $path" + } + + # Use a cached map instead of computed local vars and subst. 
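+    # For example (hypothetical mapping): with access_path,map containing
+    # {$p(:0:) /usr/lib/tcl8.6}, a child-supplied "$p(:0:)/opt0.4" maps back
+    # to "/usr/lib/tcl8.6/opt0.4".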
+ + return [string map $state(access_path,map) $path] +} + +# file name control (limit access to files/resources that should be a +# valid tcl source file) +proc ::safe::CheckFileName {child file} { + # This used to limit what can be sourced to ".tcl" and forbid files + # with more than 1 dot and longer than 14 chars, but I changed that + # for 8.4 as a safe interp has enough internal protection already to + # allow sourcing anything. - hobbs + + if {![file exists $file]} { + # don't tell the file path + return -code error "no such file or directory" + } + + if {![file readable $file]} { + # don't tell the file path + return -code error "not readable" + } +} + +# AliasFileSubcommand handles selected subcommands of [file] in safe +# interpreters that are *almost* safe. In particular, it just acts to +# prevent discovery of what home directories exist. + +proc ::safe::AliasFileSubcommand {child subcommand name} { + if {[string match ~* $name]} { + set name ./$name + } + tailcall ::interp invokehidden $child tcl:file:$subcommand $name +} + +# AliasGlob is the target of the "glob" alias in safe interpreters. + +proc ::safe::AliasGlob {child args} { + Log $child "GLOB ! $args" NOTICE + set cmd {} + set at 0 + array set got { + -directory 0 + -nocomplain 0 + -join 0 + -tails 0 + -- 0 + } + + if {$::tcl_platform(platform) eq "windows"} { + set dirPartRE {^(.*)[\\/]([^\\/]*)$} + } else { + set dirPartRE {^(.*)/([^/]*)$} + } + + set dir {} + set virtualdir {} + + while {$at < [llength $args]} { + switch -glob -- [set opt [lindex $args $at]] { + -nocomplain - -- - -tails { + lappend cmd $opt + set got($opt) 1 + incr at + } + -join { + set got($opt) 1 + incr at + } + -types - -type { + lappend cmd -types [lindex $args [incr at]] + incr at + } + -directory { + if {$got($opt)} { + return -code error \ + {"-directory" cannot be used with "-path"} + } + set got($opt) 1 + set virtualdir [lindex $args [incr at]] + incr at + } + -* { + Log $child "Safe base rejecting glob option '$opt'" + return -code error "Safe base rejecting glob option '$opt'" + } + default { + break + } + } + if {$got(--)} break + } + + # Get the real path from the virtual one and check that the path is in the + # access path of that child. Done after basic argument processing so that + # we know if -nocomplain is set. + if {$got(-directory)} { + try { + set dir [TranslatePath $child $virtualdir] + DirInAccessPath $child $dir + } on error msg { + Log $child $msg + if {$got(-nocomplain)} return + return -code error "permission denied" + } + if {$got(--)} { + set cmd [linsert $cmd end-1 -directory $dir] + } else { + lappend cmd -directory $dir + } + } else { + # The code after this "if ... else" block would conspire to return with + # no results in this case, if it were allowed to proceed. Instead, + # return now and reduce the number of cases to be considered later. + Log $child {option -directory must be supplied} + if {$got(-nocomplain)} return + return -code error "permission denied" + } + + # Apply the -join semantics ourselves. + if {$got(-join)} { + set args [lreplace $args $at end [join [lrange $args $at end] "/"]] + } + + # Process the pattern arguments. If we've done a join there is only one + # pattern argument. + + set firstPattern [llength $cmd] + foreach opt [lrange $args $at end] { + if {![regexp $dirPartRE $opt -> thedir thefile]} { + set thedir . + # The *.tm search comes here. + } + # "Special" treatment for (joined) argument {*/pkgIndex.tcl}. 
+ # Do the expansion of "*" here, and filter out any directories that are + # not in the access path. The outcome is to lappend to cmd a path of + # the form $virtualdir/subdir/pkgIndex.tcl for each subdirectory subdir, + # after removing any subdir that are not in the access path. + if {($thedir eq "*") && ($thefile eq "pkgIndex.tcl")} { + set mapped 0 + foreach d [glob -directory [TranslatePath $child $virtualdir] \ + -types d -tails *] { + catch { + DirInAccessPath $child \ + [TranslatePath $child [file join $virtualdir $d]] + lappend cmd [file join $d $thefile] + set mapped 1 + } + } + if {$mapped} continue + # Don't [continue] if */pkgIndex.tcl has no matches in the access + # path. The pattern will now receive the same treatment as a + # "non-special" pattern (and will fail because it includes a "*" in + # the directory name). + } + # Any directory pattern that is not an exact (i.e. non-glob) match to a + # directory in the access path will be rejected here. + # - Rejections include any directory pattern that has glob matching + # patterns "*", "?", backslashes, braces or square brackets, (UNLESS + # it corresponds to a genuine directory name AND that directory is in + # the access path). + # - The only "special matching characters" that remain in patterns for + # processing by glob are in the filename tail. + # - [file join $anything ~${foo}] is ~${foo}, which is not an exact + # match to any directory in the access path. Hence directory patterns + # that begin with "~" are rejected here. Tests safe-16.[5-8] check + # that "file join" remains as required and does not expand ~${foo}. + # - Bug [3529949] relates to unwanted expansion of ~${foo} and this is + # how the present code avoids the bug. All tests safe-16.* relate. + try { + DirInAccessPath $child [TranslatePath $child \ + [file join $virtualdir $thedir]] + } on error msg { + Log $child $msg + if {$got(-nocomplain)} continue + return -code error "permission denied" + } + lappend cmd $opt + } + + Log $child "GLOB = $cmd" NOTICE + + if {$got(-nocomplain) && [llength $cmd] eq $firstPattern} { + return + } + try { + # >>>>>>>>>> HERE'S THE CALL TO SAFE INTERP GLOB <<<<<<<<<< + # - Pattern arguments added to cmd have NOT been translated from tokens. + # Only the virtualdir is translated (to dir). + # - In the pkgIndex.tcl case, there is no "*" in the pattern arguments, + # which are a list of names each with tail pkgIndex.tcl. The purpose + # of the call to glob is to remove the names for which the file does + # not exist. + set entries [::interp invokehidden $child glob {*}$cmd] + } on error msg { + # This is the only place that a call with -nocomplain and no invalid + # "dash-options" can return an error. + Log $child $msg + return -code error "script error" + } + + Log $child "GLOB < $entries" NOTICE + + # Translate path back to what the child should see. + set res {} + set l [string length $dir] + foreach p $entries { + if {[string equal -length $l $dir $p]} { + set p [string replace $p 0 [expr {$l-1}] $virtualdir] + } + lappend res $p + } + + Log $child "GLOB > $res" NOTICE + return $res +} + +# AliasSource is the target of the "source" alias in safe interpreters. + +proc ::safe::AliasSource {child args} { + set argc [llength $args] + # Extended for handling of Tcl Modules to allow not only "source + # filename", but "source -encoding E filename" as well. 
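+    # That is, both of these child-side invocations are accepted (the path
+    # token is hypothetical):
+    #     source $p(:0:)/foo.tcl
+    #     source -encoding utf-8 $p(:0:)/foo.tcl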
+ if {[lindex $args 0] eq "-encoding"} { + incr argc -2 + set encoding [lindex $args 1] + set at 2 + if {$encoding eq "identity"} { + Log $child "attempt to use the identity encoding" + return -code error "permission denied" + } + } else { + set at 0 + set encoding {} + } + if {$argc != 1} { + set msg "wrong # args: should be \"source ?-encoding E? fileName\"" + Log $child "$msg ($args)" + return -code error $msg + } + set file [lindex $args $at] + + # get the real path from the virtual one. + if {[catch { + set realfile [TranslatePath $child $file] + } msg]} { + Log $child $msg + return -code error "permission denied" + } + + # check that the path is in the access path of that child + if {[catch { + FileInAccessPath $child $realfile + } msg]} { + Log $child $msg + return -code error "permission denied" + } + + # Check that the filename exists and is readable. If it is not, deliver + # this -errorcode so that caller in tclPkgUnknown does not write a message + # to tclLog. Has no effect on other callers of ::source, which are in + # "package ifneeded" scripts. + if {[catch { + CheckFileName $child $realfile + } msg]} { + Log $child "$realfile:$msg" + return -code error -errorcode {POSIX EACCES} $msg + } + + # Passed all the tests, lets source it. Note that we do this all manually + # because we want to control [info script] in the child so information + # doesn't leak so much. [Bug 2913625] + set old [::interp eval $child {info script}] + set replacementMsg "script error" + set code [catch { + set f [open $realfile] + fconfigure $f -eofchar "\x1A {}" + if {$encoding ne ""} { + fconfigure $f -encoding $encoding + } + set contents [read $f] + close $f + ::interp eval $child [list info script $file] + } msg opt] + if {$code == 0} { + set code [catch {::interp eval $child $contents} msg opt] + set replacementMsg $msg + } + catch {interp eval $child [list info script $old]} + # Note that all non-errors are fine result codes from [source], so we must + # take a little care to do it properly. [Bug 2923613] + if {$code == 1} { + Log $child $msg + return -code error $replacementMsg + } + return -code $code -options $opt $msg +} + +# AliasLoad is the target of the "load" alias in safe interpreters. + +proc ::safe::AliasLoad {child file args} { + set argc [llength $args] + if {$argc > 2} { + set msg "load error: too many arguments" + Log $child "$msg ($argc) {$file $args}" + return -code error $msg + } + + # package name (can be empty if file is not). + set package [lindex $args 0] + + namespace upvar ::safe [VarName $child] state + + # Determine where to load. load use a relative interp path and {} + # means self, so we can directly and safely use passed arg. + set target [lindex $args 1] + if {$target ne ""} { + # we will try to load into a sub sub interp; check that we want to + # authorize that. + if {!$state(nestedok)} { + Log $child "loading to a sub interp (nestedok)\ + disabled (trying to load $package to $target)" + return -code error "permission denied (nested load)" + } + } + + # Determine what kind of load is requested + if {$file eq ""} { + # static package loading + if {$package eq ""} { + set msg "load error: empty filename and no package name" + Log $child $msg + return -code error $msg + } + if {!$state(staticsok)} { + Log $child "static packages loading disabled\ + (trying to load $package to $target)" + return -code error "permission denied (static package)" + } + } else { + # file loading + + # get the real path from the virtual one. 
+ try { + set file [TranslatePath $child $file] + } on error msg { + Log $child $msg + return -code error "permission denied" + } + + # check the translated path + try { + FileInAccessPath $child $file + } on error msg { + Log $child $msg + return -code error "permission denied (path)" + } + } + + try { + return [::interp invokehidden $child load $file $package $target] + } on error msg { + # Some packages return no error message. + set msg0 "load of binary library for package $package failed" + if {$msg eq {}} { + set msg $msg0 + } else { + set msg "$msg0: $msg" + } + Log $child $msg + return -code error $msg + } +} + +# FileInAccessPath raises an error if the file is not found in the list of +# directories contained in the (parent side recorded) child's access path. + +# the security here relies on "file dirname" answering the proper +# result... needs checking ? +proc ::safe::FileInAccessPath {child file} { + namespace upvar ::safe [VarName $child] state + set access_path $state(access_path) + + if {[file isdirectory $file]} { + return -code error "\"$file\": is a directory" + } + set parent [file dirname $file] + + # Normalize paths for comparison since lsearch knows nothing of + # potential pathname anomalies. + set norm_parent [file normalize $parent] + + namespace upvar ::safe [VarName $child] state + if {$norm_parent ni $state(access_path,norm)} { + return -code error "\"$file\": not in access_path" + } +} + +proc ::safe::DirInAccessPath {child dir} { + namespace upvar ::safe [VarName $child] state + set access_path $state(access_path) + + if {[file isfile $dir]} { + return -code error "\"$dir\": is a file" + } + + # Normalize paths for comparison since lsearch knows nothing of + # potential pathname anomalies. + set norm_dir [file normalize $dir] + + namespace upvar ::safe [VarName $child] state + if {$norm_dir ni $state(access_path,norm)} { + return -code error "\"$dir\": not in access_path" + } +} + +# This procedure is used to report an attempt to use an unsafe member of an +# ensemble command. + +proc ::safe::BadSubcommand {child command subcommand args} { + set msg "not allowed to invoke subcommand $subcommand of $command" + Log $child $msg + return -code error -errorcode {TCL SAFE SUBCOMMAND} $msg +} + +# AliasEncoding is the target of the "encoding" alias in safe interpreters. + +proc ::safe::AliasEncoding {child option args} { + # Note that [encoding dirs] is not supported in safe children at all + set subcommands {convertfrom convertto names system} + try { + set option [tcl::prefix match -error [list -level 1 -errorcode \ + [list TCL LOOKUP INDEX option $option]] $subcommands $option] + # Special case: [encoding system] ok, but [encoding system foo] not + if {$option eq "system" && [llength $args]} { + return -code error -errorcode {TCL WRONGARGS} \ + "wrong # args: should be \"encoding system\"" + } + } on error {msg options} { + Log $child $msg + return -options $options $msg + } + tailcall ::interp invokehidden $child encoding $option {*}$args +} + +# Various minor hiding of platform features. [Bug 2913625] + +proc ::safe::AliasExeName {child} { + return "" +} + +# ------------------------------------------------------------------------------ +# Using Interpreter Names with Namespace Qualifiers +# ------------------------------------------------------------------------------ +# (1) We wish to preserve compatibility with existing code, in which Safe Base +# interpreter names have no namespace qualifiers. 
+# (2) safe::interpCreate and the rest of the Safe Base previously could not +# accept namespace qualifiers in an interpreter name. +# (3) The interp command will accept namespace qualifiers in an interpreter +# name, but accepts distinct interpreters that will have the same command +# name (e.g. foo, ::foo, and :::foo) (bug 66c2e8c974). +# (4) To satisfy these constraints, Safe Base interpreter names will be fully +# qualified namespace names with no excess colons and with the leading "::" +# omitted. +# (5) Trailing "::" implies a namespace tail {}, which interp reads as {{}}. +# Reject such names. +# (6) We could: +# (a) EITHER reject usable but non-compliant names (e.g. excess colons) in +# interpCreate, interpInit; +# (b) OR accept such names and then translate to a compliant name in every +# command. +# The problem with (b) is that the user will expect to use the name with the +# interp command and will find that it is not recognised. +# E.g "interpCreate ::foo" creates interpreter "foo", and the user's name +# "::foo" works with all the Safe Base commands, but "interp eval ::foo" +# fails. +# So we choose (a). +# (7) The command +# namespace upvar ::safe S$child state +# becomes +# namespace upvar ::safe [VarName $child] state +# ------------------------------------------------------------------------------ + +proc ::safe::RejectExcessColons {child} { + set stripped [regsub -all -- {:::*} $child ::] + if {[string range $stripped end-1 end] eq {::}} { + return -code error {interpreter name must not end in "::"} + } + if {$stripped ne $child} { + set msg {interpreter name has excess colons in namespace separators} + return -code error $msg + } + if {[string range $stripped 0 1] eq {::}} { + return -code error {interpreter name must not begin "::"} + } + return +} + +proc ::safe::VarName {child} { + # return S$child + return S[string map {:: @N @ @A} $child] +} + +proc ::safe::Setup {} { + #### + # + # Setup the arguments parsing + # + #### + + # Share the descriptions + set temp [::tcl::OptKeyRegister { + {-accessPath -list {} "access path for the slave"} + {-noStatics "prevent loading of statically linked pkgs"} + {-statics true "loading of statically linked pkgs"} + {-nestedLoadOk "allow nested loading"} + {-nested false "nested loading"} + {-deleteHook -script {} "delete hook"} + }] + + # create case (slave is optional) + ::tcl::OptKeyRegister { + {?slave? -name {} "name of the slave (optional)"} + } ::safe::interpCreate + + # adding the flags sub programs to the command program (relying on Opt's + # internal implementation details) + lappend ::tcl::OptDesc(::safe::interpCreate) $::tcl::OptDesc($temp) + + # init and configure (slave is needed) + ::tcl::OptKeyRegister { + {slave -name {} "name of the slave"} + } ::safe::interpIC + + # adding the flags sub programs to the command program (relying on Opt's + # internal implementation details) + lappend ::tcl::OptDesc(::safe::interpIC) $::tcl::OptDesc($temp) + + # temp not needed anymore + ::tcl::OptKeyDelete $temp + + #### + # + # Default: No logging. + # + #### + + setLogCmd {} + + # Log eventually. + # To enable error logging, set Log to {puts stderr} for instance, + # via setLogCmd. + return +} + +namespace eval ::safe { + # internal variables + + # Log command, set via 'setLogCmd'. Logging is disabled when empty. + variable Log {} + + # The package maintains a state array per child interp under its + # control. The name of this array is S. This array is + # brought into scope where needed, using 'namespace upvar'. 
The S + # prefix is used to avoid that a child interp called "Log" smashes + # the "Log" variable. + # + # The array's elements are: + # + # access_path : List of paths accessible to the child. + # access_path,norm : Ditto, in normalized form. + # access_path,slave : Ditto, as the path tokens as seen by the child. + # access_path,map : dict ( token -> path ) + # access_path,remap : dict ( path -> token ) + # tm_path_slave : List of TM root directories, as tokens seen by the child. + # staticsok : Value of option -statics + # nestedok : Value of option -nested + # cleanupHook : Value of option -deleteHook +} + +::safe::Setup diff --git a/mplug_owl2/lib/tcl8.6/tclIndex b/mplug_owl2/lib/tcl8.6/tclIndex new file mode 100644 index 0000000000000000000000000000000000000000..a186a7d258d56bad2090c618bdf2bc3645dfd882 --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/tclIndex @@ -0,0 +1,79 @@ +# Tcl autoload index file, version 2.0 +# -*- tcl -*- +# This file is generated by the "auto_mkindex" command +# and sourced to set up indexing information for one or +# more commands. Typically each line is a command that +# sets an element in the auto_index array, where the +# element name is the name of a command and the value is +# a script that loads the command. + +set auto_index(auto_reset) [list source [file join $dir auto.tcl]] +set auto_index(tcl_findLibrary) [list source [file join $dir auto.tcl]] +set auto_index(auto_mkindex) [list source [file join $dir auto.tcl]] +set auto_index(auto_mkindex_old) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::init) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::cleanup) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::mkindex) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::hook) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::slavehook) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::command) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::commandInit) [list source [file join $dir auto.tcl]] +set auto_index(::auto_mkindex_parser::fullname) [list source [file join $dir auto.tcl]] +set auto_index(history) [list source [file join $dir history.tcl]] +set auto_index(::tcl::history) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistAdd) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistKeep) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistClear) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistInfo) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistRedo) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistIndex) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistEvent) [list source [file join $dir history.tcl]] +set auto_index(::tcl::HistChange) [list source [file join $dir history.tcl]] +set auto_index(pkg_mkIndex) [list source [file join $dir package.tcl]] +set auto_index(tclPkgSetup) [list source [file join $dir package.tcl]] +set auto_index(tclPkgUnknown) [list source [file join $dir package.tcl]] +set auto_index(::tcl::MacOSXPkgUnknown) [list source [file join $dir package.tcl]] +set auto_index(::pkg::create) [list source [file join $dir package.tcl]] +set auto_index(parray) [list source [file join $dir parray.tcl]] +set auto_index(::safe::InterpStatics) [list source [file join $dir safe.tcl]] +set 
auto_index(::safe::InterpNested) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpCreate) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpInit) [list source [file join $dir safe.tcl]] +set auto_index(::safe::CheckInterp) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpConfigure) [list source [file join $dir safe.tcl]] +set auto_index(::safe::InterpCreate) [list source [file join $dir safe.tcl]] +set auto_index(::safe::InterpSetConfig) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpFindInAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpAddToAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::InterpInit) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AddSubDirs) [list source [file join $dir safe.tcl]] +set auto_index(::safe::interpDelete) [list source [file join $dir safe.tcl]] +set auto_index(::safe::setLogCmd) [list source [file join $dir safe.tcl]] +set auto_index(::safe::SyncAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::PathToken) [list source [file join $dir safe.tcl]] +set auto_index(::safe::TranslatePath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::Log) [list source [file join $dir safe.tcl]] +set auto_index(::safe::CheckFileName) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasGlob) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasSource) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasLoad) [list source [file join $dir safe.tcl]] +set auto_index(::safe::FileInAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::DirInAccessPath) [list source [file join $dir safe.tcl]] +set auto_index(::safe::Subset) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasSubset) [list source [file join $dir safe.tcl]] +set auto_index(::safe::AliasEncoding) [list source [file join $dir safe.tcl]] +set auto_index(tcl_wordBreakAfter) [list source [file join $dir word.tcl]] +set auto_index(tcl_wordBreakBefore) [list source [file join $dir word.tcl]] +set auto_index(tcl_endOfWord) [list source [file join $dir word.tcl]] +set auto_index(tcl_startOfNextWord) [list source [file join $dir word.tcl]] +set auto_index(tcl_startOfPreviousWord) [list source [file join $dir word.tcl]] +set auto_index(::tcl::tm::add) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::remove) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::list) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::Defaults) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::UnknownHandler) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::roots) [list source [file join $dir tm.tcl]] +set auto_index(::tcl::tm::path) [list source [file join $dir tm.tcl]] +if {[namespace exists ::tcl::unsupported]} { + set auto_index(timerate) {namespace import ::tcl::unsupported::timerate} +} diff --git a/mplug_owl2/lib/tcl8.6/word.tcl b/mplug_owl2/lib/tcl8.6/word.tcl new file mode 100644 index 0000000000000000000000000000000000000000..a993918b44c590b0b1eba3d7ae64cebf195e644a --- /dev/null +++ b/mplug_owl2/lib/tcl8.6/word.tcl @@ -0,0 +1,154 @@ +# word.tcl -- +# +# This file defines various procedures for computing word boundaries in +# strings. This file is primarily needed so Tk text and entry widgets behave +# properly for different platforms. +# +# Copyright (c) 1996 Sun Microsystems, Inc. 
+# Copyright (c) 1998 Scriptics Corporation. +# +# See the file "license.terms" for information on usage and redistribution +# of this file, and for a DISCLAIMER OF ALL WARRANTIES. + +# The following variables are used to determine which characters are +# interpreted as white space. + +if {$::tcl_platform(platform) eq "windows"} { + # Windows style - any but a Unicode space char + if {![info exists ::tcl_wordchars]} { + set ::tcl_wordchars {\S} + } + if {![info exists ::tcl_nonwordchars]} { + set ::tcl_nonwordchars {\s} + } +} else { + # Motif style - any Unicode word char (number, letter, or underscore) + if {![info exists ::tcl_wordchars]} { + set ::tcl_wordchars {\w} + } + if {![info exists ::tcl_nonwordchars]} { + set ::tcl_nonwordchars {\W} + } +} + +# Arrange for caches of the real matcher REs to be kept, which enables the REs +# themselves to be cached for greater performance (and somewhat greater +# clarity too). + +namespace eval ::tcl { + variable WordBreakRE + array set WordBreakRE {} + + proc UpdateWordBreakREs args { + # Ignores the arguments + global tcl_wordchars tcl_nonwordchars + variable WordBreakRE + + # To keep the RE strings short... + set letter $tcl_wordchars + set space $tcl_nonwordchars + + set WordBreakRE(after) "$letter$space|$space$letter" + set WordBreakRE(before) "^.*($letter$space|$space$letter)" + set WordBreakRE(end) "$space*$letter+$space" + set WordBreakRE(next) "$letter*$space+$letter" + set WordBreakRE(previous) "$space*($letter+)$space*\$" + } + + # Initialize the cache + UpdateWordBreakREs + trace add variable ::tcl_wordchars write ::tcl::UpdateWordBreakREs + trace add variable ::tcl_nonwordchars write ::tcl::UpdateWordBreakREs +} + +# tcl_wordBreakAfter -- +# +# This procedure returns the index of the first word boundary after the +# starting point in the given string, or -1 if there are no more boundaries in +# the given string. The index returned refers to the first character of the +# pair that comprises a boundary. +# +# Arguments: +# str - String to search. +# start - Index into string specifying starting point. + +proc tcl_wordBreakAfter {str start} { + variable ::tcl::WordBreakRE + set result {-1 -1} + regexp -indices -start $start -- $WordBreakRE(after) $str result + return [lindex $result 1] +} + +# tcl_wordBreakBefore -- +# +# This procedure returns the index of the first word boundary before the +# starting point in the given string, or -1 if there are no more boundaries in +# the given string. The index returned refers to the second character of the +# pair that comprises a boundary. +# +# Arguments: +# str - String to search. +# start - Index into string specifying starting point. + +proc tcl_wordBreakBefore {str start} { + variable ::tcl::WordBreakRE + set result {-1 -1} + regexp -indices -- $WordBreakRE(before) [string range $str 0 $start] result + return [lindex $result 1] +} + +# tcl_endOfWord -- +# +# This procedure returns the index of the first end-of-word location after a +# starting index in the given string. An end-of-word location is defined to be +# the first whitespace character following the first non-whitespace character +# after the starting point. Returns -1 if there are no more words after the +# starting point. +# +# Arguments: +# str - String to search. +# start - Index into string specifying starting point. 
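+#
+# Example (sketch): tcl_endOfWord "hello world" 0 returns 5, the index of
+# the space that ends the first word.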
+ +proc tcl_endOfWord {str start} { + variable ::tcl::WordBreakRE + set result {-1 -1} + regexp -indices -start $start -- $WordBreakRE(end) $str result + return [lindex $result 1] +} + +# tcl_startOfNextWord -- +# +# This procedure returns the index of the first start-of-word location after a +# starting index in the given string. A start-of-word location is defined to +# be a non-whitespace character following a whitespace character. Returns -1 +# if there are no more start-of-word locations after the starting point. +# +# Arguments: +# str - String to search. +# start - Index into string specifying starting point. + +proc tcl_startOfNextWord {str start} { + variable ::tcl::WordBreakRE + set result {-1 -1} + regexp -indices -start $start -- $WordBreakRE(next) $str result + return [lindex $result 1] +} + +# tcl_startOfPreviousWord -- +# +# This procedure returns the index of the first start-of-word location before +# a starting index in the given string. +# +# Arguments: +# str - String to search. +# start - Index into string specifying starting point. + +proc tcl_startOfPreviousWord {str start} { + variable ::tcl::WordBreakRE + set word {-1 -1} + if {$start > 0} { + regexp -indices -- $WordBreakRE(previous) [string range [string range $str 0 $start] 0 end-1] \ + result word + } + return [lindex $word 0] +} diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h new file mode 100644 index 0000000000000000000000000000000000000000..c64643546a2c1097a7a323dafc6cf5079d1b2fd9 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ATenCUDAGeneral.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include +#include + +#include + +// Use TORCH_CUDA_CPP_API or TORCH_CUDA_CU_API for exports from this folder diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b0b1412298d7b65bd0354d234bbddd09f19031a7 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh @@ -0,0 +1,47 @@ +#include + +#include + +namespace at::cuda { + +/** + Computes ceil(a / b) +*/ +template +__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) { + return (a + b - 1) / b; +} + +namespace { + +// Threads per block for our apply kernel +// FIXME: use occupancy calculator instead +constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512; +constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4; + +template +inline bool getApplyGrid(uint64_t totalElements, dim3& grid, c10::DeviceIndex curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + if (curDevice == -1) return false; + uint64_t numel_per_thread = static_cast(max_threads_per_block) * static_cast(step); + uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread); + uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0]; + if (numBlocks > maxGridX) + numBlocks = maxGridX; + grid = dim3(numBlocks); + return true; +} + +constexpr int getApplyBlocksPerSM() { + return AT_APPLY_BLOCKS_PER_SM; +} + +constexpr int getApplyBlockSize() { + return AT_APPLY_THREADS_PER_BLOCK; +} + +inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) { + return dim3(max_threads_per_block); +} + +} // anonymous namespace +} // namespace at::cuda diff --git 
a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh new file mode 100644 index 0000000000000000000000000000000000000000..5a127b4d7507f98049d59a4c95386ffeb8b1c4f1 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Atomic.cuh @@ -0,0 +1,514 @@ +#pragma once + +#include +#include +#include + +#include + +#if !(defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800)))) +#include +#endif + +template +struct AtomicFPOp; + +template <> +struct AtomicFPOp { + template + inline __device__ at::Half operator() (at::Half *address, at::Half val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::Half hsum; + do { + assumed = old; + hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + hsum = func(hsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + return hsum; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ at::BFloat16 operator() (at::BFloat16 *address, at::BFloat16 val, const func_t& func) { + unsigned int * address_as_ui = + (unsigned int *) ((char *)address - ((size_t)address & 2)); + unsigned int old = *address_as_ui; + unsigned int assumed; + + at::BFloat16 bsum; + do { + assumed = old; + bsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); + bsum = func(bsum, val); + old = (size_t)address & 2 ? (old & 0xffff) | (bsum.x << 16) : (old & 0xffff0000) | bsum.x; + old = atomicCAS(address_as_ui, assumed, old); + } while (assumed != old); + bsum.x = (size_t)address & 2 ? 
(old >> 16) : (old & 0xffff); + return bsum.x; + } +}; + +template <> +struct AtomicFPOp { + template + inline __device__ double operator() (double * address, double val, const func_t& func) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull; + unsigned long long int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, func(val, assumed)); + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __longlong_as_double(old); + } +}; + +#define ATOMIC_INTEGER_IMPL(NAME) \ +template \ +struct Atomic##NAME##IntegerImpl; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 3; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + uint32_t old = *address_as_ui; \ + uint32_t shift = offset * 8; \ + uint32_t old_byte; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_byte = (old >> shift) & 0xff; \ + newval = static_cast(func(val, static_cast(old_byte))); \ + newval = (old & ~(0x000000ff << shift)) | (newval << shift); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + size_t offset = (size_t)address & 2; \ + uint32_t * address_as_ui = (uint32_t *)((char *)address - offset); \ + bool is_32_align = offset; \ + uint32_t old = *address_as_ui; \ + uint32_t old_bytes; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + old_bytes = is_32_align ? old >> 16 : old & 0xffff; \ + newval = static_cast(func(val, static_cast(old_bytes))); \ + newval = is_32_align ? 
(old & 0xffff) | (newval << 16) : (old & 0xffff0000) | newval; \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + uint32_t * address_as_ui = (uint32_t *) (address); \ + uint32_t old = *address_as_ui; \ + uint32_t newval; \ + uint32_t assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; \ + \ +template \ +struct Atomic##NAME##IntegerImpl { \ + template \ + inline __device__ void operator()(T *address, T val, const func_t& func) { \ + unsigned long long * address_as_ui = (unsigned long long *) (address); \ + unsigned long long old = *address_as_ui; \ + unsigned long long newval; \ + unsigned long long assumed; \ + \ + do { \ + assumed = old; \ + newval = static_cast(func(val, static_cast(old))); \ + old = atomicCAS(address_as_ui, assumed, newval); \ + } while (assumed != old); \ + } \ +}; + + +# define GPU_ATOMIC_INTEGER(NAME, OP, DTYPE) \ +inline __device__ void gpuAtomic##NAME(DTYPE *address, DTYPE val) { \ +Atomic##NAME##IntegerImpl()(address, \ + val, \ + [](DTYPE a, DTYPE b) { \ + return OP; \ + }); \ +} \ + +ATOMIC_INTEGER_IMPL(Add) +GPU_ATOMIC_INTEGER(Add, a || b, bool) + +// Don't instantiate gpuAtomicAdd with the macro as it seems non-standard (see int32, int64) +inline __device__ void gpuAtomicAdd(uint8_t *address, uint8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](uint8_t a, uint8_t b) { + return a + b; + }); +} + +inline __device__ void gpuAtomicAdd(int8_t *address, int8_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int8_t a, int8_t b) { + return a + b; + }); +} + +inline __device__ void gpuAtomicAdd(int16_t *address, int16_t val) { + AtomicAddIntegerImpl()(address, + val, + [](int16_t a, int16_t b) { + return a + b; + }); +} + +inline __device__ int32_t gpuAtomicAdd(int32_t *address, int32_t val) { + return atomicAdd(address, val); +} + +inline __device__ void gpuAtomicAdd(int64_t *address, int64_t val) { +#if defined(USE_ROCM) + __atomic_fetch_add(address, val, __ATOMIC_RELAXED); +#else + static_assert(sizeof(unsigned long long int) == sizeof(int64_t), "bitwidth change is not allowed"); + atomicAdd(reinterpret_cast(address), static_cast(val)); +#endif +} + +inline __device__ at::Half gpuAtomicAdd(at::Half *address, at::Half val) { +#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700))) + return AtomicFPOp()(address, val, + [](at::Half hsum, at::Half val) { + return hsum + val; + }); +#else + return atomicAdd(reinterpret_cast<__half*>(address), val); +#endif +} + +inline __device__ at::BFloat16 gpuAtomicAdd(at::BFloat16 *address, at::BFloat16 val) { +#if defined(USE_ROCM) || ((defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))) +return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum + val; + }); +#else + __nv_bfloat16 r = atomicAdd(reinterpret_cast<__nv_bfloat16*>(address), *reinterpret_cast<__nv_bfloat16*>(&val)); + return *reinterpret_cast(&r); +#endif +} + +#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 600) +// from CUDA C Programmic Guide +inline __device__ double atomicAdd(double* address, double val) +#if defined(__clang__) && defined(__CUDA__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wgcc-compat" + __attribute__((enable_if(true, ""))) +#pragma GCC 
diagnostic pop +#endif +{ + + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(val + __longlong_as_double(assumed)); + }); +} +#elif defined(USE_ROCM) || !(defined(__CUDA_ARCH__)) + +/* Note [hip-clang differences to hcc] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * The upcoming hip-clang compiler for ROCm differs from hcc in a few details. + * It exports the __HIP__ macro, we can hence differentiate between hcc and + * hip-clang. In the below, hcc only received support for atomicAdd with double + * typing after work week 18312. hip-clang had support from the first version. + * In general, the code-visible differences between hip-clang and hcc will be + * minimal. + */ + +#if defined(USE_ROCM) && __hcc_workweek__ < 18312 && !__HIP__ + // This needs to be defined for the host side pass + inline __device__ double atomicAdd(double *address, double val) { } +#endif +#endif + +inline __device__ double gpuAtomicAdd(double *address, double val) { + return atomicAdd(address, val); +} + +inline __device__ float gpuAtomicAdd(float *address, float val) { + return atomicAdd(address, val); +} + +template +inline __device__ void gpuAtomicAdd(c10::complex *address, c10::complex val) { + gpuAtomicAdd(&address->real_, val.real_); + gpuAtomicAdd(&address->imag_, val.imag_); +} + +/* Note [gpuAtomicAdd vs atomicAdd] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Some extensions such as torchvision call atomicAdd() + * directly and require non-library provided data type support. Only for these, we + * continue to provide atomicAdd overloads. + */ +inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) { + return gpuAtomicAdd(address, val); +} + +inline __device__ at::BFloat16 atomicAdd(at::BFloat16 *address, at::BFloat16 val) { + return gpuAtomicAdd(address, val); +} + +inline __device__ void atomicAdd(uint8_t *address, uint8_t val) { + gpuAtomicAdd(address, val); +} + +inline __device__ void atomicAdd(int8_t *address, int8_t val) { + gpuAtomicAdd(address, val); +} + +inline __device__ void atomicAdd(int16_t *address, int16_t val) { + gpuAtomicAdd(address, val); +} + +inline __device__ void atomicAdd(int64_t *address, int64_t val) { + gpuAtomicAdd(address, val); +} + +inline __device__ void atomicAdd(bool *address, bool val) { + gpuAtomicAdd(address, val); +} + +/* Note [explicitly non-returning atomics] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * AMD's MI100 (gfx908) provides an optimized fp32 atomicAdd, exposed via atomicAddNoRet(). + * Due to compiler limitations, callers must opt-in to guarantee the optimized instruction. + * This non-returning atomicAddNoRet cannot be used to implement the returning atomicAdd, + * therefore we need a new API 'gpuAtomicAddNoReturn'. 
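+ *
+ * Rough usage sketch (pointer and value are hypothetical):
+ *     gpuAtomicAddNoReturn(out, v);  // old value is discarded; on gfx908 this
+ *                                    // may lower to the non-returning atomic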
+ */ +template +inline __device__ void gpuAtomicAddNoReturn(c10::complex *address, c10::complex val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(uint8_t *address, uint8_t val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(int8_t *address, int8_t val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(int16_t *address, int16_t val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(int32_t *address, int32_t val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(int64_t *address, int64_t val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(bool *address, bool val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(at::Half *address, at::Half val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(at::BFloat16 *address, at::BFloat16 val) { gpuAtomicAdd(address, val); } +inline __device__ void gpuAtomicAddNoReturn(double *address, double val) { gpuAtomicAdd(address, val); } + +/* Special case fp32 atomic. */ +#if defined(USE_ROCM) +inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { +#if defined(__gfx908__) + atomicAddNoRet(address, val); +#else + (void)unsafeAtomicAdd(address, val); +#endif +} +#else +inline __device__ void gpuAtomicAddNoReturn(float *address, float val) { gpuAtomicAdd(address, val); } +#endif + +// Atomic multiplication implementation. + +ATOMIC_INTEGER_IMPL(Mul) +GPU_ATOMIC_INTEGER(Mul, a * b, uint8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int8_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int16_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int32_t) +GPU_ATOMIC_INTEGER(Mul, a * b, int64_t) + +inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return bsum * val; + }); +} + +inline __device__ at::BFloat16 gpuAtomicMul(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return bsum * val; + }); +} + +inline __device__ double gpuAtomicMul(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(val * __longlong_as_double(assumed)); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMul (float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(val * + __int_as_float(assumed))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic maximum implementation. + +template +__host__ __device__ T safe_max(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b)); + #else + T max = at::_isnan(b) ? 
b : std::max(a, b); + #endif + + return max; +} + +ATOMIC_INTEGER_IMPL(Max) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int16_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int32_t) +GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_max(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_max(bsum, val); + }); +} + +inline __device__ double gpuAtomicMax(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_max(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. +inline __device__ float gpuAtomicMax(float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(safe_max(val, __int_as_float(assumed)))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} + +// Atomic minimum implementation. + +template +__host__ __device__ T safe_min(T a, T b) { + #if defined(__HIPCC__) + // TODO: remove this special case for HIP when issue is fixed: + // https://github.com/ROCm-Developer-Tools/HIP/issues/2209 + T min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b)); + #else + T min = at::_isnan(b) ? b : std::min(a, b); + #endif + + return min; +} + +ATOMIC_INTEGER_IMPL(Min) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int16_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int32_t) +GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int64_t) + +inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) { + return AtomicFPOp()(address, val, + [](at::Half bsum, at::Half val) { + return safe_min(bsum, val); + }); +} + +inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) { + return AtomicFPOp()(address, val, + [](at::BFloat16 bsum, at::BFloat16 val) { + return safe_min(bsum, val); + }); +} + +inline __device__ double gpuAtomicMin(double * address, double val) { + return AtomicFPOp()(address, val, + [](double val, unsigned long long int assumed) { + return __double_as_longlong(safe_min(val, __longlong_as_double(assumed))); + }); +} + +// Dont use a templated function for this since the addition function defaults to the CUDA built-in. 
+inline __device__ float gpuAtomicMin(float * address, float val) { + unsigned int* address_as_ull = (unsigned int*)address; + unsigned int old = *address_as_ull; + unsigned int assumed; + + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __float_as_int(safe_min(val, __int_as_float(assumed)))); + + // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) + } while (assumed != old); + + return __int_as_float(old); +} diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..4dcdabf17b3b90ad598db48f7210e3932142aaad --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAApplyUtils.cuh @@ -0,0 +1,537 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// +// This file contains pointwise operation functions and kernels that +// work on both contiguous and non-contiguous tensor arguments of +// arbitrary (up to MAX_CUTORCH_DIMS) dimensioned arguments without +// copying or temporary storage. +// + +/* + NOTE [ CUDA_tensor_applyN helpers ] + + The following CUDA_tensor_applyN (where N currently can be 1, 2, 3, or 4) + functions apply a pointwise operator to N tensor(s). + + The calling convention is + + 1. The template arguments should be, sequentially, + - First N typename args specify the scalar types of each of the N tensors. + - (Optional) `int step` arg specifies the number of elements processed + together at the same time. + Default is 1. + - A usually omitted (i.e., inferred) typename arg specifies the type of the + function/functor applied on `N * step` values in each iteration of each + CUDA thread. + 2. The arguments should be, sequentially, + - N tensors + - op: a function/functor that processes `N * step` values at the same time. + - If `step == 1`, it must have signature + `void(*)(scalar1_t&, scalar2_t&, ..., scalarN_t&)`, where + `scalar*_t`s are the first N typename template args, and the inputs + are the `N` values from the `N` tensors retrieved at a common index. + - Otherwise, it must must have signature + void(*)(int n, scalar1_t&, scalar1_t&, ..., scalar1_t&, // repeat `step` times + scalar2_t&, scalar2_t&, ..., scalar2_t&, // repeat `step` times + ..., + scalarN_t&, scalarN_t&, ..., scalarN_t&) // repeat `step` times + Different from `step == 1` case, it processes `N * step` values taken + from `step` common indices. Moreover, the first input `n` represents the + number of valid indices (it will always have `0 < n <= step`). It will + almost always be `step`, but at the boundary we may not have full `step` + elements and `n` can be a lesser value. + + E.g., if `step == 4` and `N == 2`, `op` could be + + [](int n, scalar1_t &u1, scalar1_t &u2, scalar1_t &u3, scalar1_t &u4, + scalar2_t &v1, scalar2_t &v2, scalar2_t &v3, scalar2_t &v4) { + // Only process u1, ..., un and v1, ..., vn. + // So if `n == 3`, `u4` and `v4` need not to be considered. + } + + In both cases, the references can actually be const, but at least one of + them should be non-const in order to write the output. + - (Optional, but recommended) N TensorArgType args that specify for each + tensor whether `op` reads AND writes ] (i.e., TensorArgType::ReadWrite), + or only reads (i.e., TensorArgType::ReadOnly). 
+ Default is TensorArgType::ReadWrite for first Tensor, and + TensorArgType::ReadOnly for the rest. + + E.g., + + to compute a = b^2 for a and b of same dtype, we can call + + CUDA_tensor_apply2( + a, b, + [] __device__ (scalar &a_val, const scalar &b_val) { a_val = b_val * b_val; } + ); + + to work on 2 values at the same time, we can call + + CUDA_tensor_apply2( + a, b, + [] __device__ (int n, scalar1 &a_val1, scalar1 &a_val2, + const scalar2 &b_val1, const scalar2 &b_val2) { + // call special vectorized op here, or just do elementwise and enjoy unrolling... + // if n == 1, only process a_val1 and b_val1 + } + ); +*/ + +namespace at::cuda { + +// TODO: combine with TensorArg? So far that's been for debugging, and this is functional... +enum class TensorArgType { ReadWrite, ReadOnly }; + +namespace { + +// Rearrange dimensions for pointwise operations so that strides are in +// decreasing order as much as possible, so that kernels have better memory +// access patterns. +// +// For example, consider a binary operation on two "transposed" 2-dim tensors: +// sizes: 256 512 +// aInfo->strides: 1 256 +// bInfo->strides: 1 256 +// +// Given this, each concurrent memory access inside kernelPointwiseApply2() is +// exactly 256 elements apart, resulting in poor performance. +// +// This function exchanges dimensions so that memory access is contiguous: +// sizes: 512 256 +// aInfo->strides: 256 1 +// bInfo->strides: 256 1 +// +// (Actually, it becomes even better because now collapseDims() can turn each +// input into one contiguous array.) +// +// In general, given M (<=4) TensorInfo's with N dimensions, we can view each +// strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange +// strides[i] and [j] if +// (1) strides[i][k] < strides[j][k] for some k (0 <= k < M) +// (exchanging them will benefit input #k), and +// (2) strides[i][k] <= strieds[j][k] for all k +// (exchanging them will not make any input worse). +template +inline void rearrangeDims(detail::TensorInfo* aInfo, + detail::TensorInfo* bInfo = nullptr, + detail::TensorInfo* cInfo = nullptr, + detail::TensorInfo* dInfo = nullptr) { + int numInfos = 1; + int dims = aInfo->dims; + IndexType *sizes[4] = { aInfo->sizes, }; + IndexType *strides[4] = { aInfo->strides, }; + + if (bInfo != nullptr) { + ++numInfos; + if (bInfo->dims != dims) return; + sizes[1] = bInfo->sizes; + strides[1] = bInfo->strides; + } + + if (cInfo != nullptr) { + ++numInfos; + if (cInfo->dims != dims) return; + sizes[2] = cInfo->sizes; + strides[2] = cInfo->strides; + } + + if (dInfo != nullptr) { + ++numInfos; + if (dInfo->dims != dims) return; + sizes[3] = dInfo->sizes; + strides[3] = dInfo->strides; + } + + // Bail out if sizes do not match: we are using "deprecated pointwise + // behavior" among tensors of different shapes but same number of elements. + for (int i = 1; i < numInfos; ++i) { + for (int j = 0; j < dims; ++j) { + if (sizes[i][j] != sizes[0][j]) return; + } + } + + for (int i = 0; i < dims - 1; ++i) { + // No need to consider dimensions of size 1. + if (sizes[0][i] == 1) continue; + + for (int j = i + 1; j < dims; ++j) { + if (sizes[0][j] == 1) continue; + + // Compare the relative sizes of strides between dim #i and dim #j. 
+ bool hasIncreasingStrides = false; + bool hasDecreasingStrides = false; + + for (int k = 0; k < numInfos; k++) { + IndexType stride_i = strides[k][i]; + IndexType stride_j = strides[k][j]; + if (stride_i < stride_j) { + hasIncreasingStrides = true; + } else if (stride_i > stride_j) { + hasDecreasingStrides = true; + } + } + + if (hasIncreasingStrides && !hasDecreasingStrides) { + for (int k = 0; k < numInfos; k++) { + IndexType size = sizes[k][i]; + sizes[k][i] = sizes[k][j]; + sizes[k][j] = size; + + IndexType stride = strides[k][i]; + strides[k][i] = strides[k][j]; + strides[k][j] = stride; + } + } + } + } +} + +// The `remaining_steps` argument is used to support Op that operates on +// multiple elements at the same time. Generally, the strategy of ApplyOpN is to +// 1. Initialize `remaining_steps = step`, where `step` is the template arg of +// CUDA_tensor_applyN helpers. The input arg `n` to `apply()` represents the +// number of elements in bound for this call. It will almost always equal to +// `step` except at boundaries. +// 2. If `remaining_steps > 0` convert the current linearIndex to offset (if in +// bound), and recursively call `ApplyOpN` with `remaining_steps - 1`. +// 3. At `remaining_steps = 0`, +// if `step = 1`, call `op(tensor1_val, tensor2_val, ...)`; +// if `step > 1`, call `op(n, tensor1_val1, tensor1_val2, ..., tesor1_valstep, +// tensor2_val1, tensor2_val2, ..., tesor2_valstep, +// ... +// tensorN_val1, tensorN_val2, ..., tesorN_valstep);` +// +// See NOTE [ CUDA_tensor_applyN helpers ] above for how Op may look like. + +template +struct ApplyOp1 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, const Op &op, int n, + IndexType linearIndex, Offsets... aOffsets) { + // Convert `linearIndex` into an offset of `a` + const IndexType aOffset = sizeof...(Offsets) < n ? + detail::IndexToOffset::get(linearIndex, a) : 0; + + ApplyOp1::apply( + a, op, n, linearIndex + 1, aOffsets..., aOffset + ); +} +}; + +// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`). +// We don't need to pass in how many elements need to processed in this case. +template +struct ApplyOp1 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, const Op &op, + int n, IndexType linearIndex, Offset offset) { + op(a.data[offset]); +} +}; + +template +struct ApplyOp1 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, const Op &op, int n, + IndexType linearIndex, Offsets... offsets) { + op(n, a.data[offsets]...); +} +}; + +template +#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) +C10_LAUNCH_BOUNDS_2(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) +#endif +__global__ void kernelPointwiseApply1(detail::TensorInfo a, + IndexType totalElements, const Op op) { + for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step; + linearIndex < totalElements; + linearIndex += gridDim.x * blockDim.x * step) { + ApplyOp1::apply( + a, op, ::min(step, static_cast(totalElements - linearIndex)), linearIndex); + } +} + + +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int64_t n, IndexType linearIndex, + Offsets... aOffsets, Offsets... bOffsets) { + // Convert `linearIndex` into an offset of `a` + const IndexType aOffset = static_cast(sizeof...(Offsets)) < n ? + detail::IndexToOffset::get(linearIndex, a) : 0; + + // Convert `linearIndex` into an offset of `b` + const IndexType bOffset = static_cast(sizeof...(Offsets)) < n ? 
+ detail::IndexToOffset::get(linearIndex, b) : 0; + + ApplyOp2::apply( + a, b, op, n, linearIndex + 1, aOffsets..., aOffset, bOffsets..., bOffset + ); +} +}; + +// Specialize `step=1` case (i.e., `remaining_steps=0` and `len(Offsets)=1`). +// We don't need to pass in how many elements need to processed in this case. +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int /*n*/, IndexType /*linearIndex*/, + Offset aOffset, Offset bOffset) { + op(a.data[aOffset], b.data[bOffset]); +} +}; + +template +struct ApplyOp2 { +__device__ __forceinline__ +static void apply(detail::TensorInfo &a, + detail::TensorInfo &b, + const Op &op, int n, IndexType linearIndex, + Offsets... aOffsets, Offsets... bOffsets) { + op(n, a.data[aOffsets]..., b.data[bOffsets]...); +} +}; + +template +#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) +C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) +#endif +__global__ void +kernelPointwiseApply2(detail::TensorInfo a, + detail::TensorInfo b, + IndexType totalElements, + const Op op) { + for (IndexType linearIndex = (blockIdx.x * blockDim.x + threadIdx.x) * step; + linearIndex < totalElements; + linearIndex += gridDim.x * blockDim.x * step) { + ApplyOp2::apply( + a, b, op, ::min(step, static_cast(totalElements - linearIndex)), + linearIndex); + } +} + +} // anonymous namespace + +template +inline bool CUDA_tensor_apply2(at::TensorBase a, + at::TensorBase b, + const Op op, + TensorArgType aType = TensorArgType::ReadWrite, + TensorArgType bType = TensorArgType::ReadOnly) { + TORCH_CHECK(a.device().is_cuda() && b.device().is_cuda(), + "CUDA_tensor_apply2: Expected tensors to have CUDA DeviceType, but got " + "tensors with type ", a.device().type(), " and ", b.device().type()); + int64_t totalElements = a.numel(); + + if (totalElements != b.numel()) { + return false; + } + + if (a.dim() > MAX_TENSORINFO_DIMS || + b.dim() > MAX_TENSORINFO_DIMS) { + return false; + } + + if (a.numel() == 0) { + // Empty tensor; do nothing + return true; + } + const dim3 block = getApplyBlock(max_threads_per_block); + + dim3 grid; + auto curDevice = current_device(); + if (curDevice == -1) return false; + if (!getApplyGrid(totalElements, grid, curDevice, max_threads_per_block)) { + return false; + } + + /* + Expands readable/writable tensors whose indices may be "overlapped." + This ensures that each element of the tensor is operated on once and only + once. + */ + TensorBase oldA; + TensorBase oldB; + + if (aType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(a)) { + // Must perform in contiguous space + oldA = std::exchange(a, a.contiguous()); + } + if (bType == TensorArgType::ReadWrite && detail::maybeOverlappingIndices(b)) { + // Must perform in contiguous space + oldB = std::exchange(b, b.contiguous()); + } + + // It is possible that the tensor dimensions are able to be collapsed, + // and thus we can reduce the actual code complexity of the copy by + // exploiting this knowledge statically, since the div/mod is the + // most expensive part of the operation, more so than memory accesses. + // For instance, when copying a non-contiguous to a contiguous tensor + // (or vice versa), the contiguous tensor can be collapsed to one + // dimension, and the loop to translate the linear index to the array + // index can be similarly collapsed. That is what this unrolling is for. 
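+  // Editorial illustration (not part of the original header): after the
+  // rearrangeDims() and collapseDims() calls above, a pair of contiguous,
+  // 32-bit-indexable tensors reaches the fully collapsed HANDLE_CASE(TYPE, 1, 1)
+  // branch below. A hypothetical caller such as
+  //
+  //   CUDA_tensor_apply2<float, float>(
+  //       a, b,
+  //       [] __device__ (float& a_val, const float& b_val) {
+  //         a_val = b_val * b_val;   // a = b^2, elementwise
+  //       });
+  //
+  // therefore gets a kernel specialized for one collapsed dimension per
+  // tensor, while non-contiguous inputs fall back to the generic -1 cases.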
+ +#define HANDLE_CASE(TYPE, A, B) \ + kernelPointwiseApply2 \ + <<>>( \ + aInfo, bInfo, static_cast(totalElements), op); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); + +#define HANDLE_B_CASE(TYPE, A, B) { \ + switch (B) { \ + case 1: \ + HANDLE_CASE(TYPE, A, 1); \ + break; \ + case 2: \ + HANDLE_CASE(TYPE, A, 2); \ + break; \ + default: \ + HANDLE_CASE(TYPE, A, -1); \ + break; \ + } \ +} + +#define HANDLE_A_CASE(TYPE, A, B) { \ + switch (A) { \ + case 1: \ + HANDLE_B_CASE(TYPE, 1, B); \ + break; \ + case 2: \ + HANDLE_B_CASE(TYPE, 2, B); \ + break; \ + default: \ + HANDLE_B_CASE(TYPE, -1, B); \ + break; \ + } \ +} + + if (detail::canUse32BitIndexMath(a) && + detail::canUse32BitIndexMath(b)) { + detail::TensorInfo aInfo = + detail::getTensorInfo(a); + + detail::TensorInfo bInfo = + detail::getTensorInfo(b); + rearrangeDims(&aInfo, &bInfo); + aInfo.collapseDims(); + bInfo.collapseDims(); + + HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims); + } else { + detail::TensorInfo aInfo = + detail::getTensorInfo(a); + + detail::TensorInfo bInfo = + detail::getTensorInfo(b); + rearrangeDims(&aInfo, &bInfo); + aInfo.collapseDims(); + bInfo.collapseDims(); + + /* + Only instantiates the all 1D special case and the fallback all nD case for + large (64-bit indexed) tensors to reduce compilation time. + */ + if (aInfo.dims == 1 && bInfo.dims == 1) { + HANDLE_CASE(uint64_t, 1, 1); + } else { + HANDLE_CASE(uint64_t, -1, -1); + } + } +#undef HANDLE_CASE +#undef HANDLE_B_CASE +#undef HANDLE_A_CASE + + if (oldA.defined()) { + at::native::copy_ignoring_overlaps(oldA, a); + } + + if (oldB.defined()) { + at::native::copy_ignoring_overlaps(oldB, b); + } + + return true; +} + +/* Provides default step = 1 to CUDA_tensor_apply2. */ +template +inline bool CUDA_tensor_apply2(const at::TensorBase &a, + const at::TensorBase &b, + const Op op, + TensorArgType aType = TensorArgType::ReadWrite, + TensorArgType bType = TensorArgType::ReadOnly) { + return CUDA_tensor_apply2(a, b, op, aType, bType); +} + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h new file mode 100644 index 0000000000000000000000000000000000000000..2c6cef95f79fe81ae5f1906763f8f800a44141d5 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDABlas.h @@ -0,0 +1,358 @@ +#pragma once +/* + Provides a subset of CUDA BLAS functions as templates: + + gemm(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, + ldc) + + gemv(transa, m, n, alpha, a, lda, x, incx, beta, y, incy) + + dot(n, x, incx, y, incy, result) + + where Dtype is double, float, at::Half or at::BFloat16 (ROCm, NOT for dot). + The functions are available in at::cuda::blas namespace. 
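+
+  An illustrative call (editorial sketch, not part of the original header),
+  assuming column-major float matrices A (m x k), B (k x n), C (m x n) with
+  leading dimensions equal to their row counts:
+
+    // C = 1.0 * A * B + 0.0 * C, no transposition
+    at::cuda::blas::gemm<float>(
+        'n', 'n', m, n, k,
+        1.0f, A, /*lda=*/m,
+        B, /*ldb=*/k,
+        0.0f, C, /*ldc=*/m);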
+ */ + +#include +#include + +namespace at::cuda::blas { + +// RAII guard that sets the CuBLAS pointer mode and restores it to +// its previous value when the guard is destroyed +class PointerModeGuard { +public: + PointerModeGuard(cublasHandle_t handle, cublasPointerMode_t mode) : + handle(handle) { + TORCH_CUDABLAS_CHECK(cublasGetPointerMode(handle, &previous_mode)); + TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, mode)); + } + + ~PointerModeGuard() { + cublasSetPointerMode(handle, previous_mode); + } + +private: + cublasHandle_t handle; + cublasPointerMode_t previous_mode; +}; + +/* LEVEL 3 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, const Dtype *b, int64_t ldb, at::opmath_type beta,\ + Dtype *c, int64_t ldc + +#define CUDABLAS_GEMM_ARGS(Dtype) transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc + +template +inline void gemm(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::gemm: not implemented"); +} + +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(double)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(float)); +template <> +void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::Half)); +template <> +void gemm(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)); + +template +inline void gemm_internal(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::gemm_internal: not implemented"); +} + +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(double)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(float)); +template <> +void gemm_internal>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm_internal>(CUDABLAS_GEMM_ARGTYPES(c10::complex)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(at::Half)); +template <> +void gemm_internal(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)); + +enum GEMMAndBiasActivationEpilogue { + None, + RELU, + GELU, +}; + +// NOTE: GELU activation is not supported prior to CUDA 11.4 and will +// do nothing if passed in that case. 
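+//
+// Editorial sketch (not part of the original header): a fused
+// GEMM + bias + ReLU call through the declaration below, assuming
+// column-major float operands with mat1_ld = m and mat2_ld = k, and a
+// bias vector assumed here to have length m (broadcast across columns):
+//
+//   at::cuda::blas::gemm_and_bias<float>(
+//       /*transpose_mat1=*/false, /*transpose_mat2=*/false,
+//       m, n, k, /*alpha_val=*/1.0f,
+//       mat1, /*mat1_ld=*/m,
+//       mat2, /*mat2_ld=*/k,
+//       bias,
+//       result, /*result_ld=*/m,
+//       at::cuda::blas::GEMMAndBiasActivationEpilogue::RELU);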
+template +void gemm_and_bias( + bool transpose_mat1, + bool transpose_mat2, + int64_t m, + int64_t n, + int64_t k, + at::opmath_type alpha_val, + const Dtype* mat1_ptr, + int64_t mat1_ld, + const Dtype* mat2_ptr, + int64_t mat2_ld, + const Dtype* bias, + Dtype* result_ptr, + int64_t result_ld, + GEMMAndBiasActivationEpilogue activation = GEMMAndBiasActivationEpilogue::None); + +void int8_gemm( + bool transpose_mat1, + bool transpose_mat2, + int64_t m, + int64_t n, + int64_t k, + const int8_t* mat1_ptr, + int64_t mat1_ld, + const int8_t* mat2_ptr, + int64_t mat2_ld, + int32_t* result_ptr, + int64_t result_ld); + +void scaled_gemm( + char transa, + char transb, + int64_t m, + int64_t n, + int64_t k, + const void* mat1_ptr, + const void* mat1_scale_ptr, + int64_t mat1_ld, + ScalarType mat1_dtype, + const void* mat2_ptr, + const void* mat2_scale_ptr, + int64_t mat2_ld, + ScalarType mat2_dtype, + const void* bias_ptr, + ScalarType bias_dtype, + void* result_ptr, + const void* result_scale_ptr, + int64_t result_ld, + ScalarType result_dtype, + void* amax_ptr, + bool use_fast_accum); + +#define CUDABLAS_BGEMM_ARGTYPES(Dtype) \ + char transa, char transb, int64_t m, int64_t n, int64_t k, at::opmath_type alpha, \ + const Dtype *a, int64_t lda, int64_t stridea, \ + const Dtype *b, int64_t ldb, int64_t strideb, \ + at::opmath_type beta, Dtype *c, int64_t ldc, int64_t stridec, int64_t num_batches + +#define CUDABLAS_BGEMM_ARGS(Dtype) \ + transa, transb, m, n, k, alpha, a, lda, stridea, b, ldb, strideb, beta, c, ldc, stridec, num_batches + +template +inline void bgemm(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::bgemm: not implemented"); +} + +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(double)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(float)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::Half)); +template <> +void bgemm(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)); + +template +inline void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::bgemm_internal: not implemented"); +} + +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(double)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(float)); +template <> +void bgemm_internal>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm_internal>(CUDABLAS_BGEMM_ARGTYPES(c10::complex)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(at::Half)); +template <> +void bgemm_internal(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)); + +#define CUDABLAS_TRSM_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t diag, int m, int n, \ + const Dtype *alpha, const Dtype *A, int lda, Dtype *B, int ldb + +template +inline void trsm(CUDABLAS_TRSM_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype), "at::cuda::blas::trsm: not implemented"); +} + +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsm(CUDABLAS_TRSM_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsm>(CUDABLAS_TRSM_ARGTYPES(c10::complex)); + +#define CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasSideMode_t side, cublasFillMode_t uplo, \ + cublasOperation_t trans, cublasDiagType_t 
diag, int m, int n, \ + const Dtype *alpha, Dtype *A[], int lda, Dtype *B[], int ldb, \ + int batchCount + +template +inline void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype), "at::cuda::blas::trsmBatched: not implemented"); +} + +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void trsmBatched(CUDABLAS_TRSM_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void trsmBatched>(CUDABLAS_TRSM_BATCHED_ARGTYPES(c10::complex)); + +/* LEVEL 2 BLAS FUNCTIONS */ + +#define CUDABLAS_GEMV_ARGTYPES(Dtype) \ + char trans, int64_t m, int64_t n, Dtype alpha, const Dtype *a, int64_t lda, \ + const Dtype *x, int64_t incx, Dtype beta, Dtype *y, int64_t incy + +template +inline void gemv(CUDABLAS_GEMV_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype), "at::cuda::blas::gemv: not implemented"); +} + +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(double)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(float)); +template <> +void gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +template <> +void gemv>(CUDABLAS_GEMV_ARGTYPES(c10::complex)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::Half)); +template <> +void gemv(CUDABLAS_GEMV_ARGTYPES(at::BFloat16)); + +/* LEVEL 1 BLAS FUNCTIONS */ + +#define CUDABLAS_DOT_ARGTYPES(Dtype) \ + cublasHandle_t handle, int n, const Dtype *x, int incx, const Dtype *y, \ + int incy, Dtype *result + +template +inline void dot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::dot: not implemented"); +} + +template <> +void dot(CUDABLAS_DOT_ARGTYPES(double)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(float)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::Half)); +template <> +void dot(CUDABLAS_DOT_ARGTYPES(at::BFloat16)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void dot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +template +inline void vdot(CUDABLAS_DOT_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::vdot: not implemented"); +} + +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); +template <> +void vdot>(CUDABLAS_DOT_ARGTYPES(c10::complex)); + +#define CUDABLAS_GETRS_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasOperation_t trans, \ + int n, int nrhs, Dtype** dA_array, int lda, int* ipiv_array, \ + Dtype** dB_array, int ldb, int* info_array, int batchsize + +template +void getrsBatched(CUDABLAS_GETRS_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::getrsBatched: not implemented"); +} +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrsBatched(CUDABLAS_GETRS_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrsBatched>(CUDABLAS_GETRS_ARGTYPES(c10::complex)); + +#define CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, int m, int n, Dtype **A_array, int lda, \ + Dtype **tau_array, int *info, int batchsize + +template +void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype), "at::cuda::blas::geqrfBatched: not implemented"); +} +template <> +TORCH_CUDA_CU_API void geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(float)); +template <> +TORCH_CUDA_CU_API void 
geqrfBatched(CUDABLAS_GEQRF_BATCHED_ARGTYPES(double)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); +template <> +TORCH_CUDA_CU_API void geqrfBatched>( + CUDABLAS_GEQRF_BATCHED_ARGTYPES(c10::complex)); + +#define CUDABLAS_GETRF_ARGTYPES(Dtype) \ + int n, Dtype** dA_array, int ldda, int* ipiv_array, int* info_array, int batchsize + +template +void getrfBatched(CUDABLAS_GETRF_ARGTYPES(Dtype)) { + TORCH_CHECK(false, "at::cuda::blas::getrfBatched: not implemented"); +} +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void getrfBatched(CUDABLAS_GETRF_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void getrfBatched>(CUDABLAS_GETRF_ARGTYPES(c10::complex)); + +#define CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype) \ + cublasHandle_t handle, cublasOperation_t trans, int m, int n, int nrhs, Dtype** dA_array, int ldda, Dtype** dC_array, int lddc, int* info, int *devInfoArray, int batchSize + +template +void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(Dtype)) { + static_assert(false&&sizeof(Dtype),"at::cuda::blas::gelsBatched: not implemented"); +} + +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(double)); +template<> +TORCH_CUDA_CU_API void gelsBatched(CUDABLAS_GELS_BATCHED_ARGTYPES(float)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); +template<> +TORCH_CUDA_CU_API void gelsBatched>(CUDABLAS_GELS_BATCHED_ARGTYPES(c10::complex)); + +} // namespace at::cuda::blas diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h new file mode 100644 index 0000000000000000000000000000000000000000..3e9e496c8830cd011fff700907d94e72c55d75ef --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAConfig.h @@ -0,0 +1,19 @@ +#pragma once + +// Test these using #if AT_CUDNN_ENABLED(), not #ifdef, so that it's +// obvious if you forgot to include Config.h +// c.f. https://stackoverflow.com/questions/33759787/generating-an-error-if-checked-boolean-macro-is-not-defined +// +// NB: This header MUST NOT be included from other headers; it should +// only be included from C++ files. 
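+//
+// Editorial sketch (not part of the original header) of the intended usage
+// pattern. Because AT_CUDNN_ENABLED() is a function-like macro, forgetting
+// to include this header turns the test into a preprocessor error instead of
+// silently taking the disabled branch:
+//
+//   #include <ATen/cuda/CUDAConfig.h>
+//   #if AT_CUDNN_ENABLED()
+//     // cuDNN-backed implementation
+//   #else
+//     // fallback implementation
+//   #endif
+//
+//   #ifdef AT_CUDNN_ENABLED   // wrong: compiles either way, even without the include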
+#define AT_CUDNN_ENABLED() 1 +#define AT_CUSPARSELT_ENABLED() 1 +#define AT_ROCM_ENABLED() 0 +#define AT_MAGMA_ENABLED() 1 + +// Needed for hipMAGMA to correctly identify implementation +#if (AT_ROCM_ENABLED() && AT_MAGMA_ENABLED()) +#define HAVE_HIP 1 +#endif + +#define NVCC_FLAGS_EXTRA "-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90" diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h new file mode 100644 index 0000000000000000000000000000000000000000..dc33cb541370f54f8b8b03baadc52709017ca527 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h @@ -0,0 +1,99 @@ +#pragma once +// Light-weight version of CUDAContext.h with fewer transitive includes + +#include + +#include +#include +#include + +// cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also +// added bf16 support +#include + +#ifdef CUDART_VERSION +#include +#endif + +#if defined(USE_CUDSS) +#include +#endif + +#if defined(USE_ROCM) +#include +#endif + +#include +#include + +namespace c10 { +struct Allocator; +} + +namespace at::cuda { + +/* +A common CUDA interface for ATen. + +This interface is distinct from CUDAHooks, which defines an interface that links +to both CPU-only and CUDA builds. That interface is intended for runtime +dispatch and should be used from files that are included in both CPU-only and +CUDA builds. + +CUDAContext, on the other hand, should be preferred by files only included in +CUDA builds. It is intended to expose CUDA functionality in a consistent +manner. + +This means there is some overlap between the CUDAContext and CUDAHooks, but +the choice of which to use is simple: use CUDAContext when in a CUDA-only file, +use CUDAHooks otherwise. + +Note that CUDAContext simply defines an interface with no associated class. +It is expected that the modules whose functions compose this interface will +manage their own state. There is only a single CUDA context/state. +*/ + +/** + * DEPRECATED: use device_count() instead + */ +inline int64_t getNumGPUs() { + return c10::cuda::device_count(); +} + +/** + * CUDA is available if we compiled with CUDA, and there are one or more + * devices. If we compiled with CUDA but there is a driver problem, etc., + * this function will report CUDA is not available (rather than raise an error.) 
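+ *
+ * Editorial sketch (not part of the original header):
+ *
+ *   if (at::cuda::is_available()) {
+ *     cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
+ *     // prop->major / prop->minor give the compute capability of the
+ *     // current device
+ *   }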
+ */ +inline bool is_available() { + return c10::cuda::device_count() > 0; +} + +TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties(); + +TORCH_CUDA_CPP_API int warp_size(); + +TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(c10::DeviceIndex device); + +TORCH_CUDA_CPP_API bool canDeviceAccessPeer( + c10::DeviceIndex device, + c10::DeviceIndex peer_device); + +TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator(); + +/* Handles */ +TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle(); +TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle(); +TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle(); + +TORCH_CUDA_CPP_API void clearCublasWorkspaces(); + +#if defined(CUDART_VERSION) || defined(USE_ROCM) +TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle(); +#endif + +#if defined(USE_CUDSS) +TORCH_CUDA_CPP_API cudssHandle_t getCurrentCudssHandle(); +#endif + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h new file mode 100644 index 0000000000000000000000000000000000000000..1696bb3a0f4430a21de8261cf066a895d014156d --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h @@ -0,0 +1,105 @@ +#pragma once + +#include + +#include +#include + +namespace at::cuda { + +template +cudaDataType getCudaDataType() { + static_assert(false && sizeof(scalar_t), "Cannot convert type to cudaDataType."); + return {}; +} + +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32F; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_16F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_32F; +} +template<> inline cudaDataType getCudaDataType>() { + return CUDA_C_64F; +} + +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8U; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_8I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_32I; +} + +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_64I; +} +template<> inline cudaDataType getCudaDataType() { + return CUDA_R_16BF; +} + +inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) { + switch (scalar_type) { + case c10::ScalarType::Byte: + return CUDA_R_8U; + case c10::ScalarType::Char: + return CUDA_R_8I; + case c10::ScalarType::Int: + return CUDA_R_32I; + case c10::ScalarType::Half: + return CUDA_R_16F; + case c10::ScalarType::Float: + return CUDA_R_32F; + case c10::ScalarType::Double: + return CUDA_R_64F; + case c10::ScalarType::ComplexHalf: + return CUDA_C_16F; + case c10::ScalarType::ComplexFloat: + return CUDA_C_32F; + case c10::ScalarType::ComplexDouble: + return CUDA_C_64F; + case c10::ScalarType::Short: + return CUDA_R_16I; + case c10::ScalarType::Long: + return CUDA_R_64I; + case c10::ScalarType::BFloat16: + return CUDA_R_16BF; +#if defined(CUDA_VERSION) && CUDA_VERSION >= 11080 + case c10::ScalarType::Float8_e4m3fn: + return CUDA_R_8F_E4M3; + case c10::ScalarType::Float8_e5m2: + return CUDA_R_8F_E5M2; +#endif +#if defined(USE_ROCM) +#if defined(HIP_NEW_TYPE_ENUMS) + case c10::ScalarType::Float8_e4m3fnuz: + return HIP_R_8F_E4M3_FNUZ; + 
case c10::ScalarType::Float8_e5m2fnuz: + return HIP_R_8F_E5M2_FNUZ; +#else + case c10::ScalarType::Float8_e4m3fnuz: + return static_cast(1000); + case c10::ScalarType::Float8_e5m2fnuz: + return static_cast(1001); +#endif +#endif + default: + TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.") + } +} + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h new file mode 100644 index 0000000000000000000000000000000000000000..ba9a5eb849a091be6e86658a8c7af87a0a3fbb8f --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +#include +#include + +namespace at::cuda { + +inline Device getDeviceFromPtr(void* ptr) { + cudaPointerAttributes attr{}; + + AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr)); + +#if !defined(USE_ROCM) + TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered, + "The specified pointer resides on host memory and is not registered with any CUDA device."); +#endif + + return {c10::DeviceType::CUDA, static_cast(attr.device)}; +} + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h new file mode 100644 index 0000000000000000000000000000000000000000..94ce34645b02bf21d30eef42191560962a6c40a3 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAEvent.h @@ -0,0 +1,211 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace at::cuda { + +/* +* CUDAEvents are movable not copyable wrappers around CUDA's events. +* +* CUDAEvents are constructed lazily when first recorded unless it is +* reconstructed from a cudaIpcEventHandle_t. The event has a device, and this +* device is acquired from the first recording stream. However, if reconstructed +* from a handle, the device should be explicitly specified; or if ipc_handle() is +* called before the event is ever recorded, it will use the current device. +* Later streams that record the event must match this device. +*/ +struct TORCH_CUDA_CPP_API CUDAEvent { + // Constructors + // Default value for `flags` is specified below - it's cudaEventDisableTiming + CUDAEvent() noexcept = default; + CUDAEvent(unsigned int flags) noexcept : flags_{flags} {} + + CUDAEvent( + DeviceIndex device_index, const cudaIpcEventHandle_t* handle) : device_index_(device_index) { + CUDAGuard guard(device_index_); + + AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle)); + is_created_ = true; + } + + // Note: event destruction done on creating device to avoid creating a + // CUDA context on other devices. + ~CUDAEvent() { + try { + if (is_created_) { + CUDAGuard guard(device_index_); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_deletion(at::kCUDA, reinterpret_cast(event_)); + } + AT_CUDA_CHECK(cudaEventDestroy(event_)); + } + } catch (...) 
{ /* No throw */ } + } + + CUDAEvent(const CUDAEvent&) = delete; + CUDAEvent& operator=(const CUDAEvent&) = delete; + + CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); } + CUDAEvent& operator=(CUDAEvent&& other) noexcept { + if (this != &other) { + moveHelper(std::move(other)); + } + return *this; + } + + operator cudaEvent_t() const { return event(); } + + // Less than operator (to allow use in sets) + friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) { + return left.event_ < right.event_; + } + + std::optional device() const { + if (is_created_) { + return at::Device(at::kCUDA, device_index_); + } else { + return {}; + } + } + + bool isCreated() const { return is_created_; } + DeviceIndex device_index() const {return device_index_;} + cudaEvent_t event() const { return event_; } + + // Note: cudaEventQuery can be safely called from any device + bool query() const { + if (!is_created_) { + return true; + } + + cudaError_t err = cudaEventQuery(event_); + if (err == cudaSuccess) { + return true; + } else if (err != cudaErrorNotReady) { + C10_CUDA_CHECK(err); + } else { + // ignore and clear the error if not ready + (void)cudaGetLastError(); + } + + return false; + } + + void record() { record(getCurrentCUDAStream()); } + + void recordOnce(const CUDAStream& stream) { + if (!was_recorded_) record(stream); + } + + // Note: cudaEventRecord must be called on the same device as the event. + void record(const CUDAStream& stream) { + if (!is_created_) { + createEvent(stream.device_index()); + } + + TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_, + " does not match recording stream's device ", stream.device_index(), "."); + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaEventRecord(event_, stream)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_record(at::kCUDA, + reinterpret_cast(event_), + reinterpret_cast(stream.stream()) + ); + } + was_recorded_ = true; + } + + // Note: cudaStreamWaitEvent must be called on the same device as the stream. + // The event has no actual GPU resources associated with it. + void block(const CUDAStream& stream) { + if (is_created_) { + CUDAGuard guard(stream.device_index()); + AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_wait(at::kCUDA, + reinterpret_cast(event_), + reinterpret_cast(stream.stream()) + ); + } + } + } + + // Note: cudaEventElapsedTime can be safely called from any device + float elapsed_time(const CUDAEvent& other) const { + TORCH_CHECK(is_created_ && other.isCreated(), + "Both events must be recorded before calculating elapsed time."); + float time_ms = 0; + // We do not strictly have to set the device index to the same as our event, + // but if we don't and the current device is not initialized, it will + // create a new cuda context, which will consume a lot of memory. 
+ CUDAGuard guard(device_index_); + // raise cudaErrorNotReady if either event is recorded but not yet completed + AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_)); + return time_ms; + } + + // Note: cudaEventSynchronize can be safely called from any device + void synchronize() const { + if (is_created_) { + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_synchronization(at::kCUDA, reinterpret_cast(event_)); + } + AT_CUDA_CHECK(cudaEventSynchronize(event_)); + } + } + + // Note: cudaIpcGetEventHandle must be called on the same device as the event + void ipc_handle(cudaIpcEventHandle_t * handle) { + if (!is_created_) { + // this CUDAEvent object was initially constructed from flags but event_ + // is not created yet. + createEvent(getCurrentCUDAStream().device_index()); + } + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_)); + } + +private: + unsigned int flags_ = cudaEventDisableTiming; + bool is_created_ = false; + bool was_recorded_ = false; + DeviceIndex device_index_ = -1; + cudaEvent_t event_{}; + + void createEvent(DeviceIndex device_index) { + device_index_ = device_index; + CUDAGuard guard(device_index_); + AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_)); + const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); + if (C10_UNLIKELY(interp)) { + (*interp)->trace_gpu_event_creation(at::kCUDA, reinterpret_cast(event_)); + } + is_created_ = true; + } + + void moveHelper(CUDAEvent&& other) { + std::swap(flags_, other.flags_); + std::swap(is_created_, other.is_created_); + std::swap(was_recorded_, other.was_recorded_); + std::swap(device_index_, other.device_index_); + std::swap(event_, other.event_); + } +}; + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..0fe664e35f54c268d01da38c5a47d490f51e52ee --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGeneratorImpl.h @@ -0,0 +1,181 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +namespace at { + +namespace cuda { +struct CUDAGraph; +} + +/** + * Note [CUDA Graph-safe RNG states] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * Strategy: + * ~~~~~~~~~ + * (It helps to look at + * cuda/detail/PhiloxCudaStateRaw.cuh and + * cuda/detail/UnpackRaw.cuh + * while you read this.) + * + * A CUDA graph containing multiple RNG ops behaves like a + * single giant kernel from the perspective of ops external + * to the graph. During graph capture, logic in CUDAGeneratorImpl + * records the total of all offset increments that occur in the + * graphed region, and records the final total as the offset for + * the entire graph. + * + * When the graph reruns, the logic that reruns it + * increments this device's CUDA generator's offset + * by that total. + * + * Meanwhile, within the graph, at capture time, instead of + * populating PhiloxCudaStates with the uint64_t offset pulled + * directly from the global state, PhiloxCudaState uses a pointer + * to a one-element stream-local int64_t device tensor + * holding an initial offset value, and a uint64_t holding an + * intra-graph offset. (The intra-graph offset starts from zero + * when capture begins.) 
In each consumer kernel, + * at::cuda::philox::unpack computes the offset to use for this kernel + * as intra-graph offset + *initial offset. + * + * When the graph reruns, the logic that reruns it first + * fill_s the initial offset tensor with this device's + * CUDA generator's current offset. + * + * The control flow above ensures graphed execution is bitwise + * identical to eager execution as long as RNG ops are enqueued + * from a single thread, even if RNG ops and graphs containing + * RNG ops are enqueued and run simultaneously on multiple streams. + * + * Usage: + * ~~~~~~ + * PhiloxCudaState in this file, and unpack() in + * cuda/CUDAGraphsUtils.cuh allow non-divergent use of + * CUDAGeneratorImpl whether graph capture is underway or not. + * + * Each PhiloxCudaState instance should be used for one and only one + * consumer kernel. + * + * Example (see e.g. native/cuda/Dropout.cu): + * + * #include + * #include + * + * __global__ void kernel(..., PhiloxCudaState philox_args) { + * auto seeds = at::cuda::philox::unpack(philox_args); + * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; + * curandStatePhilox4_32_10_t state; + * curand_init(std::get<0>(seeds), // seed + * idx, // per-thread subsequence + * std::get<1>(seeds), // offset in subsequence + * &state); + * ... + * } + * + * host_caller(...) { + * PhiloxCudaState rng_engine_inputs; + * { + * // See Note [Acquire lock when using random generators] + * std::lock_guard lock(gen->mutex_); + * + * // gen could be HostState or DevState here! No divergent code needed! + * rng_engine_inputs = gen->philox_cuda_state(offset_increment); + * } + * kernel<<<...>>>(..., rng_engine_inputs); + * } + * + */ + +struct CUDAGeneratorState : public c10::intrusive_ptr_target { + uint64_t seed_; + uint64_t philox_offset_per_thread_; + uint32_t offset_intragraph_; + bool capturing_{}; + std::unordered_set registered_graphs_; + at::TensorBase seed_extragraph_{}; + at::TensorBase offset_extragraph_{}; + + CUDAGeneratorState( + uint64_t seed = default_rng_seed_val, + uint64_t philox_offset_per_thread = 0, + uint32_t offset_intragraph = 0) + : seed_(seed), + philox_offset_per_thread_(philox_offset_per_thread), + offset_intragraph_(offset_intragraph) {} + + void increase(uint64_t increment); + + void register_graph(cuda::CUDAGraph* graph); + void unregister_graph(cuda::CUDAGraph* graph); + + void capture_prologue(); + // capture_epilogue returns the wholegraph_increment + uint64_t capture_epilogue(); + void replay_prologue(uint64_t wholegraph_increment); + c10::intrusive_ptr clone(); +}; + +struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl { + // Constructors + CUDAGeneratorImpl(DeviceIndex device_index = -1); + CUDAGeneratorImpl( + DeviceIndex device_index, + c10::intrusive_ptr state_); + ~CUDAGeneratorImpl() override = default; + + // CUDAGeneratorImpl methods + std::shared_ptr clone() const; + void set_current_seed(uint64_t seed) override; + void set_offset(uint64_t offset) override; + uint64_t get_offset() const override; + uint64_t current_seed() const override; + uint64_t seed() override; + void set_state(const c10::TensorImpl& new_state) override; + c10::intrusive_ptr get_state() const override; + void graphsafe_set_state( + const c10::intrusive_ptr& state) override; + c10::intrusive_ptr graphsafe_get_state() const override; + + void set_philox_offset_per_thread(uint64_t offset); + uint64_t philox_offset_per_thread() const; + + void register_graph(cuda::CUDAGraph* graph); + void unregister_graph(cuda::CUDAGraph* 
graph); + + // Generates a PhiloxCudaState with a specified increment, and increment + // current state + PhiloxCudaState philox_cuda_state(uint64_t increment); + + bool reset_rnn_state() { + return !no_reset_rnn_state_.test_and_set(); + } + + // Temporarily accommodates call sites that use philox_engine_inputs. + // Allows incremental refactor of call sites to use philox_cuda_state. + std::pair philox_engine_inputs(uint64_t increment); + + static c10::DeviceType device_type(); + + private: + CUDAGeneratorImpl* clone_impl() const override; + + c10::intrusive_ptr state_; + std::atomic_flag no_reset_rnn_state_; +}; + +namespace cuda::detail { + +TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator( + DeviceIndex device_index = -1); +TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1); + +} // namespace cuda::detail +} // namespace at diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h new file mode 100644 index 0000000000000000000000000000000000000000..de5417301d34298f57cfd98243a4620cd8998593 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h @@ -0,0 +1,89 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace at { + +struct Generator; +struct CUDAGeneratorImpl; +struct CUDAGeneratorState; + +namespace cuda { + +// Standalone way to get a unique mempool id usable as a pool=... argument +// to CUDAGraph::capture_begin +TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle(); + +struct TORCH_CUDA_CPP_API CUDAGraph { + CUDAGraph(); + ~CUDAGraph(); + + static void inc_pending_event_queries(); + static void dec_pending_event_queries(); + static int num_pending_event_queries(); + // See Note [Explicit Registration of Generators to the CUDA Graph] + void register_generator_state(c10::intrusive_ptr state); + void register_generator_state(const at::Generator& generator); + void capture_begin( + MempoolId_t pool = {0, 0}, + cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal); + void capture_end(); + void replay(); + void reset(); + MempoolId_t pool(); + void enable_debug_mode(); + void debug_dump(const std::string& debug_path); + + protected: + cudaGraph_t graph_ = nullptr; + cudaGraphExec_t graph_exec_ = nullptr; + + static std::atomic pending_event_queries; + + // internal states so reset() can do its best cleaning up + // Set to true in capture_end if cudaStreamEndCapture succeeded + // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate + // to create graph_exec_, then graph_ is deleted + bool has_graph_ = false; + // Set to true in capture_end if cudaGraphInstantiate succeeded + bool has_graph_exec_ = false; + + // the ID assigned by cuda during graph capture, + // used to identify when a stream is participating in capture + CaptureId_t capture_id_ = -1; + + // uuid used to request a particular private mempool from CUDACachingAllocator. + // By default, this will be set to {id_, 0}. + // + // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_ + // will be set to the other graph's mempool_id_, and therefore share a mempool with the + // other graph. + // + // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(), + // it will share a mempool with any other captures that used "pool=handle". 
+ // + // Sharing a mempool across graphs saves memory, and it's safe if you + // know you'll replay those graphs in the same order you captured them. + MempoolId_t mempool_id_; + + // Stream on which capture began + at::cuda::CUDAStream capture_stream_; + + // multiple generator states and their wholegraph_increments in this graph + // that are managed by the CUDA Graph + ska::flat_hash_map, uint64_t> + captured_generator_states_; + + // Device where capture occurred. Right now, for simplicity, we require all ops + // in a capture to run on the same device, but this is a limitation of CUDAGraph, + // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device + // captures if needed. + int capture_dev_; +}; + +} // namespace cuda +} // namespace at diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d3a5b306eeea49b47d45ae5f28944b6dfd8ac4dc --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraphsUtils.cuh @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +// c10/cuda/CUDAGraphsC10Utils.h has utils used by both c10 and aten. +// This file adds utils used by aten only. + +namespace at::cuda { + +using CaptureId_t = c10::cuda::CaptureId_t; +using CaptureStatus = c10::cuda::CaptureStatus; + +// Use this version where you don't want to create a CUDA context if none exists. +inline CaptureStatus currentStreamCaptureStatus() { + // don't create a context if we don't have to + if (c10::cuda::hasPrimaryContext(c10::cuda::current_device())) { + return c10::cuda::currentStreamCaptureStatusMayInitCtx(); + } else { + return CaptureStatus::None; + } +} + +inline void assertNotCapturing(const std::string& attempt) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + attempt, + " during CUDA graph capture. If you need this call to be captured, " + "please file an issue. " + "Current cudaStreamCaptureStatus: ", + status); +} + +inline void errorIfCapturingCudnnBenchmark(const std::string& version_specific) { + auto status = currentStreamCaptureStatus(); + TORCH_CHECK(status == CaptureStatus::None, + "Current cudaStreamCaptureStatus: ", + status, + "\nCapturing ", + version_specific, + "is prohibited. Possible causes of this error:\n" + "1. No warmup iterations occurred before capture.\n" + "2. 
The convolutions you're trying to capture use dynamic shapes, " + "in which case capturing them is generally prohibited."); +} + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h new file mode 100644 index 0000000000000000000000000000000000000000..736fbe4ae50dac94f9022c9e827393d4a7e9cf5a --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#if defined(USE_ROCM) +#include +#define HIPSPARSE_VERSION ((hipsparseVersionMajor*100000) + (hipsparseVersionMinor*100) + hipsparseVersionPatch) +#endif + +// cuSparse Generic API added in CUDA 10.1 +// Windows support added in CUDA 11.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32))) +#define AT_USE_CUSPARSE_GENERIC_API() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_API() 0 +#endif + +// cuSparse Generic API descriptor pointers were changed to const in CUDA 12.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \ + (CUSPARSE_VERSION < 12000) +#define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 1 +#else +#define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 0 +#endif + +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \ + (CUSPARSE_VERSION >= 12000) +#define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 1 +#else +#define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 0 +#endif + +#if defined(USE_ROCM) +// hipSparse const API added in v2.4.0 +#if HIPSPARSE_VERSION >= 200400 +#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 1 +#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_GENERIC_API() 1 +#else +#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 1 +#define AT_USE_HIPSPARSE_GENERIC_API() 1 +#endif +#else // USE_ROCM +#define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0 +#define AT_USE_HIPSPARSE_GENERIC_API() 0 +#endif // USE_ROCM + +// cuSparse Generic API spsv function was added in CUDA 11.3.0 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500) +#define AT_USE_CUSPARSE_GENERIC_SPSV() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SPSV() 0 +#endif + +// cuSparse Generic API spsm function was added in CUDA 11.3.1 +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600) +#define AT_USE_CUSPARSE_GENERIC_SPSM() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SPSM() 0 +#endif + +// cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400) +#if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400) +#define AT_USE_CUSPARSE_GENERIC_SDDMM() 1 +#else +#define AT_USE_CUSPARSE_GENERIC_SDDMM() 0 +#endif + +// BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0) +#if defined(CUDART_VERSION) || defined(USE_ROCM) +#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1 +#else +#define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0 +#endif diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h new file mode 100644 index 0000000000000000000000000000000000000000..c99d42c9a7de8852d84167bbca70bc242aabb2b2 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h @@ -0,0 +1,318 @@ +#pragma once + 
+/* + Provides a subset of cuSPARSE functions as templates: + + csrgeam2(...) + + where scalar_t is double, float, c10::complex or c10::complex. + The functions are available in at::cuda::sparse namespace. +*/ + +#include +#include + +namespace at::cuda::sparse { + +#define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, int nnzA, \ + const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ + const int *csrSortedColIndA, const scalar_t *beta, \ + const cusparseMatDescr_t descrB, int nnzB, \ + const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \ + const int *csrSortedColIndC, size_t *pBufferSizeInBytes + +template +inline void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float)); +template <> +void csrgeam2_bufferSizeExt( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double)); +template <> +void csrgeam2_bufferSizeExt>( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex)); +template <> +void csrgeam2_bufferSizeExt>( + CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex)); + +#define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \ + cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \ + int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \ + const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace + +template +inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) { + TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz( + handle, + m, + n, + descrA, + nnzA, + csrSortedRowPtrA, + csrSortedColIndA, + descrB, + nnzB, + csrSortedRowPtrB, + csrSortedColIndB, + descrC, + csrSortedRowPtrC, + nnzTotalDevHostPtr, + workspace)); +} + +#define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, int nnzA, \ + const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ + const int *csrSortedColIndA, const scalar_t *beta, \ + const cusparseMatDescr_t descrB, int nnzB, \ + const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ + const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ + scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \ + void *pBuffer + +template +inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::csrgeam2: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(float)); +template <> +void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(double)); +template <> +void csrgeam2>( + CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex)); +template <> +void csrgeam2>( + CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \ + int kb, int nnzb, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int 
*bsrColIndA, int blockDim, \ + const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc + +template +inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrmm: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrmm(CUSPARSE_BSRMM_ARGTYPES(float)); +template <> +void bsrmm(CUSPARSE_BSRMM_ARGTYPES(double)); +template <> +void bsrmm>(CUSPARSE_BSRMM_ARGTYPES(c10::complex)); +template <> +void bsrmm>(CUSPARSE_BSRMM_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nb, int nnzb, \ + const scalar_t *alpha, const cusparseMatDescr_t descrA, \ + const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ + int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y + +template +inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrmv: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrmv(CUSPARSE_BSRMV_ARGTYPES(float)); +template <> +void bsrmv(CUSPARSE_BSRMV_ARGTYPES(double)); +template <> +void bsrmv>(CUSPARSE_BSRMV_ARGTYPES(c10::complex)); +template <> +void bsrmv>(CUSPARSE_BSRMV_ARGTYPES(c10::complex)); + +#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() + +#define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, \ + const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, int *pBufferSizeInBytes + +template +inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float)); +template <> +void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double)); +template <> +void bsrsv2_bufferSize>( + CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex)); +template <> +void bsrsv2_bufferSize>( + CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_analysis: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float)); +template <> +void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double)); +template <> +void bsrsv2_analysis>( + CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex)); +template <> +void bsrsv2_analysis>( + CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \ + const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \ + cusparseSolvePolicy_t 
policy, void *pBuffer + +template +inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsv2_solve: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float)); +template <> +void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double)); +template <> +void bsrsv2_solve>( + CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex)); +template <> +void bsrsv2_solve>( + CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsm2Info_t info, int *pBufferSizeInBytes + +template +inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float)); +template <> +void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double)); +template <> +void bsrsm2_bufferSize>( + CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex)); +template <> +void bsrsm2_bufferSize>( + CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ + const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ + bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_analysis: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float)); +template <> +void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double)); +template <> +void bsrsm2_analysis>( + CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex)); +template <> +void bsrsm2_analysis>( + CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex)); + +#define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \ + cusparseHandle_t handle, cusparseDirection_t dirA, \ + cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ + int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \ + const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ + int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \ + scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer + +template +inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) { + TORCH_INTERNAL_ASSERT( + false, + "at::cuda::sparse::bsrsm2_solve: not implemented for ", + typeid(scalar_t).name()); +} + +template <> +void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float)); +template <> +void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double)); +template <> +void bsrsm2_solve>( + CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex)); +template <> +void bsrsm2_solve>( + CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex)); + +#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE + +} // namespace at::cuda::sparse diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h 
b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h new file mode 100644 index 0000000000000000000000000000000000000000..36e1530e284fbc33fae63f1d0a5284ed1636a2d1 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseDescriptors.h @@ -0,0 +1,288 @@ +#pragma once + +#include +#include +#include + +#include + +#if defined(USE_ROCM) +#include +#endif + +namespace at::cuda::sparse { + +template +struct CuSparseDescriptorDeleter { + void operator()(T* x) { + if (x != nullptr) { + TORCH_CUDASPARSE_CHECK(destructor(x)); + } + } +}; + +template +class CuSparseDescriptor { + public: + T* descriptor() const { + return descriptor_.get(); + } + T* descriptor() { + return descriptor_.get(); + } + + protected: + std::unique_ptr> descriptor_; +}; + +#if AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS() +template +struct ConstCuSparseDescriptorDeleter { + void operator()(T* x) { + if (x != nullptr) { + TORCH_CUDASPARSE_CHECK(destructor(x)); + } + } +}; + +template +class ConstCuSparseDescriptor { + public: + T* descriptor() const { + return descriptor_.get(); + } + T* descriptor() { + return descriptor_.get(); + } + + protected: + std::unique_ptr> descriptor_; +}; +#endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS || AT_USE_HIPSPARSE_CONST_DESCRIPTORS + +#if defined(USE_ROCM) +using cusparseMatDescr = std::remove_pointer::type; +using cusparseDnMatDescr = std::remove_pointer::type; +using cusparseDnVecDescr = std::remove_pointer::type; +using cusparseSpMatDescr = std::remove_pointer::type; +using cusparseSpMatDescr = std::remove_pointer::type; +using cusparseSpGEMMDescr = std::remove_pointer::type; +#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() +using bsrsv2Info = std::remove_pointer::type; +using bsrsm2Info = std::remove_pointer::type; +#endif +#endif + +// NOTE: This is only needed for CUDA 11 and earlier, since CUDA 12 introduced +// API for const descriptors +cusparseStatus_t destroyConstDnMat(const cusparseDnMatDescr* dnMatDescr); + +class TORCH_CUDA_CPP_API CuSparseMatDescriptor + : public CuSparseDescriptor { + public: + CuSparseMatDescriptor() { + cusparseMatDescr_t raw_descriptor = nullptr; + TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } + + CuSparseMatDescriptor(bool upper, bool unit) { + cusparseFillMode_t fill_mode = + upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER; + cusparseDiagType_t diag_type = + unit ? 
CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT; + cusparseMatDescr_t raw_descriptor = nullptr; + TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor)); + TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode)); + TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type)); + descriptor_.reset(raw_descriptor); + } +}; + +#if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() + +class TORCH_CUDA_CPP_API CuSparseBsrsv2Info + : public CuSparseDescriptor { + public: + CuSparseBsrsv2Info() { + bsrsv2Info_t raw_descriptor = nullptr; + TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; + +class TORCH_CUDA_CPP_API CuSparseBsrsm2Info + : public CuSparseDescriptor { + public: + CuSparseBsrsm2Info() { + bsrsm2Info_t raw_descriptor = nullptr; + TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; + +#endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE + +#if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() + +cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type); + +#if AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() +class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor + : public CuSparseDescriptor { + public: + explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1); +}; + +class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor + : public CuSparseDescriptor { + public: + explicit CuSparseConstDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1); + cusparseDnMatDescr* unsafe_mutable_descriptor() const { + return const_cast(descriptor()); + } + cusparseDnMatDescr* unsafe_mutable_descriptor() { + return const_cast(descriptor()); + } +}; + +class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor + : public CuSparseDescriptor { + public: + explicit CuSparseDnVecDescriptor(const Tensor& input); +}; + +class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor + : public CuSparseDescriptor {}; + +#elif AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS() + class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor + : public ConstCuSparseDescriptor< + cusparseDnMatDescr, + &cusparseDestroyDnMat> { + public: + explicit CuSparseDnMatDescriptor( + const Tensor& input, + int64_t batch_offset = -1); + }; + + class TORCH_CUDA_CPP_API CuSparseConstDnMatDescriptor + : public ConstCuSparseDescriptor< + const cusparseDnMatDescr, + &destroyConstDnMat> { + public: + explicit CuSparseConstDnMatDescriptor( + const Tensor& input, + int64_t batch_offset = -1); + cusparseDnMatDescr* unsafe_mutable_descriptor() const { + return const_cast(descriptor()); + } + cusparseDnMatDescr* unsafe_mutable_descriptor() { + return const_cast(descriptor()); + } + }; + + class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor + : public ConstCuSparseDescriptor< + cusparseDnVecDescr, + &cusparseDestroyDnVec> { + public: + explicit CuSparseDnVecDescriptor(const Tensor& input); + }; + + class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor + : public ConstCuSparseDescriptor< + cusparseSpMatDescr, + &cusparseDestroySpMat> {}; +#endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS() || AT_USE_HIPSPARSE_CONST_DESCRIPTORS() + +class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor + : public CuSparseSpMatDescriptor { + public: + explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1); + + std::tuple get_size() { + int64_t rows = 0, cols = 0, nnz = 0; + TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize( + 
this->descriptor(), + &rows, + &cols, + &nnz)); + return std::make_tuple(rows, cols, nnz); + } + + void set_tensor(const Tensor& input) { + auto crow_indices = input.crow_indices(); + auto col_indices = input.col_indices(); + auto values = input.values(); + + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous()); + TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers( + this->descriptor(), + crow_indices.data_ptr(), + col_indices.data_ptr(), + values.data_ptr())); + } + +#if AT_USE_CUSPARSE_GENERIC_SPSV() + void set_mat_fill_mode(bool upper) { + cusparseFillMode_t fill_mode = + upper ? CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER; + TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( + this->descriptor(), + CUSPARSE_SPMAT_FILL_MODE, + &fill_mode, + sizeof(fill_mode))); + } + + void set_mat_diag_type(bool unit) { + cusparseDiagType_t diag_type = + unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT; + TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( + this->descriptor(), + CUSPARSE_SPMAT_DIAG_TYPE, + &diag_type, + sizeof(diag_type))); + } +#endif +}; + +#if AT_USE_CUSPARSE_GENERIC_SPSV() +class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpSVDescriptor() { + cusparseSpSVDescr_t raw_descriptor = nullptr; + TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +#if AT_USE_CUSPARSE_GENERIC_SPSM() +class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpSMDescriptor() { + cusparseSpSMDescr_t raw_descriptor = nullptr; + TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; +#endif + +class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor + : public CuSparseDescriptor { + public: + CuSparseSpGEMMDescriptor() { + cusparseSpGEMMDescr_t raw_descriptor = nullptr; + TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } +}; + +#endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() + +} // namespace at::cuda::sparse diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e4e89ea1cdb77da1d7866ffe99c64dabfd735d27 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDATensorMethods.cuh @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace at { +template <> +inline __half* Tensor::data() const { + return reinterpret_cast<__half*>(data()); +} +} // namespace at diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..d5f65dd6a572f9cab15dbae9983df8037b724a99 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAUtils.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace at::cuda { + +// Check if every tensor in a list of tensors matches the current +// device. 
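// --- Editor's note: illustrative usage sketch only; not part of the upstream
// header. `my_fused_op` is a hypothetical caller added for clarity. ---
//
//   void my_fused_op(const at::Tensor& a, const at::Tensor& b) {
//     TORCH_CHECK(at::cuda::check_device({a, b}),
//                 "my_fused_op: all inputs must be on the current CUDA device");
//     // ... safe to launch kernels that assume a common device ...
//   }
//
// The helper below simply compares each tensor's device against
// Device(kCUDA, current_device()) and returns true for an empty list.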
+inline bool check_device(ArrayRef ts) { + if (ts.empty()) { + return true; + } + Device curDevice = Device(kCUDA, current_device()); + for (const Tensor& t : ts) { + if (t.device() != curDevice) return false; + } + return true; +} + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..a7209582b2ba1a7dd69f258d1de3d33da3304f7f --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include + +namespace at::cuda { + +// +// A caching allocator for CUDA host allocations (pinned memory). +// +// This provides a drop-in replacement for THCudaHostAllocator, which re-uses +// freed pinned (page-locked) memory allocations. This avoids device +// synchronizations due to cudaFreeHost calls. +// +// To ensure correct behavior, THCCachingHostAllocator_recordEvent must be +// called anytime a pointer from this allocator is used in a cudaMemcpyAsync +// call between host and device, and passed the corresponding context from the +// allocation. This is currently invoked by at::native::copy_kernel_cuda. +// +TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator(); + +// Records an event in the specified stream. The allocation corresponding to the +// input `ptr`/`ctx` will not be re-used until the event has occurred. +TORCH_CUDA_CPP_API bool CachingHostAllocator_recordEvent( + void* ptr, + void* ctx, + c10::cuda::CUDAStream stream); + +// Releases cached pinned memory allocations via cudaHostFree +TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache(); + +inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) { + return getCachingHostAllocator()->allocate(size); +} + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c0a2fc47c0069bed4e15beec249162a556f7dc3d --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/DeviceUtils.cuh @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include + +__device__ __forceinline__ unsigned int ACTIVE_MASK() +{ +#if !defined(USE_ROCM) + return __activemask(); +#else +// will be ignored anyway + return 0xffffffff; +#endif +} + +__device__ __forceinline__ void WARP_SYNC(unsigned mask = 0xffffffff) { +#if !defined(USE_ROCM) + return __syncwarp(mask); +#endif +} + +#if defined(USE_ROCM) +__device__ __forceinline__ unsigned long long int WARP_BALLOT(int predicate) +{ +return __ballot(predicate); +} +#else +__device__ __forceinline__ unsigned int WARP_BALLOT(int predicate, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __ballot_sync(mask, predicate); +#else + return __ballot(predicate); +#endif +} +#endif + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL(T value, int srcLane, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_sync(mask, value, srcLane, width); +#else + 
return __shfl(value, srcLane, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL_UP(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_up_sync(mask, value, delta, width); +#else + return __shfl_up(value, delta, width); +#endif +} + +template +__device__ __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return __shfl_down_sync(mask, value, delta, width); +#else + return __shfl_down(value, delta, width); +#endif +} + +#if defined(USE_ROCM) +template<> +__device__ __forceinline__ int64_t WARP_SHFL_DOWN(int64_t value, unsigned int delta, int width , unsigned int mask) +{ + //(HIP doesn't support int64_t). Trick from https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ + int2 a = *reinterpret_cast(&value); + a.x = __shfl_down(a.x, delta); + a.y = __shfl_down(a.y, delta); + return *reinterpret_cast(&a); +} +#endif + +template<> +__device__ __forceinline__ c10::Half WARP_SHFL_DOWN(c10::Half value, unsigned int delta, int width, unsigned int mask) +{ + return c10::Half(WARP_SHFL_DOWN(value.x, delta, width, mask), c10::Half::from_bits_t{}); +} + +template +__device__ __forceinline__ c10::complex WARP_SHFL_DOWN(c10::complex value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if !defined(USE_ROCM) + return c10::complex( + __shfl_down_sync(mask, value.real_, delta, width), + __shfl_down_sync(mask, value.imag_, delta, width)); +#else + return c10::complex( + __shfl_down(value.real_, delta, width), + __shfl_down(value.imag_, delta, width)); +#endif +} + +/** + * For CC 3.5+, perform a load using __ldg + */ +template +__device__ __forceinline__ T doLdg(const T* p) { +#if __CUDA_ARCH__ >= 350 && !defined(USE_ROCM) + return __ldg(p); +#else + return *p; +#endif +} diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..2fd88a94b75d2ca72133c253eca800295a1771aa --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/EmptyTensor.h @@ -0,0 +1,44 @@ +#pragma once +#include + +namespace at::detail { + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + ScalarType dtype, + std::optional device_opt, + std::optional memory_format_opt); + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt, + std::optional memory_format_opt); + +TORCH_CUDA_CPP_API TensorBase empty_cuda( + IntArrayRef size, + const TensorOptions &options); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + ScalarType dtype, + std::optional device_opt); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + std::optional dtype_opt, + std::optional layout_opt, + std::optional device_opt, + std::optional pin_memory_opt); + +TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( + IntArrayRef size, + IntArrayRef stride, + const TensorOptions &options); + + +} // namespace at::detail diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h new file mode 100644 index 
0000000000000000000000000000000000000000..47d64e2bf3126c825e410678330647cb8ffbe919 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h @@ -0,0 +1,205 @@ +#pragma once + +#include +#include +#include + +#ifdef CUDART_VERSION +#include +#endif + +#if defined(USE_CUDSS) +#include +#endif + +#include +#include +#include + + +namespace c10 { + +class CuDNNError : public c10::Error { + using Error::Error; +}; + +} // namespace c10 + +#define AT_CUDNN_FRONTEND_CHECK(EXPR, ...) \ + do { \ + auto error_object = EXPR; \ + if (!error_object.is_good()) { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN Frontend error: ", error_object.get_message()); \ + } \ + } while (0) \ + +#define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__) + +// See Note [CHECK macro] +#define AT_CUDNN_CHECK(EXPR, ...) \ + do { \ + cudnnStatus_t status = EXPR; \ + if (status != CUDNN_STATUS_SUCCESS) { \ + if (status == CUDNN_STATUS_NOT_SUPPORTED) { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN error: ", \ + cudnnGetErrorString(status), \ + ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \ + } else { \ + TORCH_CHECK_WITH(CuDNNError, false, \ + "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \ + } \ + } \ + } while (0) + +namespace at::cuda::blas { +C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error); +} // namespace at::cuda::blas + +#define TORCH_CUDABLAS_CHECK(EXPR) \ + do { \ + cublasStatus_t __err = EXPR; \ + TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \ + "CUDA error: ", \ + at::cuda::blas::_cublasGetErrorEnum(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +const char *cusparseGetErrorString(cusparseStatus_t status); + +#define TORCH_CUDASPARSE_CHECK(EXPR) \ + do { \ + cusparseStatus_t __err = EXPR; \ + TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \ + "CUDA error: ", \ + cusparseGetErrorString(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +#if defined(USE_CUDSS) +namespace at::cuda::cudss { +C10_EXPORT const char* cudssGetErrorMessage(cudssStatus_t error); +} // namespace at::cuda::solver + +#define TORCH_CUDSS_CHECK(EXPR) \ + do { \ + cudssStatus_t __err = EXPR; \ + if (__err == CUDSS_STATUS_EXECUTION_FAILED) { \ + TORCH_CHECK_LINALG( \ + false, \ + "cudss error: ", \ + at::cuda::cudss::cudssGetErrorMessage(__err), \ + ", when calling `" #EXPR "`", \ + ". This error may appear if the input matrix contains NaN. ");\ + } else { \ + TORCH_CHECK( \ + __err == CUDSS_STATUS_SUCCESS, \ + "cudss error: ", \ + at::cuda::cudss::cudssGetErrorMessage(__err), \ + ", when calling `" #EXPR "`. "); \ + } \ + } while (0) +#else +#define TORCH_CUDSS_CHECK(EXPR) EXPR +#endif + +// cusolver related headers are only supported on cuda now +#ifdef CUDART_VERSION + +namespace at::cuda::solver { +C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status); + +constexpr const char* _cusolver_backend_suggestion = \ + "If you keep seeing this error, you may use " \ + "`torch.backends.cuda.preferred_linalg_library()` to try " \ + "linear algebra operators with other supported backends. " \ + "See https://pytorch.org/docs/stable/backends.html#torch.backends.cuda.preferred_linalg_library"; + +} // namespace at::cuda::solver + +// When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan. +// When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue. 
+#define TORCH_CUSOLVER_CHECK(EXPR) \ + do { \ + cusolverStatus_t __err = EXPR; \ + if ((CUDA_VERSION < 11500 && \ + __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \ + (CUDA_VERSION >= 11500 && \ + __err == CUSOLVER_STATUS_INVALID_VALUE)) { \ + TORCH_CHECK_LINALG( \ + false, \ + "cusolver error: ", \ + at::cuda::solver::cusolverGetErrorMessage(__err), \ + ", when calling `" #EXPR "`", \ + ". This error may appear if the input matrix contains NaN. ", \ + at::cuda::solver::_cusolver_backend_suggestion); \ + } else { \ + TORCH_CHECK( \ + __err == CUSOLVER_STATUS_SUCCESS, \ + "cusolver error: ", \ + at::cuda::solver::cusolverGetErrorMessage(__err), \ + ", when calling `" #EXPR "`. ", \ + at::cuda::solver::_cusolver_backend_suggestion); \ + } \ + } while (0) + +#else +#define TORCH_CUSOLVER_CHECK(EXPR) EXPR +#endif + +#define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR) + +// For CUDA Driver API +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. +#if !defined(USE_ROCM) + +#define AT_CUDA_DRIVER_CHECK(EXPR) \ + do { \ + CUresult __err = EXPR; \ + if (__err != CUDA_SUCCESS) { \ + const char* err_str; \ + CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \ + if (get_error_str_err != CUDA_SUCCESS) { \ + AT_ERROR("CUDA driver error: unknown error"); \ + } else { \ + AT_ERROR("CUDA driver error: ", err_str); \ + } \ + } \ + } while (0) + +#else + +#define AT_CUDA_DRIVER_CHECK(EXPR) \ + do { \ + CUresult __err = EXPR; \ + if (__err != CUDA_SUCCESS) { \ + AT_ERROR("CUDA driver error: ", static_cast(__err)); \ + } \ + } while (0) + +#endif + +// For CUDA NVRTC +// +// Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE, +// incorrectly produces the error string "NVRTC unknown error." +// The following maps it correctly. +// +// This is here instead of in c10 because NVRTC is loaded dynamically via a stub +// in ATen, and we need to use its nvrtcGetErrorString. +// See NOTE [ USE OF NVRTC AND DRIVER API ]. +#define AT_CUDA_NVRTC_CHECK(EXPR) \ + do { \ + nvrtcResult __err = EXPR; \ + if (__err != NVRTC_SUCCESS) { \ + if (static_cast(__err) != 7) { \ + AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \ + } else { \ + AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \ + } \ + } \ + } while (0) diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7081e94837caa7d5050128e0bfe19aa67f93cd39 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/NumericLimits.cuh @@ -0,0 +1,121 @@ +#pragma once + +#include +#include +#include +#include + +// NumericLimits.cuh is a holder for numeric limits definitions of commonly used +// types. This header is very specific to ROCm HIP and may be removed in the future. +// This header is derived from the legacy THCNumerics.cuh. + +// The lower_bound and upper_bound constants are same as lowest and max for +// integral types, but are -inf and +inf for floating point types. They are +// useful in implementing min, max, etc. + +namespace at { + +template +struct numeric_limits { +}; + +// WARNING: the following at::numeric_limits definitions are there only to support +// HIP compilation for the moment. 
Use std::numeric_limits if you are not +// compiling for ROCm. +// from @colesbury: "The functions on numeric_limits aren't marked with +// __device__ which is why they don't work with ROCm. CUDA allows them +// because they're constexpr." + +namespace { + // ROCm doesn't like INFINITY too. + constexpr double inf = INFINITY; +} + +template <> +struct numeric_limits { + static inline __host__ __device__ bool lowest() { return false; } + static inline __host__ __device__ bool max() { return true; } + static inline __host__ __device__ bool lower_bound() { return false; } + static inline __host__ __device__ bool upper_bound() { return true; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ uint8_t lowest() { return 0; } + static inline __host__ __device__ uint8_t max() { return UINT8_MAX; } + static inline __host__ __device__ uint8_t lower_bound() { return 0; } + static inline __host__ __device__ uint8_t upper_bound() { return UINT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int8_t lowest() { return INT8_MIN; } + static inline __host__ __device__ int8_t max() { return INT8_MAX; } + static inline __host__ __device__ int8_t lower_bound() { return INT8_MIN; } + static inline __host__ __device__ int8_t upper_bound() { return INT8_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int16_t lowest() { return INT16_MIN; } + static inline __host__ __device__ int16_t max() { return INT16_MAX; } + static inline __host__ __device__ int16_t lower_bound() { return INT16_MIN; } + static inline __host__ __device__ int16_t upper_bound() { return INT16_MAX; } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ int32_t lowest() { return INT32_MIN; } + static inline __host__ __device__ int32_t max() { return INT32_MAX; } + static inline __host__ __device__ int32_t lower_bound() { return INT32_MIN; } + static inline __host__ __device__ int32_t upper_bound() { return INT32_MAX; } +}; + +template <> +struct numeric_limits { +#ifdef _MSC_VER + static inline __host__ __device__ int64_t lowest() { return _I64_MIN; } + static inline __host__ __device__ int64_t max() { return _I64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return _I64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return _I64_MAX; } +#else + static inline __host__ __device__ int64_t lowest() { return INT64_MIN; } + static inline __host__ __device__ int64_t max() { return INT64_MAX; } + static inline __host__ __device__ int64_t lower_bound() { return INT64_MIN; } + static inline __host__ __device__ int64_t upper_bound() { return INT64_MAX; } +#endif +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::Half lowest() { return at::Half(0xFBFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half max() { return at::Half(0x7BFF, at::Half::from_bits()); } + static inline __host__ __device__ at::Half lower_bound() { return at::Half(0xFC00, at::Half::from_bits()); } + static inline __host__ __device__ at::Half upper_bound() { return at::Half(0x7C00, at::Half::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ at::BFloat16 lowest() { return at::BFloat16(0xFF7F, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 max() { return at::BFloat16(0x7F7F, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 lower_bound() { return 
at::BFloat16(0xFF80, at::BFloat16::from_bits()); } + static inline __host__ __device__ at::BFloat16 upper_bound() { return at::BFloat16(0x7F80, at::BFloat16::from_bits()); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ float lowest() { return -FLT_MAX; } + static inline __host__ __device__ float max() { return FLT_MAX; } + static inline __host__ __device__ float lower_bound() { return -static_cast(inf); } + static inline __host__ __device__ float upper_bound() { return static_cast(inf); } +}; + +template <> +struct numeric_limits { + static inline __host__ __device__ double lowest() { return -DBL_MAX; } + static inline __host__ __device__ double max() { return DBL_MAX; } + static inline __host__ __device__ double lower_bound() { return -inf; } + static inline __host__ __device__ double upper_bound() { return inf; } +}; + +} // namespace at diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h new file mode 100644 index 0000000000000000000000000000000000000000..1abf1dcfc12447d7226df19701486cd6bc2affee --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h @@ -0,0 +1,11 @@ +#include +#include + +namespace at::cuda { +namespace detail { +void init_p2p_access_cache(int64_t num_devices); +} + +TORCH_CUDA_CPP_API bool get_p2p_access(int source_dev, int dest_dev); + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..0b161b70d96f428512cf6c26c3acc1654da74bf0 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh @@ -0,0 +1,4 @@ +#pragma once + +#include +#include diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..854f5d8dd1297e3b99fe347358291c9a97c37e1e --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +inline TORCH_CUDA_CPP_API at::Allocator* getPinnedMemoryAllocator() { + return getCachingHostAllocator(); +} +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..fd8b9e91d48815761e7234b0d6fbcaab7f62fcee --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include +#include + +// Collection of in-kernel scan / prefix sum utilities + +namespace at::cuda { + +// Inclusive prefix sum for binary vars using intra-warp voting + +// shared memory +template +__device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) { + // Within-warp, we use warp voting. 
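// Editor's note (explanatory comment, not in the upstream header): the
// warp-level step below computes a per-lane inclusive count of `true` votes
// without touching shared memory. WARP_BALLOT(in) packs every lane's predicate
// into one bit of a mask; AND-ing that mask with getLaneMaskLe() keeps only the
// bits of lanes at or below the current lane, so __popc/__popcll of the result
// is this lane's inclusive prefix sum, while __popc/__popcll of the full ballot
// is the warp's total carry. Example with 4 lanes voting 1,0,1,1 (ballot
// 0b1101): lane 2 computes popcount(0b0111 & 0b1101) = 2.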
+#if defined (USE_ROCM) + unsigned long long int vote = WARP_BALLOT(in); + T index = __popcll(getLaneMaskLe() & vote); + T carry = __popcll(vote); +#else + T vote = WARP_BALLOT(in); + T index = __popc(getLaneMaskLe() & vote); + T carry = __popc(vote); +#endif + + int warp = threadIdx.x / C10_WARP_SIZE; + + // Per each warp, write out a value + if (getLaneId() == 0) { + smem[warp] = carry; + } + + __syncthreads(); + + // Sum across warps in one thread. This appears to be faster than a + // warp shuffle scan for CC 3.0+ + if (threadIdx.x == 0) { + int current = 0; + for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) { + T v = smem[i]; + smem[i] = binop(smem[i], current); + current = binop(current, v); + } + } + + __syncthreads(); + + // load the carry from the preceding warp + if (warp >= 1) { + index = binop(index, smem[warp - 1]); + } + + *out = index; + + if (KillWARDependency) { + __syncthreads(); + } +} + +// Exclusive prefix sum for binary vars using intra-warp voting + +// shared memory +template +__device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) { + inclusiveBinaryPrefixScan(smem, in, out, binop); + + // Inclusive to exclusive + *out -= (T) in; + + // The outgoing carry for all threads is the last warp's sum + *carry = smem[at::ceil_div(blockDim.x, C10_WARP_SIZE) - 1]; + + if (KillWARDependency) { + __syncthreads(); + } +} + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h new file mode 100644 index 0000000000000000000000000000000000000000..ef5e83a832f739e19f13837500824c984013812e --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/Sleep.h @@ -0,0 +1,13 @@ +#pragma once +#include +#include + +namespace at::cuda { + +// enqueues a kernel that spins for the specified number of cycles +TORCH_CUDA_CU_API void sleep(int64_t cycles); + +// flushes instruction cache for ROCm; no-op for CUDA +TORCH_CUDA_CU_API void flush_icache(); + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..85783c303534ec9f6190c2d6bb44a8faafd680f7 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/ThrustAllocator.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +/// Allocator for Thrust to re-route its internal device allocations +/// to the THC allocator +class ThrustAllocator { +public: + typedef char value_type; + + char* allocate(std::ptrdiff_t size) { + return static_cast(c10::cuda::CUDACachingAllocator::raw_alloc(size)); + } + + void deallocate(char* p, size_t size) { + c10::cuda::CUDACachingAllocator::raw_delete(p); + } +}; + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh new file mode 100644 index 0000000000000000000000000000000000000000..daa7f311ff236ed9af98ca8a002faa28003d270d --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.cuh @@ -0,0 +1,405 @@ +#pragma once +#include + +#include +#include +#include +#include + +#include + +#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE() + +#include + +#else + +// include cub in a safe manner, see: +// 
https://github.com/pytorch/pytorch/pull/55292 +#undef CUB_NS_POSTFIX //undef to avoid redefinition warnings +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER +#define CUB_NS_PREFIX namespace at_cuda_detail { +#define CUB_NS_POSTFIX } +#define CUB_NS_QUALIFIER ::at_cuda_detail::cub +#include +#undef CUB_NS_POSTFIX +#undef CUB_NS_PREFIX +#undef CUB_NS_QUALIFIER + +#endif + +#include +#include +#include + +// handle the temporary storage and 'twice' calls for cub API +#define CUB_WRAPPER(func, ...) do { \ + size_t temp_storage_bytes = 0; \ + func(nullptr, temp_storage_bytes, __VA_ARGS__); \ + auto& caching_allocator = *::c10::cuda::CUDACachingAllocator::get(); \ + auto temp_storage = caching_allocator.allocate(temp_storage_bytes); \ + func(temp_storage.get(), temp_storage_bytes, __VA_ARGS__); \ + AT_CUDA_CHECK(cudaGetLastError()); \ +} while (false) + +#ifdef USE_ROCM +#define NO_ROCM(x) +#define ROCM_HIPCUB(x) ::hipcub +#else +#define NO_ROCM(x) x +#define ROCM_HIPCUB(x) x +#endif + +#if (!defined(USE_ROCM) && !CUB_SUPPORTS_NV_BFLOAT16()) || defined(USE_ROCM) + +#if !defined(USE_ROCM) +namespace at_cuda_detail { +#endif + +// backport https://github.com/NVIDIA/cub/pull/306 for c10::BFloat16 + +template <> +struct ROCM_HIPCUB(cub)::FpLimits +{ + static __host__ __device__ __forceinline__ c10::BFloat16 Max() { + unsigned short max_word = 0x7F7F; + return reinterpret_cast(max_word); + } + + static __host__ __device__ __forceinline__ c10::BFloat16 Lowest() { + unsigned short lowest_word = 0xFF7F; + return reinterpret_cast(lowest_word); + } +}; + +template <> +struct ROCM_HIPCUB(cub)::NumericTraits: + ROCM_HIPCUB(cub)::BaseTraits {}; + +#if !defined(USE_ROCM) +} // namespace at_cuda_detail +#endif + +#endif + +#if !defined(USE_ROCM) +namespace at::native { +namespace cub = ::at_cuda_detail::cub; +} // namespace at::native +#endif + +namespace at::cuda::cub { + +namespace detail { + +template +struct cuda_type { + using type = T; +}; +template<> +struct cuda_type { + using type = __half; +}; + +#if !defined(USE_ROCM) && CUB_SUPPORTS_NV_BFLOAT16() + +template<> +struct cuda_type { + using type = __nv_bfloat16; +}; + +#elif defined(USE_ROCM) + +template<> +struct cuda_type { + using type = hip_bfloat16; +}; + +#endif + +} // namespace detail + +template +inline void segmented_sort_pairs( + const key_t *keys_in, key_t *keys_out, + const value_t *values_in, value_t *values_out, + int64_t num_elements, int64_t num_segments, + OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, + bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8 +) { + TORCH_CHECK(num_elements <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + TORCH_CHECK(num_segments <= std::numeric_limits::max(), + "cub sort does not support sorting more than INT_MAX elements"); + using key_t_ = typename detail::cuda_type::type; + + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr keys_out_owner; + + if (keys_out == nullptr) { + keys_out_owner = allocator->allocate(num_elements * sizeof(key_t)); + keys_out = reinterpret_cast(keys_out_owner.get()); + } + + const key_t_ *keys_in_ = reinterpret_cast(keys_in); + key_t_ *keys_out_ = reinterpret_cast(keys_out); + + if (descending) { + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairsDescending, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } else { + 
CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSegmentedRadixSort::SortPairs, + keys_in_, keys_out_, values_in, values_out, + num_elements, num_segments, begin_offsets, end_offsets, + begin_bit, end_bit, c10::cuda::getCurrentCUDAStream()); + } +} + +#if CUB_SUPPORTS_UNIQUE_BY_KEY() +template +inline void unique_by_key( + KeysInputIteratorT keys_in, ValuesInputIteratorT values_in, + ValuesOutputIteratorT values_out, + NumSelectedIteratorT num_selected, int64_t num_input_items) +{ + // TODO: use thrust::discard_iterator to handle null keys_out when https://github.com/NVIDIA/cub/issues/406 is fixed. + using KeyT = typename std::iterator_traits::value_type; + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr keys_out_owner; + keys_out_owner = allocator->allocate(num_input_items * sizeof(KeyT)); + auto keys_out_ = static_cast(keys_out_owner.get()); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::UniqueByKey, + keys_in, values_in, keys_out_, values_out, num_selected, num_input_items, c10::cuda::getCurrentCUDAStream()); +} +#endif + +namespace impl { + +template +C10_LAUNCH_BOUNDS_1(1) +__global__ void transform_vals(InputIteratorT1 a, InputIteratorT2 b, OutputIteratorT out, ScanOpT scan_op){ + // NOTE: out here not the final scan output, but an intermediate of the accumulation type. + using acc_t = typename std::iterator_traits::value_type; + *out = scan_op(static_cast(*a), static_cast(*b)); +} + +#if !CUB_SUPPORTS_FUTURE_VALUE() +template +struct chained_iterator { + using iterator_category = std::random_access_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = ValueT; + using pointer = ValueT*; + using reference = ValueT&; + + InputIteratorT iter; + ValueT *first; + difference_type offset = 0; + + __device__ ValueT operator[](difference_type i) { + i += offset; + if (i == 0) { + return *first; + } else { + return ValueT(iter[i - 1]); + } + } + __device__ chained_iterator operator+(difference_type i) { + return chained_iterator{iter, first, i}; + } + __device__ ValueT operator*() { + return (*this)[0]; + } +}; +#endif + +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +constexpr int max_cub_size = std::numeric_limits::max() / 2 + 1; // 2**30 +} + +// non synchronizing cub call +// even though cub is supposed to support tensors with int_max elements, in reality it doesn't, +// so split at int_max/2 +template +inline void inclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, int64_t num_items) { +#if defined(USE_ROCM) + //For ROCm, use hipCUB chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input, + output, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + using input_t = typename std::iterator_traits::value_type; + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(input_t)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + 
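// Editor's note (descriptive comment, not in the upstream header): each loop
// iteration scans one chunk of at most max_cub_size elements. transform_vals
// below stores scan_op(output[i-1], input[i]) -- the inclusive-scan value at
// position i -- into first_elem_ptr on the device; that single value then seeds
// the scan of the next chunk, either through the transform-iterator fallback or
// through cub::FutureValue when it is available.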
size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + i - 1, + input + i, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + using ArgIndexInputIterator = NO_ROCM(at_cuda_detail)::cub::ArgIndexInputIterator; + using tuple = typename ArgIndexInputIterator::value_type; + auto input_iter_transform = [=] __device__ (const tuple &x)->input_t { + if (x.key == 0) { + return *first_elem_ptr; + } else { + return x.value; + } + }; + auto input_ = NO_ROCM(at_cuda_detail)::cub::TransformInputIterator( + ArgIndexInputIterator(input + i), input_iter_transform); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i + 1, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +template +inline void exclusive_scan(InputIteratorT input, OutputIteratorT output, ScanOpT scan_op, InitValueT init_value, int64_t num_items) { +#if defined(USE_ROCM) + //For ROCm, use hipCUB chained iterators + CUB_WRAPPER(NO_ROCM(detail)::hipcub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + num_items, + at::cuda::getCurrentCUDAStream()); + C10_HIP_KERNEL_LAUNCH_CHECK(); +#else + // non synchronizing cub call + // even though cub is supposed to support tensors with int_max elements, in reality it doesn't, + // so split at int_max/2 + int size_cub = std::min(num_items, max_cub_size); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input, + output, + scan_op, + init_value, + size_cub, + at::cuda::getCurrentCUDAStream()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + for (int64_t i = max_cub_size; i < num_items; i += max_cub_size) { + auto allocator = c10::cuda::CUDACachingAllocator::get(); + c10::DataPtr first_elem = allocator->allocate(sizeof(InitValueT)); + auto first_elem_ptr = reinterpret_cast(first_elem.get()); + + size_cub = std::min(num_items - i, max_cub_size); + impl::transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output + i - 1, + input + i - 1, + first_elem_ptr, + scan_op); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +#if !CUB_SUPPORTS_FUTURE_VALUE() + auto input_ = impl::chained_iterator{ + input + i, first_elem_ptr}; + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::InclusiveScan, + input_, + output + i, + scan_op, + size_cub, + at::cuda::getCurrentCUDAStream()); +#else + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceScan::ExclusiveScan, + input + i, + output + i, + scan_op, + ::at_cuda_detail::cub::FutureValue(first_elem_ptr), + size_cub, + at::cuda::getCurrentCUDAStream()); +#endif + } +#endif +} + +#if CUB_SUPPORTS_SCAN_BY_KEY() + +template +inline void inclusive_sum_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX elements"); + CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveSumByKey, + keys, input, output, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +template +inline void inclusive_scan_by_key(KeysInputIteratorT keys, ValuesInputIteratorT input, ValuesOutputIteratorT output, ScanOpT scan_op, int64_t num_items) { + 
TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub InclusiveSumByKey does not support more than INT_MAX elements"); + CUB_WRAPPER(at_cuda_detail::cub::DeviceScan::InclusiveScanByKey, + keys, input, output, scan_op, num_items, at_cuda_detail::cub::Equality(), at::cuda::getCurrentCUDAStream()); +} + +#endif + +template +void unique(InputIteratorT input, OutputIteratorT output, + NumSelectedIteratorT num_selected_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub unique does not support more than INT_MAX elements"); + CUB_WRAPPER(NO_ROCM(at_cuda_detail)::cub::DeviceSelect::Unique, + input, output, num_selected_out, num_items, at::cuda::getCurrentCUDAStream()); +} + +template +void run_length_encode(InputIteratorT input, OutputIteratorT output, CountsOutputIteratorT counts_out, + LengthOutputIteratorT length_out, int64_t num_items) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub run_length_encode does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceRunLengthEncode::Encode, + input, output, counts_out, length_out, num_items, + at::cuda::getCurrentCUDAStream()); +} + +template +void reduce(InputIteratorT input, OutputIteratorT output, int64_t num_items, ReductionOpT op, T init) { + TORCH_CHECK(num_items <= std::numeric_limits::max(), + "cub reduce does not support more than INT_MAX elements"); + CUB_WRAPPER( + NO_ROCM(at_cuda_detail)::cub::DeviceReduce::Reduce, + input, output, num_items, op, init, + at::cuda::getCurrentCUDAStream()); + +} + +} // namespace at::cuda::cub diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h new file mode 100644 index 0000000000000000000000000000000000000000..b18f1438a7088ccea4ae5cc9e1ddb114d7941015 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h @@ -0,0 +1,87 @@ +#pragma once +#include +#include +#include + +// NOTE: These templates are intentionally not defined in this header, +// which aviods re-compiling them for each translation unit. If you get +// a link error, you need to add an explicit instantiation for your +// types in cub.cu + +namespace at::cuda::cub { + +inline int get_num_bits(uint64_t max_key) { + int num_bits = 1; + while (max_key > 1) { + max_key >>= 1; + num_bits++; + } + return num_bits; +} + +namespace detail { + +// radix_sort_pairs doesn't interact with value_t other than to copy +// the data, so we can save template instantiations by reinterpreting +// it as an opaque type. 
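// Editor's note (illustrative, not part of the upstream header): since the sort
// only moves value bytes, every value type of a given size can share a single
// instantiation. For example, sorting (float keys, int64_t values) and
// (float keys, double values) should both lower to the same
// radix_sort_pairs_impl specialization for 8-byte values, with the value
// pointers reinterpreted as OpaqueType<8>*.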
+template struct alignas(N) OpaqueType { char data[N]; }; + +template +void radix_sort_pairs_impl( + const key_t *keys_in, key_t *keys_out, + const OpaqueType *values_in, OpaqueType *values_out, + int64_t n, bool descending, int64_t begin_bit, int64_t end_bit); + +} // namespace detail + +template +void radix_sort_pairs( + const key_t *keys_in, key_t *keys_out, + const value_t *values_in, value_t *values_out, + int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8) { + static_assert(std::is_trivially_copyable_v || + AT_ROCM_ENABLED(), // ROCm incorrectly fails this check for vector types + "radix_sort_pairs value type must be trivially copyable"); + // Make value type opaque, so all inputs of a certain size use the same template instantiation + using opaque_t = detail::OpaqueType; + static_assert(sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0, + "This size of value_t is not instantiated. Please instantiate it in cub.cu" + " and modify this check."); + static_assert(sizeof(value_t) == alignof(value_t), "Expected value_t to be size-aligned"); + detail::radix_sort_pairs_impl( + keys_in, keys_out, + reinterpret_cast(values_in), + reinterpret_cast(values_out), + n, descending, begin_bit, end_bit); +} + +template +void radix_sort_keys( + const key_t *keys_in, key_t *keys_out, + int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8); + +// NOTE: Intermediate sums will be truncated to input_t precision +template +void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t n); + +template +void inclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) { + return inclusive_sum_truncating(input, output, n); +} + +// NOTE: Sums are done is common_type +template +void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t n); + +template +void exclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) { + return exclusive_sum_in_common_type(input, output, n); +} + +void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n); +inline void mask_exclusive_sum(const bool *mask, int64_t *output_idx, int64_t n) { + return mask_exclusive_sum( + reinterpret_cast(mask), output_idx, n); +} + +} // namespace at::cuda::cub diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh new file mode 100644 index 0000000000000000000000000000000000000000..55b2f21dd1b48c77ccb96ce7de82f9a28ba3725a --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh @@ -0,0 +1,53 @@ +#pragma once + +#if !defined(USE_ROCM) +#include // for CUDA_VERSION +#endif + +#if !defined(USE_ROCM) +#include +#else +#define CUB_VERSION 0 +#endif + +// cub sort support for __nv_bfloat16 is added to cub 1.13 in: +// https://github.com/NVIDIA/cub/pull/306 +#if CUB_VERSION >= 101300 +#define CUB_SUPPORTS_NV_BFLOAT16() true +#else +#define CUB_SUPPORTS_NV_BFLOAT16() false +#endif + +// cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in: +// https://github.com/NVIDIA/cub/pull/326 +// CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake +// starting from CUDA 11.5 +#if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE) +#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true +#else +#define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false +#endif + +// cub support for UniqueByKey is added to cub 1.16 in: 
+// https://github.com/NVIDIA/cub/pull/405 +#if CUB_VERSION >= 101600 +#define CUB_SUPPORTS_UNIQUE_BY_KEY() true +#else +#define CUB_SUPPORTS_UNIQUE_BY_KEY() false +#endif + +// cub support for scan by key is added to cub 1.15 +// in https://github.com/NVIDIA/cub/pull/376 +#if CUB_VERSION >= 101500 +#define CUB_SUPPORTS_SCAN_BY_KEY() 1 +#else +#define CUB_SUPPORTS_SCAN_BY_KEY() 0 +#endif + +// cub support for cub::FutureValue is added to cub 1.15 in: +// https://github.com/NVIDIA/cub/pull/305 +#if CUB_VERSION >= 101500 +#define CUB_SUPPORTS_FUTURE_VALUE() true +#else +#define CUB_SUPPORTS_FUTURE_VALUE() false +#endif diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h new file mode 100644 index 0000000000000000000000000000000000000000..c23998fda56b678c45d354c57196387178cf4753 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/CUDAHooks.h @@ -0,0 +1,58 @@ +#pragma once + +#include + +#include +#include + +// TODO: No need to have this whole header, we can just put it all in +// the cpp file + +namespace at::cuda::detail { + +// Set the callback to initialize Magma, which is set by +// torch_cuda_cu. This indirection is required so magma_init is called +// in the same library where Magma will be used. +TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)()); + + +// The real implementation of CUDAHooksInterface +struct CUDAHooks : public at::CUDAHooksInterface { + CUDAHooks(at::CUDAHooksArgs) {} + void initCUDA() const override; + Device getDeviceFromPtr(void* data) const override; + bool isPinnedPtr(const void* data) const override; + const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const override; + bool hasCUDA() const override; + bool hasMAGMA() const override; + bool hasCuDNN() const override; + bool hasCuSOLVER() const override; + bool hasCuBLASLt() const override; + bool hasROCM() const override; + const at::cuda::NVRTC& nvrtc() const override; + DeviceIndex current_device() const override; + bool hasPrimaryContext(DeviceIndex device_index) const override; + Allocator* getCUDADeviceAllocator() const override; + Allocator* getPinnedMemoryAllocator() const override; + bool compiledWithCuDNN() const override; + bool compiledWithMIOpen() const override; + bool supportsDilatedConvolutionWithCuDNN() const override; + bool supportsDepthwiseConvolutionWithCuDNN() const override; + bool supportsBFloat16ConvolutionWithCuDNNv8() const override; + bool hasCUDART() const override; + long versionCUDART() const override; + long versionCuDNN() const override; + std::string showConfig() const override; + double batchnormMinEpsilonCuDNN() const override; + int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex device_index) const override; + void cuFFTSetPlanCacheMaxSize(DeviceIndex device_index, int64_t max_size) const override; + int64_t cuFFTGetPlanCacheSize(DeviceIndex device_index) const override; + void cuFFTClearPlanCache(DeviceIndex device_index) const override; + int getNumGPUs() const override; +#ifdef USE_ROCM + bool isGPUArch(DeviceIndex device_index, const std::vector& archs) const override; +#endif + void deviceSynchronize(DeviceIndex device_index) const override; +}; + +} // at::cuda::detail diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h new file mode 100644 index 
0000000000000000000000000000000000000000..1f80c863b63944a25aacb3aa8b95d0b82b6c110b --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h @@ -0,0 +1,151 @@ +// Some stateful GPU libraries, such as cuDNN, cuBLAS, use handles to store states. +// These handles are tied to device, and these libraries requires/recommends not to +// share handles across host threads. +// +// These libraries recommend using one handle per host thread. We may not want to do +// this because threads are relatively light-weight, but creating and destroying +// handles is expensive (destroying the handle causes synchronizations). DataParallel, +// for example, creates new threads for each forward pass. +// +// This file implements a handle pool mechanism. The handle pool returns handles on +// demand as threads request them. If all existing handles in the pool are in use, +// it creates a new one. As threads terminate, they release handles back into the pool. +// In this way, the handle pool never creates more handles than the high-water mark of +// active threads, so it's efficient with DataParallel. + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace at::cuda { namespace { + +template +struct DeviceThreadHandlePool : public std::enable_shared_from_this> { + + struct Handle { + Handle_t handle; + Handle(bool create = false) : handle(nullptr) + { + if(create) Create(&handle); + } + // std::vector.emplace() and push_back() may route through temporaries and call + // copy/move constructors along the way. If this is the case, we don't want + // the destructors of temporaries to call cudnnDestroy on the handle. + // We can achieve safety (for the narrow case of stashing within std::vectors) + // by making Handle moveable but not copyable, and transferring handle ownership + // to the latest constructed object. This is not a substitute for full-blown + // reference counting, but reference counting may be overkill here. + // Another alternative is to wrap the saved Handles in unique_ptrs, i.e., + // unordered_map>> created_handles; + Handle(const Handle& rhs) = delete; + // Following https://stackoverflow.com/questions/3279543/what-is-the-copy-and-swap-idiom + Handle(Handle&& rhs) noexcept : Handle() { std::swap(handle, rhs.handle); } + // operator= takes argument by value + Handle& operator=(Handle rhs) { std::swap(handle, rhs.handle); return *this; } + ~Handle() { + if(handle) Destroy(handle); + } + }; + + std::mutex mutex; + + // Handles are lazily created as different threads request them, + // but are never destroyed until the end of the process. + // The maximum number of handles this process will create for each device is equal + // to the high-water mark of the number of concurrently active threads that request + // handles for that device. + // When threads terminate, they release their handles back into the pool for reuse. + // Otherwise, new handles would be created every time new threads were spawned, + // resulting in poor performance for Python modules that repeatedly or frequently + // spawned new sets of threads (like DataParallel, which creates a new set of threads + // for each forward pass). + // + // To prevent potential deadlocks, we explicitly choose not to cap the number + // of handles that are created per device. + // Example of danger: If we cap the max handles at 4, and 5 threads are sharing a device, + // only 4 can make forward progress at any time. 
The other 4 will not release their + // handles until they exit, so the fifth cannot make progress until then. This is + // not a problem...UNLESS all 5 threads attempt some sort of synchronization at an + // intermediate point (ie, before any of them have exited). We have no way to anticipate + // or enforce that user threads will not attempt such intermediate synchronization. + // The only way to ensure safety is to avoid imposing a cap on the number of handles. + std::unordered_map> created_handles; + std::unordered_map> available_handles; + + // PoolWindow lazily creates and caches the handles that a particular thread is using, + // so in the common case handle access doesn't incur either handle creation or a mutex lock. + class PoolWindow + { + public: + PoolWindow(std::shared_ptr parent): weak_parent(std::move(parent)) {} + ~PoolWindow(){ release(); } + + Handle_t reserve(int device) + { + // If this thread already has a handle for this device, return it + if(my_handles.find(device) != my_handles.end()) + return my_handles[device]; + + // otherwise, either grab a handle from the pool if one is available, + // or if not, create a new one. + auto parent = weak_parent.lock(); + TORCH_CHECK(parent, "Cannot create handle during program termination"); + std::lock_guard guard(parent->mutex); + + if(parent->available_handles[device].size() > 0) + { + my_handles[device] = parent->available_handles[device].back(); + parent->available_handles[device].pop_back(); + } + else + { + // In local testing, I do observe that emplace_back sometimes routes through temporaries + // that incur move-constructor and destructor calls. See comments in Handle above. + parent->created_handles[device].emplace_back(true /*create*/); + my_handles[device] = parent->created_handles[device].back().handle; + } + + return my_handles[device]; + } + + private: + // Stores the per-device handles currently owned by this thread + std::unordered_map my_handles; + + std::weak_ptr weak_parent; + + // Called by the destructor. Releases this thread's handles back into the pool. + void release() { + if(my_handles.size() > 0) { + auto parent = weak_parent.lock(); + if (!parent) { + // If this thread exits after atexit handlers have completed, the + // cuda context itself may be invalid, so we must leak the handles. + return; + } + + std::lock_guard guard(parent->mutex); + for(auto d_h : my_handles) + parent->available_handles[d_h.first].push_back(d_h.second); + } + } + }; + + // Warning: + // If you want to change this function, be aware that this function will be called + // by multiple threads and there is no mutex guarding the call of this function, so + // make sure your implementation is thread-safe. + PoolWindow *newPoolWindow() { + // The returned pointer will be owned by a thread local variable + // so that different threads does not share the same PoolWindow. 
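// Illustrative sketch (standalone, not part of the upstream header): the pool
// behaviour described above with a plain int standing in for cuDNN/cuBLAS
// handles. A reserve() call reuses a released handle for that device if one
// exists and only creates a new one otherwise, so the number of handles ever
// created tracks the high-water mark of concurrently active threads.
#include <cstdio>
#include <mutex>
#include <unordered_map>
#include <vector>

namespace pool_demo {

using Handle = int;                        // stand-in for cudnnHandle_t etc.
static int next_handle_id = 0;
Handle CreateHandle() { return ++next_handle_id; }  // pretend this is expensive

struct Pool {
  std::mutex mutex;
  std::unordered_map<int, std::vector<Handle>> available;  // device -> free handles

  Handle reserve(int device) {
    std::lock_guard<std::mutex> guard(mutex);
    auto& free_list = available[device];
    if (!free_list.empty()) {              // reuse before creating
      Handle h = free_list.back();
      free_list.pop_back();
      return h;
    }
    return CreateHandle();
  }
  void release(int device, Handle h) {
    std::lock_guard<std::mutex> guard(mutex);
    available[device].push_back(h);        // never destroyed, only recycled
  }
};

}  // namespace pool_demo

int main() {
  pool_demo::Pool pool;
  auto h0 = pool.reserve(/*device=*/0);    // creates handle 1
  pool.release(0, h0);                     // owning thread "exits", handle returns
  auto h1 = pool.reserve(0);               // reuses handle 1, nothing new created
  std::printf("h0=%d h1=%d created=%d\n", h0, h1, pool_demo::next_handle_id);
  return 0;
}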
+ return new PoolWindow(this->shared_from_this()); + } +}; + +}} // namespace at::cuda::detail:: diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..db8519389e9ff567b11fd9a3bb31ac3d8646fe92 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IndexUtils.cuh @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include + +namespace at::cuda::detail { + +TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t); +using at::native::canUse32BitIndexMath; + +template +TensorInfo +getTensorInfo(const at::TensorBase &t) { + IndexType sz[MAX_TENSORINFO_DIMS]; + IndexType st[MAX_TENSORINFO_DIMS]; + + int dims = t.dim(); + for (int i = 0; i < dims; ++i) { + sz[i] = t.size(i); + st[i] = t.stride(i); + } + + scalar* data_ptr = nullptr; + + if constexpr (std::is_const::value) { + data_ptr = t.const_data_ptr(); + } else { + data_ptr = t.mutable_data_ptr(); + } + + return TensorInfo( + data_ptr, dims, sz, st); +} + +} // namespace at::cuda::detail diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e8a26b5e06a6ffeeeaf5df26f09175a2c903aa01 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/IntegerDivider.cuh @@ -0,0 +1,124 @@ +#pragma once + +#include +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) +#include +#endif + +namespace at::cuda::detail { + +// A utility class to implement integer division by multiplication, given a fixed +// divisor. +// +// WARNING: The fast divider algorithm is only implemented for unsigned int; +// otherwise we default to plain integer division. For unsigned int, +// we further assume that the dividend is at most INT32_MAX. Thus, +// IntDivider must NOT be used for general integer division. +// +// This reduced range is enough for our purpose, and it allows us to +// slightly simplify the computation. +// +// (NOTE: Below, "2^k" denotes exponentiation, i.e., 1< 0), we can find a "magic number" m (2^N +// <= m < 2^(N+1)) and shift s such that: +// +// \floor(n / d) = \floor((m * n) / 2^(N+s)). +// +// Given such m and s, the integer division can be then implemented as: +// +// let m' = m - 2^N // 0 <= m' < 2^N +// +// fast_integer_division(n): +// // Multiply two N-bit unsigned integers: the result is a 2N-bit unsigned +// // integer. Then take the higher N bits. +// t = (m' * n) >> N +// +// // Here we use the fact that n is less than 2^(N-1): otherwise the value +// // of (t + n) may not fit in an N-bit integer. +// return (t + n) >> s +// +// Finding such a magic number is surprisingly easy: +// +// s = \ceil(\log_2 d) +// m' = \floor(2^N * (2^s - d) / d) + 1 // Need 2N-bit integer arithmetic. +// +// See also: +// - Division by Invariant Integers Using Multiplication, +// Torbjörn Granlund and Peter L. Montgomery, 1994. +// +// - http://www.hackersdelight.org/magic.htm +// +// - http://ridiculousfish.com/blog/posts/labor-of-division-episode-i.html + +// Result of div/mod operation stored together. 
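// Illustrative sketch (standalone, not part of the upstream header): the
// magic-number construction described above for N = 32, checked against plain
// division on a few dividends. It assumes the same restrictions as IntDivider:
// 1 <= divisor <= INT32_MAX and dividend <= INT32_MAX.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

int main() {
  const uint32_t d = 7;                         // example divisor

  // s = ceil(log2 d): smallest shift with 2^s >= d.
  uint32_t shift = 0;
  while ((uint64_t{1} << shift) < d) shift++;

  // m' = floor(2^32 * (2^s - d) / d) + 1, computed in 64-bit arithmetic.
  const uint64_t one = 1;
  const uint32_t m1 =
      static_cast<uint32_t>(((one << 32) * ((one << shift) - d)) / d + 1);

  // fast_div(n) = (hi32(m' * n) + n) >> s
  auto fast_div = [&](uint32_t n) {
    uint64_t t = (static_cast<uint64_t>(n) * m1) >> 32;  // high half of the product
    return static_cast<uint32_t>((t + n) >> shift);
  };

  for (uint32_t n : {0u, 1u, 6u, 7u, 123456u, 2147483647u}) {
    assert(fast_div(n) == n / d);
    std::printf("%u / %u = %u\n", n, d, fast_div(n));
  }
  return 0;
}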
+template +struct DivMod { + Value div, mod; + + C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { } +}; + +// Base case: we only have an implementation for uint32_t for now. For +// everything else, we use plain division. +template +struct IntDivider { + IntDivider() = default; + IntDivider(Value d) : divisor(d) { } + + C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; } + C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; } + C10_HOST_DEVICE inline DivMod divmod(Value n) const { + return DivMod(n / divisor, n % divisor); + } + + Value divisor; +}; + +// Implement fast integer division. +template <> +struct IntDivider { + static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int."); + + IntDivider() = default; + + IntDivider(unsigned int d) : divisor(d) { + assert(divisor >= 1 && divisor <= INT32_MAX); + + // TODO: gcc/clang has __builtin_clz() but it's not portable. + for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break; + + uint64_t one = 1; + uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1; + m1 = magic; + assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits. + } + + C10_HOST_DEVICE inline unsigned int div(unsigned int n) const { +#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) + // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and + // 'm1'. + unsigned int t = __umulhi(n, m1); + return (t + n) >> shift; +#else + // Using uint64_t so that the addition does not overflow. + uint64_t t = ((uint64_t) n * m1) >> 32; + return (t + n) >> shift; +#endif + } + + C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const { + return n - div(n) * divisor; + } + + C10_HOST_DEVICE inline DivMod divmod(unsigned int n) const { + unsigned int q = div(n); + return DivMod(q, n - q * divisor); + } + + unsigned int divisor; // d above. + unsigned int m1; // Magic number: m' above. + unsigned int shift; // Shift amounts. +}; + +} // namespace at::cuda::detail diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h new file mode 100644 index 0000000000000000000000000000000000000000..95e52c94377bf568060c25f887454dbbaf2054b7 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h @@ -0,0 +1,11 @@ +#pragma once +#include +namespace at::cuda { +// Forward-declares at::cuda::NVRTC +struct NVRTC; + +namespace detail { +extern NVRTC lazyNVRTC; +} // namespace detail + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2959fe8e65b87e4113f02225079bf1ff7ef8cbc7 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/OffsetCalculator.cuh @@ -0,0 +1,119 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +// If element_sizes is nullptr, then the strides will be in bytes, otherwise +// the strides will be in # of elements. +// Operands that share the same shape, but may have different strides. 
+// OffsetCalculator iterates the tensor in a column-major order + +#if defined(USE_ROCM) +constexpr int MAX_DIMS = 16; +#else +constexpr int MAX_DIMS = 25; +#endif + +template +struct OffsetCalculator { + // We allow having negative strides to implement some operations like torch.flip + using stride_t = std::conditional_t, + index_t>; + // The offset for each argument. Wrapper around fixed-size array. + // On CUDA, zero sized array is not allowed, so when we are handling nullary + // operators, we need to create a size 1 offset to avoid compiler failure. + // This size 1 offset is just a placeholder, and we will not use it. + using offset_type = at::detail::Array(NARGS, 1)>; + + // if element_sizes is nullptr, then the strides will be in bytes, otherwise + // the strides will be in # of elements. + OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides, const int64_t* element_sizes=nullptr) : dims(dims) { + TORCH_CHECK(dims <= MAX_DIMS, "tensor has too many (>", MAX_DIMS, ") dims"); + for (int i=0; i < dims; i++){ + sizes_[i] = at::cuda::detail::IntDivider(sizes[i]); + for (int arg = 0; arg < NARGS; arg++) { + int64_t element_size = (element_sizes == nullptr ? 1LL : element_sizes[arg]); + strides_[i][arg] = strides[arg][i] / element_size; + } + } + } + + C10_HOST_DEVICE offset_type get(index_t linear_idx) const { + offset_type offsets; + #pragma unroll + for (int arg = 0; arg < NARGS; arg++) { + offsets[arg] = 0; + } + + #pragma unroll + for (int dim = 0; dim < MAX_DIMS; ++dim) { + if (dim == dims) { + break; + } + auto divmod = sizes_[dim].divmod(linear_idx); + linear_idx = divmod.div; + + #pragma unroll + for (int arg = 0; arg < NARGS; arg++) { + offsets[arg] += divmod.mod * strides_[dim][arg]; + } + + } + return offsets; + } + + int dims; + at::cuda::detail::IntDivider sizes_[MAX_DIMS]; + stride_t strides_[MAX_DIMS][std::max(NARGS, 1)]; +}; + +template +struct TrivialOffsetCalculator { + // The offset for each argument. Wrapper around fixed-size array. + // The offsets are in # of elements, not in bytes. + // On CUDA, zero sized array is not allowed, so when we are handling nullary + // operators, we need to create a size 1 offset to avoid compiler failure. + // This size 1 offset is just a placeholder, and we will not use it. 
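// Illustrative sketch (standalone, not part of the upstream header): what
// OffsetCalculator::get computes, written with ordinary / and % instead of
// IntDivider. A linear index is peeled one dimension at a time, fastest
// dimension first (the column-major iteration order mentioned above), and each
// remainder is multiplied by that dimension's stride to build the offset.
#include <cstdint>
#include <cstdio>
#include <vector>

int64_t offset_for(int64_t linear_idx,
                   const std::vector<int64_t>& sizes,
                   const std::vector<int64_t>& strides) {
  int64_t offset = 0;
  for (size_t dim = 0; dim < sizes.size(); ++dim) {
    int64_t coord = linear_idx % sizes[dim];  // divmod: coordinate in this dim
    linear_idx /= sizes[dim];
    offset += coord * strides[dim];
  }
  return offset;
}

int main() {
  // A contiguous 2x3 row-major tensor: sizes listed fastest-varying first,
  // strides in elements, so the offset should equal the linear index.
  std::vector<int64_t> sizes = {3, 2};
  std::vector<int64_t> strides = {1, 3};
  for (int64_t i = 0; i < 6; ++i) {
    std::printf("linear %lld -> element offset %lld\n",
                static_cast<long long>(i),
                static_cast<long long>(offset_for(i, sizes, strides)));
  }
  return 0;
}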
+ using offset_type = at::detail::Array(NARGS, 1)>; + + C10_HOST_DEVICE offset_type get(index_t linear_idx) const { + offset_type offsets; + #pragma unroll + for (int arg = 0; arg < NARGS; arg++) { + offsets[arg] = linear_idx; + } + return offsets; + } +}; + +// Make an OffsetCalculator with byte offsets +template +static OffsetCalculator make_offset_calculator(const at::TensorIteratorBase& iter) { + TORCH_INTERNAL_ASSERT(N <= iter.ntensors()); + std::array strides; + for (int i = 0; i < N; i++) { + strides[i] = iter.strides(i).data(); + } + return OffsetCalculator(iter.ndim(), iter.shape().data(), strides.data()); +} + +// Make an OffsetCalculator with element offsets +template +static OffsetCalculator make_element_offset_calculator( + const at::TensorIteratorBase& iter) { + TORCH_INTERNAL_ASSERT(N <= iter.ntensors()); + std::array strides; + std::array element_sizes; + for (int i = 0; i < N; i++) { + strides[i] = iter.strides(i).data(); + element_sizes[i] = iter.element_size(i); + } + return OffsetCalculator( + iter.ndim(), iter.shape().data(), strides.data(), element_sizes.data()); +} diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh new file mode 100644 index 0000000000000000000000000000000000000000..231cd167cacb4f9b4f2f48e159431b30f3d6dc28 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh @@ -0,0 +1,43 @@ +// No "#pragma once" because this is a raw definition that can be copied by jit codegen. +// Eager mode clients should not include this file directly, instead, +// they should #include , which has a #pragma once. + +// Stores RNG state values. Passed as a kernel argument. +// See Note [CUDA Graph-safe RNG states]. +// +// The raw definition lives in its own file so jit codegen can easily copy it. +namespace at { + +struct PhiloxCudaState { + PhiloxCudaState() = default; + // Called if graph capture is not underway + PhiloxCudaState(uint64_t seed, + uint64_t offset) { + seed_.val = seed; + offset_.val = offset; + } + // Called if graph capture is underway + PhiloxCudaState(int64_t* seed, + int64_t* offset_extragraph, + uint32_t offset_intragraph) { + seed_.ptr = seed; + offset_.ptr = offset_extragraph; + offset_intragraph_ = offset_intragraph; + captured_ = true; + } + + // Public members, directly accessible by at::cuda::philox::unpack. + // If we made them private with getters/setters, the getters/setters + // would have to be __device__, and we can't declare __device__ in ATen. + union Payload { + uint64_t val; + int64_t* ptr; + }; + + Payload seed_{}; + Payload offset_{}; + uint32_t offset_intragraph_ = 0; + bool captured_ = false; +}; + +} // namespace at diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh new file mode 100644 index 0000000000000000000000000000000000000000..70cd222a484844cdf4f4cb222af2ac6408598cbd --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/UnpackRaw.cuh @@ -0,0 +1,28 @@ +// No "#pragma once" because this is a raw definition that can be copied by jit codegen. +// Eager mode clients should not include this file directly, instead, +// they should #include , which has a #pragma once. 
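// Illustrative sketch (standalone, host-only, not part of the upstream
// headers): the two ways a PhiloxCudaState-like struct is filled in, and how a
// consumer reads it back, mirroring the value/pointer union described above.
// Outside graph capture the seed and offset travel by value; during capture
// they travel as pointers plus a per-launch intragraph offset.
#include <cstdint>
#include <cstdio>
#include <tuple>

struct PhiloxStateSketch {
  union Payload { uint64_t val; int64_t* ptr; };
  Payload seed_{};
  Payload offset_{};
  uint32_t offset_intragraph_ = 0;
  bool captured_ = false;
};

std::tuple<uint64_t, uint64_t> read_state(const PhiloxStateSketch& s) {
  if (s.captured_) {
    // Captured: dereference the graph-owned slots and add the per-launch offset.
    return {static_cast<uint64_t>(*s.seed_.ptr),
            static_cast<uint64_t>(*s.offset_.ptr) + s.offset_intragraph_};
  }
  return {s.seed_.val, s.offset_.val};  // eager: plain values
}

int main() {
  PhiloxStateSketch eager;
  eager.seed_.val = 42;
  eager.offset_.val = 100;

  int64_t graph_seed = 42, graph_offset = 100;
  PhiloxStateSketch captured;
  captured.captured_ = true;
  captured.seed_.ptr = &graph_seed;
  captured.offset_.ptr = &graph_offset;
  captured.offset_intragraph_ = 16;  // offset already consumed by earlier launches

  auto [s1, o1] = read_state(eager);
  auto [s2, o2] = read_state(captured);
  std::printf("eager: seed=%llu offset=%llu  captured: seed=%llu offset=%llu\n",
              (unsigned long long)s1, (unsigned long long)o1,
              (unsigned long long)s2, (unsigned long long)o2);
  return 0;
}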
+ +namespace at::cuda::philox { + +// In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether +// that instance was created with graph capture underway or not. +// See Note [CUDA Graph-safe RNG states]. +// +// We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen. +// Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable. +// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda. +// +// The raw definition lives in its own file so jit codegen can easily copy it. +__host__ __device__ __forceinline__ std::tuple +unpack(at::PhiloxCudaState arg) { + if (arg.captured_) { + // static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long". + // *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel. + // For most threads' reads it will hit in cache, so it shouldn't hurt performance. + return std::make_tuple(static_cast(*arg.seed_.ptr), static_cast(*(arg.offset_.ptr) + arg.offset_intragraph_)); + } else { + return std::make_tuple(arg.seed_.val, arg.offset_.val); + } +} + +} // namespace at::cuda::philox diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h new file mode 100644 index 0000000000000000000000000000000000000000..a7a440cd7b614732d5ad16eb28c56e71bb20b6e1 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator.h @@ -0,0 +1,40 @@ +#pragma once +#include + +#if AT_USE_JITERATOR() + +#include +#include +#include + +#include +#include + +namespace at::cuda { + +TORCH_CUDA_CPP_API c10::SmallVector CompileAndLaunchKernel( + const std::string& code_string, + const std::string& kernel_name, + const int num_outputs, + const c10::SmallVector& tensors, + const c10::SmallVector& extra_args, + bool return_by_ref); + +} // namespace at::cuda + +#else + +namespace at::cuda { + +TORCH_CUDA_CPP_API c10::SmallVector CompileAndLaunchKernel( + const std::string& code_string, + const std::string& kernel_name, + const int num_outputs, + const c10::SmallVector& tensors, + const c10::SmallVector& extra_args, + bool return_by_ref) { + TORCH_CHECK(false, "Jiterator is not supported"); + } +} // namespace at::cuda + +#endif // AT_USE_JITERATOR() diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..495cfffddcee9c49b722adc6b30e27363d066854 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h @@ -0,0 +1,249 @@ +#pragma once +#include + +#if AT_USE_JITERATOR() + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace at::native { + + +#define AT_FOR_8_CASES(_) \ + _(1) \ + _(2) \ + _(3) \ + _(4) \ + _(5) \ + _(6) \ + _(7) \ + _(8) + +#define AT_FOR_8_CASES_WITH_COMMA(_) \ + _(1) , \ + _(2) , \ + _(3) , \ + _(4) , \ + _(5) , \ + _(6) , \ + _(7) , \ + _(8) + +c10::SmallVector get_extra_args_typenames(const c10::SmallVector& extra_args) { + c10::SmallVector args_typenames(extra_args.size()); + for (const auto i : c10::irange(extra_args.size())) { + args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type()); + } + return args_typenames; +} + +int can_vectorize_up_to(at::ScalarType type, char* pointer) { + switch(type) { +#define 
DEFINE_CASE(ctype, scalartype) \ + case ScalarType::scalartype : return memory::can_vectorize_up_to(pointer); + + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE) +#undef DEFINE_CASE + + default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type); + } +} + +// jitted version of the above +// See Note [Jiterator], this relies on the assumptions enumerated there +int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) { + const at::ScalarType common_dtype = iter.common_dtype(); + const at::ScalarType result_dtype = common_dtype; + + // Deals with output + int result = can_vectorize_up_to(result_dtype, static_cast(iter.data_ptr(0))); + + // Incorporates input(s) + for (auto i = 1; i < iter.ntensors(); ++i) { + result = std::min(result, can_vectorize_up_to(common_dtype, static_cast(iter.data_ptr(i)))); + } + + return result; +} + +template +static std::unique_ptr> make_unique_offset_calculator( + const TensorIteratorBase& iter) { + // array size can not be 0, this happens when N == 0 + constexpr int array_size = std::max(N, 1); + TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs())); + + std::array strides; + int64_t element_sizes[array_size]; + for (int i = 0; i < N; i++) { + int index = IS_INPUT ? i + iter.noutputs() : i; + strides[i] = iter.strides(index).data(); + element_sizes[i] = iter.element_size(index); + } + return std::make_unique>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes); +} + +template +struct OffsetCalculatorVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using OffsetCalculatorTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + OffsetCalculatorVariant(const TensorIteratorBase& iter) { + int num = IS_INPUT ? iter.ninputs() : iter.noutputs(); + + switch(num) { +#define DEFINE_CASE(index) \ + case index : v = make_unique_offset_calculator(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + default: + TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + + private: + OffsetCalculatorTypes v{}; +}; + +struct ArrayVariant { +// works for up to 8 input + 8 outputs +#define DEFINE_CASE(index) at::detail::Array, at::detail::Array + using ArrayTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + ArrayVariant(const TensorIteratorBase& iter) { + int ntensors = iter.ntensors(); + switch(ntensors) { +#define DEFINE_CASE(index) \ + case index: array = at::detail::Array{}; break; \ + case index+8: array = at::detail::Array{}; break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors); + } + + std::visit([&](auto& a) { + for (auto i = 0; i < ntensors; ++i) { + a[i] = (char*)iter.data_ptr(i); + } + }, array); + } + + void* data_ptr() { + return std::visit([](auto & a){ return static_cast(&a); }, array); + } + +private: + ArrayTypes array; +}; + +struct TrivialOffsetCalculatorVariant { +#define DEFINE_CASE(index) TrivialOffsetCalculator + using TrivialOffsetCalculatorTypes = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + TrivialOffsetCalculatorVariant(int num) { + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = TrivialOffsetCalculator(); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, 
"TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(&v); }, v); + } + +private: + TrivialOffsetCalculatorTypes v{}; +}; + +struct LoadWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using LoadWithCastPtr = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + LoadWithCastVariant(const TensorIteratorBase& iter) { + int arity = iter.ninputs(); + switch(arity) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + LoadWithCastPtr v{}; +}; + +struct StoreWithCastVariant { +#define DEFINE_CASE(index) std::unique_ptr> + using StoreWithCastPtr = std::variant< + AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) + >; +#undef DEFINE_CASE + + StoreWithCastVariant(const TensorIteratorBase& iter) { + int num = iter.noutputs(); + switch(num) { +#define DEFINE_CASE(index) \ + case index: v = std::make_unique>(iter); break; + + AT_FOR_8_CASES(DEFINE_CASE) +#undef DEFINE_CASE + + default: + TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num); + } + } + + void* data_ptr() { + return std::visit([](auto & v){ return static_cast(v.get()); }, v); + } + +private: + StoreWithCastPtr v{}; +}; + +} // namespace at::native + + +#endif // AT_USE_JITERATOR() diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..aba40d4f42eac74ee9435d904ac3fb82d1e988c4 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include + +namespace at::cuda { + +TORCH_CUDA_CPP_API const std::string &get_traits_string(); +TORCH_CUDA_CPP_API const std::string &get_cmath_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_half_body_string(); +TORCH_CUDA_CPP_API const std::string &get_complex_math_string(); + +} // namespace at::cuda diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h new file mode 100644 index 0000000000000000000000000000000000000000..483b4fb7a91a07b871c804646da0a8e3863b51e7 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/GemmHipblaslt.h @@ -0,0 +1,611 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define TORCH_HIPBLASLT_CHECK(EXPR) \ + do { \ + hipblasStatus_t __err = EXPR; \ + TORCH_CHECK(__err == HIPBLAS_STATUS_SUCCESS, \ + "hipblaslt error: ", \ + hipblasStatusToString(__err), \ + " when calling `" #EXPR "`"); \ + } while (0) + +namespace at::cuda::tunable { + +template +constexpr hipblasDatatype_t HipDataTypeFor(); + +template <> +constexpr hipblasDatatype_t HipDataTypeFor() { + return HIP_R_32F; +} + +template <> +constexpr hipblasDatatype_t HipDataTypeFor() { + return HIP_R_16F; +} + +template <> +constexpr hipblasDatatype_t HipDataTypeFor() { + return HIP_R_16BF; +} + +template <> +constexpr hipblasDatatype_t HipDataTypeFor() { + return HIP_R_64F; +} + +template <> +constexpr hipblasDatatype_t HipDataTypeFor() { + return HIP_R_8F_E4M3_FNUZ; +} + +template <> +constexpr hipblasDatatype_t HipDataTypeFor() { + return HIP_R_8F_E5M2_FNUZ; +} + +template +int GetBatchFromParams(const GemmParams* params) { + return 1; +} + +template +int GetBatchFromParams(const GemmAndBiasParams* params) { + return 1; +} + +template +int GetBatchFromParams(const GemmStridedBatchedParams* params) { + return params->batch; +} + +template +int GetBatchFromParams(const ScaledGemmParams* params) { + return 1; +} + +template +int GetStrideAFromParams(const GemmParams* params) { + return 1; +} + +template +int GetStrideAFromParams(const GemmAndBiasParams* params) { + return 1; +} + +template +int GetStrideAFromParams(const GemmStridedBatchedParams* params) { + return params->stride_a; +} + +template +int GetStrideAFromParams(const ScaledGemmParams* params) { + return 1; +} + +template +int GetStrideBFromParams(const GemmParams* params) { + return 1; +} + +template +int GetStrideBFromParams(const GemmAndBiasParams* params) { + return 1; +} + +template +int GetStrideBFromParams(const GemmStridedBatchedParams* params) { + return params->stride_b; +} + +template +int GetStrideBFromParams(const ScaledGemmParams* params) { + return 1; +} + +template +int GetStrideCFromParams(const GemmParams* params) { + return 1; +} + +template +int GetStrideCFromParams(const GemmAndBiasParams* params) { + return 1; +} + +template +int GetStrideCFromParams(const GemmStridedBatchedParams* params) { + return params->stride_c; +} + +template +int GetStrideCFromParams(const ScaledGemmParams* params) { + return 1; +} + +template +float GetAlphaFromParams(const GemmParams* params) { + return params->alpha; +} + +template +float GetAlphaFromParams(const GemmAndBiasParams* params) { + return params->alpha; +} + +template +float GetAlphaFromParams(const GemmStridedBatchedParams* params) { + return params->alpha; +} + +template +float GetAlphaFromParams(const ScaledGemmParams* params) { + return 1.0; +} + +template +float GetBetaFromParams(const GemmParams* params) { + return params->beta; +} + +template +float GetBetaFromParams(const GemmAndBiasParams* params) { + return 0.0; +} + +template +float GetBetaFromParams(const GemmStridedBatchedParams* params) { + return params->beta; +} + +template +float GetBetaFromParams(const ScaledGemmParams* params) { + return 0.0; +} + +template +const void* GetAScalePointerFromParams(const GemmParams* params) { + return nullptr; +} + +template +const void* GetAScalePointerFromParams(const GemmAndBiasParams* params) { + return nullptr; +} + +template +const void* GetAScalePointerFromParams(const GemmStridedBatchedParams* params) { + return nullptr; +} + +template +const void* 
GetAScalePointerFromParams(const ScaledGemmParams* params) { + return params->a_scale_ptr; +} + +template +const void* GetBScalePointerFromParams(const GemmParams* params) { + return nullptr; +} + +template +const void* GetBScalePointerFromParams(const GemmAndBiasParams* params) { + return nullptr; +} + +template +const void* GetBScalePointerFromParams(const GemmStridedBatchedParams* params) { + return nullptr; +} + +template +const void* GetBScalePointerFromParams(const ScaledGemmParams* params) { + return params->b_scale_ptr; +} + +template +const void* GetDScalePointerFromParams(const GemmParams* params) { + return nullptr; +} + +template +const void* GetDScalePointerFromParams(const GemmAndBiasParams* params) { + return nullptr; +} + +template +const void* GetDScalePointerFromParams(const GemmStridedBatchedParams* params) { + return nullptr; +} + +template +const void* GetDScalePointerFromParams(const ScaledGemmParams* params) { + return params->c_scale_ptr; +} + +template +const void* GetBiasPointerFromParams(const GemmParams* params) { + return nullptr; +} + +template +const void* GetBiasPointerFromParams(const GemmAndBiasParams* params) { + return params->bias; +} + +template +const void* GetBiasPointerFromParams(const GemmStridedBatchedParams* params) { + return nullptr; +} + +template +const void* GetBiasPointerFromParams(const ScaledGemmParams* params) { + return params->bias_ptr; +} + +template +hipDataType GetBiasTypeFromParams(const GemmParams* params) { + return HIP_R_32F; +} + +template +hipDataType GetBiasTypeFromParams(const GemmAndBiasParams* params) { + return HipDataTypeFor(); +} + +template +hipDataType GetBiasTypeFromParams(const GemmStridedBatchedParams* params) { + return HIP_R_32F; +} + +template +hipDataType GetBiasTypeFromParams(const ScaledGemmParams* params) { + return at::cuda::ScalarTypeToCudaDataType(params->bias_dtype); +} + +template +at::cuda::blas::GEMMAndBiasActivationEpilogue GetActivationFromParams(const GemmParams* params) { + return at::cuda::blas::GEMMAndBiasActivationEpilogue::None; +} + +template +at::cuda::blas::GEMMAndBiasActivationEpilogue GetActivationFromParams(const GemmAndBiasParams* params) { + return params->activation; +} + +template +at::cuda::blas::GEMMAndBiasActivationEpilogue GetActivationFromParams(const GemmStridedBatchedParams* params) { + return at::cuda::blas::GEMMAndBiasActivationEpilogue::None; +} + +template +at::cuda::blas::GEMMAndBiasActivationEpilogue GetActivationFromParams(const ScaledGemmParams* params) { + return at::cuda::blas::GEMMAndBiasActivationEpilogue::None; +} + +static hipblasOperation_t _hipblasOpFromChar(char op) { + switch (op) { + case 'n': + case 'N': + return HIPBLAS_OP_N; + case 't': + case 'T': + return HIPBLAS_OP_T; + case 'c': + case 'C': + return HIPBLAS_OP_C; + } + AT_ERROR( + "_hipblasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`"); +} + +static char _charFromhipblasOp(hipblasOperation_t op) { + switch (op) { + case HIPBLAS_OP_N: + return 'N'; + case HIPBLAS_OP_T: + return 'T'; + case HIPBLAS_OP_C: + return 'C'; + } + AT_ERROR( + "_charFromhipblasOp input should be HIPBLAS_OP_N/T/C but got `", op, "`"); +} + +static hipblasOperation_t MapLayoutToHipBlasLt(BlasOp layout) { + if (layout == BlasOp::N) { + return HIPBLAS_OP_N; + } + return HIPBLAS_OP_T; +} + +static size_t GetHipblasltWorkspaceSize() { + static const char * env = getenv("HIPBLASLT_WORKSPACE_SIZE"); + // 256MB is max workspace size allowed for hipblaslt + // hipblaslt-bench uses 32MB + // recommendation from hipblaslt 
author was 76MB + size_t workspace_size = 32*1024; // going with 32MB + if (env) { + try { + workspace_size = std::stoi(env); + } catch(std::invalid_argument const& e) { + TORCH_WARN("invalid HIPBLASLT_WORKSPACE_SIZE,", + " using default workspace size of ", workspace_size, " KiB."); + } catch(std::out_of_range const& e) { + TORCH_WARN("HIPBLASLT_WORKSPACE_SIZE out of range,", + " using default workspace size of ", workspace_size, " KiB."); + } + } + return workspace_size * 1024; +} + +template +struct HipBlasLtDeleter { + void operator()(T* x) { + if (x != nullptr) { + TORCH_CUDABLAS_CHECK(destructor(x)); + } + } +}; + +template +class HipBlasLtDescriptor { + public: + T* descriptor() const { + return descriptor_.get(); + } + T* descriptor() { + return descriptor_.get(); + } + + protected: + std::unique_ptr> descriptor_; +}; + +class HipBlasLtMatmulDescriptor : public HipBlasLtDescriptor< + hipblasLtMatmulDescOpaque_t, + &hipblasLtMatmulDescDestroy> { + public: + HipBlasLtMatmulDescriptor( + hipblasComputeType_t compute_type, + hipDataType scale_type) { + hipblasLtMatmulDesc_t raw_descriptor = nullptr; + TORCH_HIPBLASLT_CHECK( + hipblasLtMatmulDescCreate(&raw_descriptor, compute_type, scale_type)); + descriptor_.reset(raw_descriptor); + } + template + inline void setAttribute(hipblasLtMatmulDescAttributes_t attr, const T value) { + TORCH_HIPBLASLT_CHECK(::hipblasLtMatmulDescSetAttribute(descriptor(), attr, &value, sizeof(T))); + } +}; + +template +class HipblasltGemmOp : public Callable { + public: + HipblasltGemmOp(hipblasLtMatmulAlgo_t algo) : algo_{algo} {} + + TuningStatus Call(const ParamsT* params) override { + hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout); + hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout); + auto a_datatype = HipDataTypeFor(); + auto b_datatype = HipDataTypeFor(); + auto in_out_datatype = HipDataTypeFor(); + auto opa = _hipblasOpFromChar(params->transa); + auto opb = _hipblasOpFromChar(params->transb); + + TORCH_CHECK(transa_outer == opa && transb_outer == opb, "trans mismatch, shouldn't happen"); + + float alpha = GetAlphaFromParams(params); + float beta = GetBetaFromParams(params); + + hipblasLtMatrixLayout_t mat_a, mat_b, mat_c; + if (opa == HIPBLAS_OP_N) { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, a_datatype, params->m, params->k, params->lda)); + } + else { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_a, a_datatype, params->k, params->m, params->lda)); + } + if (opb == HIPBLAS_OP_N) { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, b_datatype, params->k, params->n, params->ldb)); + } + else { + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_b, b_datatype, params->n, params->k, params->ldb)); + } + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutCreate(&mat_c, in_out_datatype, params->m, params->n, params->ldc)); + + // specific to batched gemmm + int batch = GetBatchFromParams(params); + if (batch > 1) { + int64_t stride_a = GetStrideAFromParams(params); + int64_t stride_b = GetStrideBFromParams(params); + int64_t stride_c = GetStrideCFromParams(params); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_a, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_a, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_a, sizeof(stride_a))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_b, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch))); + 
TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_b, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_b, sizeof(stride_b))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_c, HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch, sizeof(batch))); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutSetAttribute( + mat_c, HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, &stride_c, sizeof(stride_c))); + } + + HipBlasLtMatmulDescriptor matmul(HIPBLAS_COMPUTE_32F, HIP_R_32F); + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_TRANSA, opa); + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_TRANSB, opb); + + // specific to scaled gemm + const void* mat1_scale_ptr = GetAScalePointerFromParams(params); + const void* mat2_scale_ptr = GetBScalePointerFromParams(params); + const void* result_scale_ptr = GetDScalePointerFromParams(params); + if (mat1_scale_ptr && mat2_scale_ptr) { + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_A_SCALE_POINTER, mat1_scale_ptr); + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_B_SCALE_POINTER, mat2_scale_ptr); + } + if (result_scale_ptr) { + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_D_SCALE_POINTER, result_scale_ptr); + } + + const void* bias_ptr = GetBiasPointerFromParams(params); + auto bias_datatype = GetBiasTypeFromParams(params); + if (bias_ptr) { + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_BIAS_POINTER, bias_ptr); + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_BIAS_DATA_TYPE, bias_datatype); + auto activation = GetActivationFromParams(params); + if (activation == at::cuda::blas::GEMMAndBiasActivationEpilogue::RELU) { + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_EPILOGUE, HIPBLASLT_EPILOGUE_RELU_BIAS); + } + else if (activation == at::cuda::blas::GEMMAndBiasActivationEpilogue::GELU) { + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_EPILOGUE, HIPBLASLT_EPILOGUE_GELU_BIAS); + } + else { + matmul.setAttribute(HIPBLASLT_MATMUL_DESC_EPILOGUE, HIPBLASLT_EPILOGUE_BIAS); + } + } + + size_t workspace_size = GetHipblasltWorkspaceSize(); + + auto op_handle = at::cuda::getCurrentCUDABlasLtHandle(); + + size_t ret_workspace_size = 0; + auto status = hipblaslt_ext::matmulIsAlgoSupported(op_handle, + matmul.descriptor(), + &alpha, + mat_a, + mat_b, + &beta, + mat_c, + mat_c, + algo_, + ret_workspace_size); + + if (status == HIPBLAS_STATUS_SUCCESS) { + if (ret_workspace_size >= workspace_size) { + return FAIL; + } + } + else { + return FAIL; + } + + void* workspace_buffer = nullptr; + if (workspace_size > 0) { + workspace_buffer = c10::cuda::CUDACachingAllocator::raw_alloc(workspace_size); + } + + TORCH_HIPBLASLT_CHECK(hipblasLtMatmul(op_handle, + matmul.descriptor(), + &alpha, + params->a, + mat_a, + params->b, + mat_b, + &beta, + params->c, + mat_c, + params->c, + mat_c, + &algo_, + workspace_buffer, + workspace_size, + at::cuda::getCurrentCUDAStream())); + + //TORCH_HIPBLASLT_CHECK(hipblasLtMatmulDescDestroy(matmul)); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_a)); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_b)); + TORCH_HIPBLASLT_CHECK(hipblasLtMatrixLayoutDestroy(mat_c)); + if (workspace_size > 0) { + c10::cuda::CUDACachingAllocator::raw_delete(workspace_buffer); + } + return OK; + } + + private: + hipblasLtMatmulAlgo_t algo_; +}; + +template +auto GetHipBlasLtTypeStringAndOps() { + hipblasOperation_t transa_outer = MapLayoutToHipBlasLt(ALayout); + hipblasOperation_t transb_outer = MapLayoutToHipBlasLt(BLayout); + auto a_datatype = HipDataTypeFor(); + auto b_datatype = HipDataTypeFor(); + auto in_out_datatype = HipDataTypeFor(); + std::vector heuristic_result; + 
+ hipblasLtHandle_t handle; + TORCH_HIPBLASLT_CHECK(hipblasLtCreate(&handle)); + TORCH_HIPBLASLT_CHECK(hipblaslt_ext::getAllAlgos(handle, + hipblaslt_ext::GemmType::HIPBLASLT_GEMM, + transa_outer, + transb_outer, + a_datatype, + b_datatype, + in_out_datatype, + in_out_datatype, + HIPBLAS_COMPUTE_32F, + heuristic_result)); + TORCH_HIPBLASLT_CHECK(hipblasLtDestroy(handle)); + + // Sort heuristic_result by algo index to make sure the order of returned algos is deterministic. + std::sort(heuristic_result.begin(), + heuristic_result.end(), + [](hipblasLtMatmulHeuristicResult_t& a, hipblasLtMatmulHeuristicResult_t& b) { + return hipblaslt_ext::getIndexFromAlgo(a.algo) < hipblaslt_ext::getIndexFromAlgo(b.algo); + }); + + int returned_algo_count = heuristic_result.size(); + std::vector>>> ret; + for (int i = 0; i < returned_algo_count; i++) { + auto algo = heuristic_result[i].algo; + int algo_index = hipblaslt_ext::getIndexFromAlgo(algo); + auto callable = std::make_unique>(algo); + std::string type_string = c10::str( + "Gemm_Hipblaslt_", _charFromhipblasOp(transa_outer), _charFromhipblasOp(transb_outer), "_", algo_index); + ret.emplace_back(type_string, std::move(callable)); + } + + return ret; +} + +template +auto GetHipBlasLtGemmTypeStringAndOps() { + return GetHipBlasLtTypeStringAndOps>(); +} + +template +auto GetHipBlasLtGemmAndBiasTypeStringAndOps() { + return GetHipBlasLtTypeStringAndOps>(); +} + +template +auto GetHipBlasLtGemmStridedBatchedTypeStringAndOps() { + return GetHipBlasLtTypeStringAndOps>(); +} + +template +auto GetHipBlasLtScaledGemmTypeStringAndOps() { + return GetHipBlasLtTypeStringAndOps>(); +} + +#undef TORCH_HIPBLASLT_CHECK + +} // namespace at::cuda::tunable diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h new file mode 100644 index 0000000000000000000000000000000000000000..50a7344b0260c24ab432b3f0462244d3322a5d52 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableGemm.h @@ -0,0 +1,307 @@ +// Original TunableOp is from onnxruntime. +// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h +// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +// Adapting TunableOp into PyTorch +// Copyright (c) Advanced Micro Devices, Inc. 
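// Illustrative sketch (standalone, not part of the upstream headers): the
// shape of the candidate list built above, with plain ints standing in for
// hipblaslt algo handles. Candidates are sorted by their backend index so the
// generated names come out in a deterministic order across runs, which keeps
// tuning results stable and comparable.
#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<int> algo_indices = {7, 2, 5};     // as enumerated by the backend, unordered
  std::sort(algo_indices.begin(), algo_indices.end());

  std::vector<std::pair<std::string, int>> ret;  // (name, algo) candidate list
  for (int idx : algo_indices) {
    ret.emplace_back("Gemm_Hipblaslt_NT_" + std::to_string(idx), idx);
  }
  for (const auto& [name, algo] : ret) {
    std::printf("candidate %s -> algo %d\n", name.c_str(), algo);
  }
  return 0;
}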
+// +#pragma once + +#include +#ifdef USE_ROCM +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at::cuda::tunable { + +template +class DefaultGemmOp : public Callable> { + public: + TuningStatus Call(const GemmParams* params) override { + at::cuda::blas::gemm_internal( + params->transa, params->transb, + params->m, params->n, params->k, + params->alpha, + params->a, params->lda, + params->b, params->ldb, + params->beta, + params->c, params->ldc); + return OK; + } +}; + +static bool _transposeBoolFromChar(char op) { + return op == 't' || op == 'T'; +} + +template +class DefaultGemmAndBiasOp : public Callable> { + public: + TuningStatus Call(const GemmAndBiasParams* params) override { + at::cuda::blas::gemm_and_bias( + _transposeBoolFromChar(params->transa), + _transposeBoolFromChar(params->transb), + params->m, params->n, params->k, + params->alpha, + params->a, params->lda, + params->b, params->ldb, + params->bias, + params->c, params->ldc, + params->activation); + return OK; + } +}; + +template +class DefaultGemmStridedBatchedOp : public Callable> { + public: + TuningStatus Call(const GemmStridedBatchedParams* params) override { + at::cuda::blas::bgemm_internal( + params->transa, params->transb, + params->m, params->n, params->k, + params->alpha, + params->a, params->lda, params->stride_a, + params->b, params->ldb, params->stride_b, + params->beta, + params->c, params->ldc, params->stride_c, + params->batch); + return OK; + } +}; + +template +class DefaultScaledGemmOp : public Callable> { + public: + TuningStatus Call(const ScaledGemmParams* params) override { + at::cuda::blas::scaled_gemm( + params->transa, + params->transb, + params->m, + params->n, + params->k, + params->a, + params->a_scale_ptr, + params->lda, + params->a_dtype, + params->b, + params->b_scale_ptr, + params->ldb, + params->b_dtype, + params->bias_ptr, + params->bias_dtype, + params->c, + params->c_scale_ptr, + params->ldc, + params->c_dtype, + params->amax_ptr, + params->use_fast_accum); + return OK; + } +}; + +template +inline bool IsZero(T v) { + return v == 0.0f; +} + +template <> +inline bool IsZero(BFloat16 v) { + return v.x == 0; +} + +template <> +inline bool IsZero(Half v) { + return float(v) == 0.0f; +} + +template <> +inline bool IsZero(c10::complex v) { + return v == 0.0; +} + +template <> +inline bool IsZero(c10::complex v) { + return v == 0.0f; +} + +template +inline std::string TypeName(T v) { + return "unknown"; +} + +template <> +inline std::string TypeName(float v) { + return "float"; +} + +template <> +inline std::string TypeName(double v) { + return "double"; +} + +template <> +inline std::string TypeName(BFloat16 v) { + return "BFloat16"; +} + +template <> +inline std::string TypeName(Half v) { + return "Half"; +} + +template <> +inline std::string TypeName(Float8_e4m3fn v) { + return "Float8_e4m3fn"; +} + +template <> +inline std::string TypeName(Float8_e5m2 v) { + return "Float8_e5m2"; +} + +template <> +inline std::string TypeName(Float8_e4m3fnuz v) { + return "Float8_e4m3fnuz"; +} + +template <> +inline std::string TypeName(Float8_e5m2fnuz v) { + return "Float8_e5m2fnuz"; +} + +template <> +inline std::string TypeName(c10::complex v) { + return "c10::complex"; +} + +template <> +inline std::string TypeName(c10::complex v) { + return "c10::complex"; +} + +template +class GemmTunableOp : public TunableOp, StreamTimer> { + public: + GemmTunableOp() { + this->RegisterOp(std::string("Default"), std::make_unique>()); + +#ifdef 
USE_ROCM + static const char *env_rocblas = std::getenv("PYTORCH_TUNABLEOP_ROCBLAS_ENABLED"); + if (env_rocblas == nullptr || strcmp(env_rocblas, "1") == 0) { + for (auto&& [name, op] : GetRocBlasGemmTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + } + + static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); + if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) { + // disallow tuning of hipblaslt with c10::complex + if constexpr ( + !std::is_same_v> && + !std::is_same_v>) { + for (auto&& [name, op] : GetHipBlasLtGemmTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + } + } +#endif + } + + std::string Signature() override { + return c10::str("GemmTunableOp_", TypeName(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout)); + } +}; + +template +class GemmAndBiasTunableOp : public TunableOp, StreamTimer> { + public: + GemmAndBiasTunableOp() { + this->RegisterOp(std::string("Default"), std::make_unique>()); + +#ifdef USE_ROCM + static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); + if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) { + // disallow tuning of hipblaslt with c10::complex + if constexpr ( + !std::is_same_v> && + !std::is_same_v>) { + for (auto&& [name, op] : GetHipBlasLtGemmAndBiasTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + } + } +#endif + } + + std::string Signature() override { + return c10::str("GemmAndBiasTunableOp_", TypeName(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout)); + } +}; + +template +class GemmStridedBatchedTunableOp : public TunableOp, StreamTimer> { + public: + GemmStridedBatchedTunableOp() { + this->RegisterOp(std::string("Default"), std::make_unique>()); + +#ifdef USE_ROCM + static const char *env_rocblas = std::getenv("PYTORCH_TUNABLEOP_ROCBLAS_ENABLED"); + if (env_rocblas == nullptr || strcmp(env_rocblas, "1") == 0) { + for (auto&& [name, op] : GetRocBlasGemmStridedBatchedTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + } + + static const char *env_hipblaslt = std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED"); + if (env_hipblaslt == nullptr || strcmp(env_hipblaslt, "1") == 0) { + // disallow tuning of hipblaslt with c10::complex + if constexpr ( + !std::is_same_v> && + !std::is_same_v>) { + for (auto&& [name, op] : GetHipBlasLtGemmStridedBatchedTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } + } + } +#endif + } + + std::string Signature() override { + return c10::str("GemmStridedBatchedTunableOp_", TypeName(T{}), "_", BlasOpToString(ALayout), BlasOpToString(BLayout)); + } +}; + +template +class ScaledGemmTunableOp : public TunableOp, StreamTimer> { + public: + ScaledGemmTunableOp() { + this->RegisterOp(std::string("Default"), std::make_unique>()); + +#ifdef USE_ROCM + for (auto&& [name, op] : GetHipBlasLtScaledGemmTypeStringAndOps()) { + this->RegisterOp(std::move(name), std::move(op)); + } +#endif + } + + std::string Signature() override { + return c10::str("ScaledGemmTunableOp", + "_", TypeName(AT{}), + "_", TypeName(BT{}), + "_", TypeName(CT{}), + "_", BlasOpToString(ALayout), BlasOpToString(BLayout)); + } +}; + +} // namespace at::cuda::tunable diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h new file mode 100644 index 
0000000000000000000000000000000000000000..9fb7afdb7627f43211dba2ee215cb20b30ef4453 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/cuda/tunable/TunableOp.h @@ -0,0 +1,286 @@ +// Original TunableOp is from onnxruntime. +// https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/tunable.h +// https://github.com/microsoft/onnxruntime/tree/main/onnxruntime/core/providers/rocm/tunable +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. +// +// Adapting TunableOp into PyTorch +// Copyright (c) Advanced Micro Devices, Inc. +// +#pragma once + +#include +#include +#include + +#ifndef _WIN32 +#include +#endif + +#include +#include +#include +#include + +namespace at::cuda::tunable { + +template +class Callable { + public: + Callable() = default; + Callable(Callable&&) = default; + virtual ~Callable() = default; + virtual TuningStatus Call(const ParamsT*) { + return FAIL; + } + virtual TuningStatus IsSupported(const ParamsT* params) { + return Call(params); + } +}; + +template +class TunableOp { + public: + TunableOp() = default; + TunableOp(TunableOp&&) = default; + virtual ~TunableOp() = default; + + TuningStatus operator()(const ParamsT* params) { + ResultEntry result = ResultEntry::Null(); + TuningContext* ctx = getTuningContext(); + if (ctx->IsTunableOpEnabled()) { + auto& mgr = ctx->GetTuningResultsManager(); + auto op_sig = Signature(); + auto params_sig = params->Signature(); + result = mgr.Lookup(op_sig, params_sig); + // If there is not previous tuning result been found, we do the tuning iff tuning is enabled + if (result == ResultEntry::Null() && ctx->IsTuningEnabled()) { + result = FindFastest(params); + mgr.Add(op_sig, params_sig, result); + } + } + else { + result = ResultEntry::Default(); + } + if (result == ResultEntry::Null()) { + TUNABLE_LOG2("no result, using default"); + result = ResultEntry::Default(); + } + auto iter = ops_.find(result); + TORCH_CHECK(iter != ops_.end()); + return iter->second->Call(params); + } + + virtual std::string Signature() { + // According to C++17 standard https://wg21.link/n4659 section 15.7.4 + // > if the operand of typeid refers to the + // > object under construction or destruction, typeid yields the std::type_info object representing the constructor + // > or destructor’s class. + // So delay the op signature generation. 
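// Illustrative sketch (standalone, not part of the upstream header): the
// delayed-signature pattern used right below, with std::call_once in place of
// c10::call_once. Computing the name on the first Signature() call rather than
// in the base-class constructor means typeid sees the fully constructed
// derived type instead of the base class still under construction.
#include <cstdio>
#include <mutex>
#include <string>
#include <typeinfo>

struct OpBase {
  std::string Signature() {
    std::call_once(once_, [this] { signature_ = typeid(*this).name(); });
    return signature_;
  }
  virtual ~OpBase() = default;

 private:
  std::once_flag once_;
  std::string signature_;
};

struct MyGemmOp : OpBase {};

int main() {
  MyGemmOp op;
  // First call computes and caches the (implementation-defined) type name;
  // later calls reuse the cached string.
  std::printf("signature: %s\n", op.Signature().c_str());
  return 0;
}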
+ c10::call_once(signature_init_once_, [this]() { signature_ = CreateSignature(); }); + return signature_; + } + + protected: + void RegisterOp(const std::string& name, std::unique_ptr> op) { + this->op_names_.emplace_back(name); + this->ops_.emplace(name, std::move(op)); + } + + private: + static void WarmUp(Callable *op, const std::vector ¶m, size_t num_iter, size_t &offset) { + TuningContext* ctx = getTuningContext(); + bool do_flush = ctx->IsICacheFlushEnabled(); + for (size_t i = 0; i < num_iter; i++) { + if (do_flush) { + at::cuda::flush_icache(); + } + TORCH_CHECK(op->Call(param[(i+offset++)%param.size()]) == OK); + } + } + + static double Profile(Callable *op, const std::vector ¶m, size_t num_iter, size_t &offset) { + TuningContext* ctx = getTuningContext(); + bool do_flush = ctx->IsICacheFlushEnabled(); + TimerT timer{}; + timer.Start(); + for (size_t i = 0; i < num_iter; i++) { + if (do_flush) { + at::cuda::flush_icache(); + } + TORCH_CHECK(op->Call(param[(i+offset++)%param.size()]) == OK); + } + timer.End(); + return timer.Duration() / num_iter; + } + + protected: + virtual ResultEntry FindFastest(const ParamsT* params) { + TuningContext* ctx = getTuningContext(); + auto op_sig = Signature(); + auto params_sig = params->Signature(); + TUNABLE_LOG2("finding fastest for ", op_sig, '(', params_sig, ')', " out of ", op_names_.size(), " candidates"); + auto min_duration_ms = std::numeric_limits::infinity(); + std::string id_name = "Default"; + ParamsT* reference_params = nullptr; + + // numeric check option is controlled by non-static env var, so check it once per tuned operator + bool do_numerics_check = ctx->IsNumericsCheckEnabled(); + + // calcaulte a reference answer for numerical check + if (do_numerics_check) { + reference_params = params->DeepCopy(false); + TORCH_CHECK(ops_[ResultEntry::Default()]->Call(reference_params) == OK); + } + + // need copies of params to reuse + // make as many copies as will fill the requested rotating buffer size, if requested + // rotating_size guaranteed to be >= 0 even though GetRotatingBufferSize() returns int + size_t rotating_size = ctx->GetRotatingBufferSize(); + bool use_buffer_rotation = (rotating_size > 0); + size_t param_size = params->GetSize(use_buffer_rotation); + size_t param_count = (rotating_size / param_size) + 1; + constexpr size_t MB = 1024*1024; + if (use_buffer_rotation) { + TUNABLE_LOG2("Rotating buffer ", rotating_size/MB, " MiB. ", + "Needed Size: ", param_size/MB, " MiB. 
", + "Needed number of param copies: ", param_count); + } + TORCH_CHECK(param_count > 0); + + std::vector reusable_params(param_count); + for (size_t i = 0; i < param_count; i++) { + reusable_params[i] = params->DeepCopy(use_buffer_rotation); + } + + // for rotating buffer + size_t offset = 0; + + for (size_t i = 0; i < op_names_.size(); i++) { + auto* candidate = ops_[op_names_[i]].get(); // borrow pointer + + if (do_numerics_check) { + ParamsT* numerical_params = params->DeepCopy(false); + auto status = candidate->Call(numerical_params); + if (status != OK) { + numerical_params->Delete(); + TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); + continue; + } + status = reference_params->NumericalCheck(numerical_params); + numerical_params->Delete(); + if (status != OK) { + TUNABLE_LOG3("├──numerics check failed for id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); + continue; + } + } + else { + auto status = candidate->Call(reusable_params[0]); + if (status != OK) { + TUNABLE_LOG3("├──unsupported id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); + continue; + } + } + + // collect a small profile + constexpr const int approx_num_iter = 3; + auto approx_duration = Profile(candidate, reusable_params, approx_num_iter, offset); + // bail if too slow + if (approx_duration > 2 * min_duration_ms) { + TUNABLE_LOG3("├──skip slow instance id=", i, ", ", op_sig, '(', params_sig, ") ", op_names_[i]); + continue; + } + + // for warmup does user set max duration, max iters, or both? + // warmup is allowed to be skipped by setting either iterations or duration to 0 + double max_warmup_duration = ctx->GetMaxWarmupDurationMs(); + int max_warmup_iter = ctx->GetMaxWarmupIterations(); + int warmup_iter = 1; // default + if (max_warmup_duration >= 0) { + int duration_iters = max_warmup_duration / approx_duration; + if (max_warmup_iter >= 0) { + warmup_iter = std::min(max_warmup_iter, duration_iters); + } + else { + warmup_iter = duration_iters; + } + } + else if (max_warmup_iter >= 0) { + warmup_iter = max_warmup_iter; + } + + // for tuning does user set max duration, max iters, or both? + double max_tuning_duration = ctx->GetMaxTuningDurationMs(); + int max_tuning_iter = ctx->GetMaxTuningIterations(); + int tuning_iter = 100; // default + if (max_tuning_duration > 0) { + int duration_iters = max_tuning_duration / approx_duration; + if (max_tuning_iter > 0) { + tuning_iter = std::min(max_tuning_iter, duration_iters); + } + else { + tuning_iter = duration_iters; + } + } + else if (max_tuning_iter > 0) { + tuning_iter = max_tuning_iter; + } + // tuning must run at least 1 iteration + tuning_iter = std::max(1, tuning_iter); + + // do the full warmup followed by tuning + double warmup_ms = warmup_iter * approx_duration; + double tuning_ms = tuning_iter * approx_duration; + TUNABLE_LOG3("├──tuning using " + "warmup iters ", warmup_iter, " [", warmup_ms, " ms] " + "and tuning iters ", tuning_iter, " [", tuning_ms, " ms] ", + "instance id=", i, ", ", op_sig, "(", params_sig, ") ", op_names_[i]); + TUNABLE_LOG3("├──offset at ", offset); + WarmUp(candidate, reusable_params, warmup_iter, offset); + auto duration_ms = Profile(candidate, reusable_params, tuning_iter, offset); + if (duration_ms < min_duration_ms) { + TUNABLE_LOG3("├──found better instance id=", i, ". " , duration_ms, "ms. 
", op_names_[i]); + min_duration_ms = duration_ms; + id_name = op_names_[i]; + } + } + + for (size_t i = 0; i < reusable_params.size(); i++) { + reusable_params[i]->Delete(); + } + if (reference_params) { + reference_params->Delete(); + } + + TUNABLE_LOG2("└──found fastest for ", op_sig, '(', params_sig, ") ", id_name); + return ResultEntry(id_name, min_duration_ms); + } + + private: + std::string CreateSignature() { +#ifndef _WIN32 + const auto* name = typeid(*this).name(); + char buf[256]; + size_t buf_len = 256; + abi::__cxa_demangle(name, buf, &buf_len, nullptr); + buf[255] = '\0'; + return buf; +#else + return typeid(*this).name(); +#endif + } + + mutable c10::once_flag signature_init_once_; + std::string signature_; + + std::unordered_map>> ops_; + std::vector op_names_; +}; + +struct OpParams { + OpParams() {} + virtual ~OpParams() = default; + virtual std::string Signature() const = 0; +}; + +} // namespace at::cuda::tunable diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h new file mode 100644 index 0000000000000000000000000000000000000000..a0ec83d976bc89b91a23f9c54bcd0a03bae729a4 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include + +namespace at { namespace native { + +// This function makes tensors which have zero stride contiguous, by +// setting the strides to 1. +inline Tensor contiguousIfZeroInStrides(const Tensor& t) { + for (auto s : t.strides()) { + if (s == 0) return t.contiguous(); + } + return t; +} + +}} diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..64243bc52d84dadac57f7d35e5ebdb2b6182c64b --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h @@ -0,0 +1,3 @@ +#pragma once + +#include diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..90747c264b1564d072d8f57fdd38b2ef743ea62c --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/BucketizationUtils.h @@ -0,0 +1,173 @@ +#pragma once + +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at::native { + +// original values given by raw_*. If an original value is not contiguous, will make a contiguous copy to +// the corresponding trimmed_* value. Additionally, if the dtypes of the boundary and input tensor do not +// match, will change them to be a common super type so comparisons are done between the same types. +// For any trimmed_* tensor, if its outgoing value matches what it was incoming (typically null), then the +// corresponding raw_* version should be used since it was already contiguous of the right type. 
+inline void searchsorted_maybe_trim_input_tensors( + Tensor& trimmed_input, + Tensor& trimmed_boundaries, + Tensor& trimmed_sorter, + const Tensor& raw_input, + const Tensor& raw_boundaries, + const Tensor& raw_sorter) { + bool in_is_contiguous = raw_input.is_contiguous(); + bool bd_is_contiguous = raw_boundaries.is_contiguous(); + bool sort_is_contiguous = raw_sorter.is_contiguous(); + + if (!in_is_contiguous) { + TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the performance due " + "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous input value " + "tensor if possible. This message will only appear once per program."); + trimmed_input = raw_input.contiguous(); + } + if (!bd_is_contiguous) { + TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the performance due " + "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous boundary " + "tensor if possible. This message will only appear once per program."); + trimmed_boundaries = raw_boundaries.contiguous(); + } + if (!sort_is_contiguous) { + TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the performance due " + "to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sorter " + "tensor if possible. This message will only appear once per program."); + trimmed_sorter = raw_sorter.contiguous(); + } + if (raw_input.dtype() != raw_boundaries.dtype()) { + at::native::ResultTypeState state = {}; + state = at::native::update_result_type_state(raw_boundaries, state); + state = at::native::update_result_type_state(raw_input, state); + ScalarType common_stype = at::native::result_type(state); + + TORCH_INTERNAL_ASSERT(common_stype != ScalarType::Undefined); + if (common_stype != raw_input.scalar_type()) { + trimmed_input = in_is_contiguous ? raw_input.to(common_stype) : trimmed_input.to(common_stype); + } + if (common_stype != raw_boundaries.scalar_type()) { + trimmed_boundaries = bd_is_contiguous ? raw_boundaries.to(common_stype) : trimmed_boundaries.to(common_stype); + } + } +} + +/* unused but needed for internal jagged tensor class */ +inline void searchsorted_maybe_trim_input_tensors( + Tensor& trimmed_input, + Tensor& trimmed_boundaries, + const Tensor& raw_input, + const Tensor& raw_boundaries) { + Tensor trimmed_sorter; + Tensor raw_sorter; + return searchsorted_maybe_trim_input_tensors( + trimmed_input, + trimmed_boundaries, + trimmed_sorter, + raw_input, + raw_boundaries, + raw_sorter); +} + +inline bool searchsorted_dims_matched_before_last_dim(const Tensor& boundaries, const Tensor& input) { + if (boundaries.dim() != input.dim()) { + return false; + } + const auto& dims_bd = boundaries.sizes(); + const auto& dims_in = input.sizes(); + for (int64_t dim = 0; dim + 1 < boundaries.dim(); ++dim) { + if (dims_bd[dim] != dims_in[dim]) { + return false; + } + } + return true; +} + +inline Tensor searchsorted_scalar_tensor(const Scalar& scalar, const c10::Device& device) { + auto tensor = c10::scalar_to_tensor(scalar, device); + // This is to adopt the scalar promotion rules defined in native/TypeProperties.h + // So we have the same type promotion rules as binary operations. 
+ tensor.unsafeGetTensorImpl()->set_wrapped_number(true); + return tensor; +} + +inline void searchsorted_pre_check( + const Tensor& boundaries, + const Tensor& input, + const Tensor& output, + const bool out_int32, + const bool right, + const std::optional side_opt, + const Tensor& sorter) { + if (side_opt) { + const c10::string_view side = *side_opt; + TORCH_CHECK(side == "left" || side == "right", "torch.searchsorted(): side can only be 'left' or 'right' but ", + "got ", side); + + // assume the user has not explicitly set (right=False, side="right") + TORCH_CHECK(!right || side == "right", "torch.searchsorted(): side and right can't be set to opposites, got side " + "of ", side, " while right was True"); + } + + TORCH_CHECK(boundaries.device() == input.device(), "torch.searchsorted(): boundaries and input value tensors ", + "should have same device type, but got boundaries tensor device type ", boundaries.device(), " and input value ", + "tensor device type ", input.device()); + + if (sorter.defined()) { + TORCH_CHECK(sorter.device() == boundaries.device(), "torch.searchsorted(): sorter and boundary tensors should ", + "have same device type, but got sorter tensor device type ", sorter.device(), " and input value tensor ", + "device type ", boundaries.device()); + + TORCH_CHECK(sorter.sizes() == boundaries.sizes(), "torch.searchsorted(): boundary and sorter must have the same " + "size, but got boundary tensor ", boundaries.sizes(), "and got sorter tensor ", sorter.sizes()); + + TORCH_CHECK(sorter.scalar_type() == ScalarType::Long, "torch.searchsorted(): sorter must be a tensor of long ", + "dtype but got dtype ", sorter.scalar_type()); + + if (sorter.numel() > 0) { + auto minmax = sorter.aminmax(); + int64_t vmin = std::get<0>(minmax).item().toLong(); + int64_t vmax = std::get<1>(minmax).item().toLong(); + TORCH_CHECK(vmin >= 0 && vmax < sorter.sizes().back(), "torch.searchsorted(): sorter index out of range"); + } + } + + TORCH_CHECK(input.dim() > 0 || (input.dim() == 0 && input.numel() == 1 && boundaries.dim() == 1), + "torch.searchsorted(): input value can be a scalar only when boundaries tensor dimension is 1, but we got ", + "boundaries tensor dim(", boundaries.dim(), ") and input value's dim(", input.dim(), ") numel(", + input.numel(), ")"); + + TORCH_CHECK(boundaries.dim() != 0, "torch.searchsorted(): boundaries tensor should have positive dimension, but ", + "got 0 dimension"); + + TORCH_CHECK(boundaries.dim() == 1 || searchsorted_dims_matched_before_last_dim(boundaries, input), + "torch.searchsorted(): boundaries tensor should be 1 dimension or the first N-1 dimensions of boundaries tensor ", + "and input value tensor must match, but we got boundaries tensor ", boundaries.sizes(), " and input value tensor ", + input.sizes()); + + ScalarType output_dtype = output.scalar_type(); + TORCH_CHECK( + (output_dtype == ScalarType::Long && !out_int32) || + (output_dtype == ScalarType::Int && out_int32), + "torch.searchsorted(): output tensor's dtype is wrong, it can only be Int(int32) or Long(int64) depending on ", + "whether out_int32 flag is True, but we got output tensor's dtype ", output_dtype, + " and out_int32 flag is ", (out_int32 ? 
"True" : "False")); + + if (out_int32) { + TORCH_CHECK(boundaries.sizes().back() < INT_MAX, + "torch.searchsorted(): the size of boundaries' last dimension should be less than ", INT_MAX, ", but we got ", + boundaries.sizes().back()); + } +} + +} // namespace at::native diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h new file mode 100644 index 0000000000000000000000000000000000000000..db9742e04021e6fa6942c540c28f4ca6ff90d5df --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h @@ -0,0 +1,13 @@ +#pragma once +#include +#include + +namespace at { +class TensorBase; +} + +namespace at::native { + +TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits::max()); + +} diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h new file mode 100644 index 0000000000000000000000000000000000000000..a8fbe13b8da0b9016e88c937ccbd2781eee0e160 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/ForeachUtils.h @@ -0,0 +1,396 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +#include +#include + +namespace at::native { +namespace { +// Check if tensor list has either a boolean tensor or a integer tensor +inline bool has_integral_tensor(TensorList tensors, const bool includeBool) { + return std::any_of( + tensors.begin(), tensors.end(), [&includeBool](const auto& t) { + return at::isIntegralType(t.scalar_type(), includeBool); + }); +} +// check if tensor list has bool tensors +inline bool has_bool_tensor(TensorList tensors) { + return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool { + return t.scalar_type() == ScalarType::Bool; + }); +} + +// Check foreach API restrictions +// - Tensor lists must be non-empty. +// - All TensorLists and ScalarLists must have the same number of elements. +// - Corresponding tensors must have the same size. 
+inline void check_foreach_api_restrictions(TensorList tensors) { + TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor."); +} + +inline void check_foreach_api_restrictions( + TensorList tensors, + ArrayRef scalars) { + check_foreach_api_restrictions(tensors); + TORCH_CHECK( + tensors.size() == scalars.size(), + "Tensor list must have same number of elements as scalar list."); +} + +inline void check_foreach_api_restrictions( + TensorList tensors1, + TensorList tensors2) { + TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK( + tensors1.size() == tensors2.size(), + "Tensor lists must have the same number of tensors, got ", + tensors1.size(), + " and ", + tensors2.size()); +} + +inline void check_foreach_api_restrictions( + TensorList tensors1, + TensorList tensors2, + TensorList tensors3) { + TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK(!tensors3.empty(), "Tensor list must have at least one tensor."); + TORCH_CHECK( + tensors1.size() == tensors2.size(), + "Tensor lists must have the same number of tensors, got ", + tensors1.size(), + " and ", + tensors2.size()); + TORCH_CHECK( + tensors1.size() == tensors3.size(), + "Tensor lists must have the same number of tensors, got ", + tensors1.size(), + " and ", + tensors3.size()); +} + +inline void check_foreach_api_restrictions( + TensorList tensors1, + TensorList tensors2, + TensorList tensors3, + ArrayRef scalars) { + check_foreach_api_restrictions(tensors1, tensors2, tensors3); + TORCH_CHECK( + tensors1.size() == scalars.size(), + "Tensor list must have same number of elements as scalar list, got ", + tensors1.size(), + " and ", + scalars.size()); +} + +// Helper function called in check_fast_path_restrictions to check whether all +// corresponding tensors (aligning in index across the tensorLists) share the +// same device and dtype. +inline bool _check_tensors_share_device_and_dtype( + ArrayRef tensorLists, + const bool skip_dtype_check = false) { + const auto expected_dtype = tensorLists[0][0].dtype(); + const auto expected_device = tensorLists[0][0].device(); + + auto is_tensor_okay = [&](const Tensor& tensor) { + return (skip_dtype_check || tensor.dtype() == expected_dtype) && + tensor.device() == expected_device && tensor.layout() == at::kStrided && + tensor.is_non_overlapping_and_dense(); + }; + + for (const auto& tensorList : tensorLists) { + for (const auto& tensor : tensorList) { + if (!is_tensor_okay(tensor)) { + return false; + } + } + } + + return true; +} + +// Helper function called in check_fast_path_restrictions to check if +// corresponding tensors in tensor lists have the same sizes and strides. 
+inline bool _check_tensors_share_sizes_and_strides( + ArrayRef tensorLists) { + auto is_diff_stride = [](const IntArrayRef& size, + const IntArrayRef& left_stride, + const IntArrayRef& right_stride) -> bool { + const size_t size_size = size.size(); + for (const auto dim : c10::irange(size_size)) { + if (size[dim] == 1) + continue; + if (left_stride[dim] != right_stride[dim]) { + return true; + } + } + return false; + }; + for (const auto i : c10::irange(1, tensorLists.size())) { + for (const auto j : c10::irange(tensorLists[0].size())) { + if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() || + is_diff_stride( + tensorLists[0][j].sizes(), + tensorLists[0][j].strides(), + tensorLists[i][j].strides())) { + return false; + } + } + } + + return true; +} + +// Helper function called in check_fast_path_restrictions to check whether +// all tensors type promote properly with the scalars in scalarList. This +// function assumes that _check_tensors_share_device_and_dtype has already been +// called so that all corresponding tensors in tensorLists have the same dtype. +// Then, it is sufficient to check the type promotion with just one tensorList. +inline bool _check_tensors_do_type_promotion_with_scalars( + TensorList tensorList, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + for (const auto i : c10::irange(tensorList.size())) { + // For division, integer inputs will result in float. + if (does_op_promote_integer_inputs_to_float) { + if (at::isIntegralType( + tensorList[i].scalar_type(), /*includeBool*/ true)) { + return false; + } + } + if (!scalarList.empty()) { + const auto& scalar = + scalarList.size() == 1 ? scalarList[0] : scalarList[i]; + const auto& tensor = tensorList[i]; + // note(mkozuki): This check might be responsible for + // `_foreach_add(bool_tensors, bool_tensors)` being pushed to slow path. + if (tensor.scalar_type() != at::native::result_type(scalar, tensor)) { + return false; + } + } + } + + return true; +} + +// To go via 'fast' path, several conditions must be satisfied +// - All tensors in all lists must have the same dtype. +// - All tensors must be on the same device +// - All tensors must have strided layout +// - All tensors must be non-overlapping and dense +// - Resulting tensor must have the same dtype as the input one + +// [note: what's ``does_op_promote_integer_inputs_to_float=true``?] +// ``does_op_promote_integer_inputs_to_float=true`` means that the result of +// the op will be float even if inputs are integer or boolean, which +// currently fast path does not support. In short, this flag, when +// turned on, gatekeeps the op from going down the fastpath. + +// Please, make sure to call check_foreach_api_restrictions before calling this +// method. There is a set of preconditions that have to be satisfied. 
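The does_op_promote_integer_inputs_to_float flag discussed above is easiest to see with division. A hedged sketch, assuming a libtorch build; whether the fused path is actually taken is an internal decision of can_use_fast_route below:

#include <torch/torch.h>
#include <vector>

int main() {
  std::vector<torch::Tensor> ints = {torch::ones({4}, torch::kLong)};
  // True division promotes integer inputs to floating point; the fused kernels
  // do not produce that from integer inputs, so this call is expected to be
  // routed to the slow (per-tensor) path.
  auto out = at::_foreach_div(ints, 2);
  // out[0] is floating point even though the input was int64.
  return 0;
}

check_fast_path_restrictions itself follows.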
+inline bool check_fast_path_restrictions( + ArrayRef tensorLists, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + return _check_tensors_share_device_and_dtype(tensorLists) && + _check_tensors_share_sizes_and_strides(tensorLists) && + _check_tensors_do_type_promotion_with_scalars( + tensorLists[0], + scalarList, + does_op_promote_integer_inputs_to_float); +} + +inline std::vector convert_tensor_to_scalar_list( + const Tensor& scalarList_, + int64_t expect_length) { + std::vector scalarList; + TORCH_CHECK( + scalarList_.device() == c10::kCPU, + "Expected scalars to be on CPU, got ", + scalarList_.device(), + " instead."); + TORCH_CHECK( + scalarList_.is_contiguous(), "Expected scalars to be contiguous."); + TORCH_CHECK( + scalarList_.dim() == 1, + "Expected packed scalar Tensor to be of dimension 1. Got ", + scalarList_.dim(), + " instead."); + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( + kComplexHalf, + kHalf, + kBool, + kBFloat16, + scalarList_.scalar_type(), + "convert_tensor_to_scalar_list", + [&]() { + const scalar_t* scalar_data = scalarList_.const_data_ptr(); + TORCH_CHECK( + (expect_length == scalarList_.size(0)), + "Expected length of scalars to match input of length ", + expect_length, + " but got ", + scalarList_.size(0), + " instead."); + for (int64_t i = 0; i < scalarList_.size(0); i++) { + scalarList.emplace_back(scalar_data[i]); + } + }); + return scalarList; +} + +// see: [note: what's ``does_op_promote_integer_inputs_to_float=true``?] +inline bool can_use_fast_route( + ArrayRef tensorLists, + ArrayRef scalarList = {}, + bool does_op_promote_integer_inputs_to_float = false) { + return check_fast_path_restrictions( + tensorLists, scalarList, does_op_promote_integer_inputs_to_float); +} + +// see: [note: what's ``does_op_promote_integer_inputs_to_float=true``?] 
+inline bool can_use_fast_route( + TensorList tensors1, + TensorList tensors2, + bool does_op_promote_integer_inputs_to_float = false) { + return can_use_fast_route( + {tensors1, tensors2}, {}, does_op_promote_integer_inputs_to_float); +} + +using DeviceDtypeKey = std::pair; +using IndicesT = std::vector; +using nested_optional_tensorvec_t = + std::vector>>; +using TensorsAndIndicesT = std::pair; +using FlatMap = std::unordered_map< + DeviceDtypeKey, + TensorsAndIndicesT, + ParamsHash>; + +inline FlatMap _group_tensors_by_first_tensors_device_and_dtype( + const nested_optional_tensorvec_t& nested_tensorlist, + const bool with_indices) { + FlatMap grouped_tensors_with_indices; + + TORCH_CHECK(!nested_tensorlist.empty()); + TORCH_CHECK(!nested_tensorlist[0].empty()); + const auto num_lists = nested_tensorlist.size(); + const auto num_tensors = nested_tensorlist[0].size(); + + TORCH_CHECK(std::all_of( + nested_tensorlist.cbegin(), + nested_tensorlist.cend(), + [&](const auto& tensorlist) -> bool { + // note(crcrpar): Allow empty tensorlists following + // ref: + // https://github.com/pytorch/pytorch/blob/85885301fd3c6adb8b9dc3cf7afadf6945566684/torch/utils/_foreach_utils.py#L21-L24 + return tensorlist.size() == num_tensors || tensorlist.size() == 0; + })); + + for (const auto& tensor_index : c10::irange(num_tensors)) { + const auto key = [&]() -> DeviceDtypeKey { + const auto t = nested_tensorlist[0][tensor_index]; + TORCH_CHECK( + t.has_value(), + "Tensors of the first list of nested Tensor lists are supposed to be defined but ", + "the ", + tensor_index, + "-th Tensor is not."); + return {t->device(), t->scalar_type()}; + }(); + TORCH_CHECK( + std::all_of( + nested_tensorlist.cbegin(), + nested_tensorlist.cend(), + [&](const auto& tensorlist) -> bool { + if (tensorlist.size() == 0) { + return true; + } + const auto& tensor = tensorlist[tensor_index]; + // note(crcrpar): Currently the scope of this function is + // optimizers so there could be `state_steps` and other scalars + // whose elements are float tensors no matter what the parameter's + // dtype is. + if (!tensor.has_value()) { + return true; + } else { + const auto s = tensor->scalar_type(); + const auto d = tensor->device(); + // Note: `step` or `state_step` is float32 by default. + if (key.first == d) { + return key.second == s || s == at::ScalarType::Float || + s == at::ScalarType::Double; + } else if (d.is_cpu()) { + // note(crcrpar): There are some test cases (e.g. + // TestOptim::test_adam) where state_steps are on CPU and the + // others are on CUDA. Currently a state_step Tensor has the + // dtype of float. + return s == at::ScalarType::Float || + s == at::ScalarType::Double; + } else { + return false; + } + } + }), + "Tensors of the same index must be on the same device and the same dtype except `step` tensors that can be CPU and float32/64 notwithstanding"); + if (!grouped_tensors_with_indices.count(key)) { + grouped_tensors_with_indices.insert( + {key, + TensorsAndIndicesT{ + [&]() -> nested_optional_tensorvec_t { + nested_optional_tensorvec_t nested_tensorvec; + nested_tensorvec.reserve(num_lists); + for (const auto& i : c10::irange(num_lists)) { + std::vector> tensors; + if (!nested_tensorlist[i].empty()) { + // NB: num_tensors is the max possible length for any of + // the inner lists of tensor references. Reserving the max + // trades memory for perf. This should not have significant + // impact. 
+ tensors.reserve(num_tensors); + } + nested_tensorvec.emplace_back(tensors); + } + return nested_tensorvec; + }(), + [&]() -> IndicesT { + if (!with_indices) { + return {}; + } else { + IndicesT indices; + indices.reserve(num_tensors); + return indices; + } + }()}}); + } + for (const auto& list_index : c10::irange(num_lists)) { + if (!nested_tensorlist[list_index].empty()) { + grouped_tensors_with_indices[key].first[list_index].emplace_back( + nested_tensorlist[list_index][tensor_index]); + } + } + if (with_indices) { + grouped_tensors_with_indices[key].second.emplace_back(tensor_index); + } + } + + return grouped_tensors_with_indices; +} + +} // namespace +} // namespace at::native diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h new file mode 100644 index 0000000000000000000000000000000000000000..de2296634e04511497827128c4483c5e75c6a674 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/MathBitsFallback.h @@ -0,0 +1,157 @@ +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include + +#include +#endif + +namespace at::native { +// This fallback should only be used for operations that are self inverse and have a corresponding tensor +// bit (internally implemented using DispatchKey) to maintain the state on tensor using tensor bit. +// Currently there are two tensor bits that trigger this fallback: conjugate bit and negative bit. +// Conjugate bit is set on a tensor when `.conj()` is called and neg bit is set on a tensor when `.conj().imag` is called. + +// NOTE: To use this fallback, `clone` and `copy_` should fully understand and be able to correctly handle the semantic of your math bit. +struct MathOpFallback { + MathOpFallback(DispatchKey key_, string op_name_) : key(key_), op_name(std::move(op_name_)) {} + virtual bool is_bit_set(const Tensor&) = 0; + void fallback_impl(const c10::OperatorHandle& op, DispatchKeySet dispatch_keys, torch::jit::Stack* stack) { + /* + Situations to handle: + 1. Out-of-place operation. Easy: materialize all inputs and + call it a day. + 2. Inplace operation. Desugar x.add_(2) into x.conj_().add_(2).conj_(). + Materialize other inputs as in (1). + 3. out= operation. Desugar add(x, 2, out=y) into y.copy_(add(x, 2)) + Materialize other inputs as in (1). + + It is important to be able to tell if we READ from an argument and if we + WRITE to an argument. Conservative approach is to assume that we always + READ from an argument, but in out= operations you can skip + conjugating inputs on entry that never get used. In the current schema we + can't easily tell if the operation is in in-place or out= operation. + + Note: + 1. Mutable tensorlists containing tensors whose math bit set to true are disallowed. + 2. Mutable tensors with math bit set to true are unconditionally cloned to ensure + correct behavior in the case when the mutable tensor shares memory with non mutable arguments. + + If we were to in-place resolve the math bit for mutable inputs, then the non-mutable inputs sharing partial or full memory + with these mutable inputs would read into wrong values in the following cases: + 1. Non mutable inputs have their math bit set to false. + 2. Math bit for mutable input(s) is resolved before the non mutable inputs (with bit set to true and sharing memory + with one or more mutable arg(s)) are cloned. 
+ At the end, the final value of the mutable arguments from the stack are copied into the original input mutable tensor inputs. + */ + const auto& arguments = op.schema().arguments(); + const auto num_arguments = arguments.size(); + const auto stack_start = stack->size() - num_arguments; + + std::optional is_write; + for (const auto i : c10::irange(num_arguments)) { + // Three possible states: + // 1. alias_info has no value --> out-of-place operation + // 2. alias_info does have a value, alias_info->is_write=True --> in-place or out= operation + // 3. alias_info does have a value, alias_info->is_write=False --> view operation + const AliasInfo* alias_info = arguments[i].alias_info(); + if (alias_info != nullptr) { + if (is_write.has_value()) { + TORCH_CHECK(*is_write == alias_info->isWrite(), + "Unsupported operator for ", op_name, " fallback: ", op.schema().name(), + op_name, " fallback doesn't work for operators with a mix " + "mutable and non-mutable inputs that alias with outputs, " + "this must be implemented manually. " + "If you got this error on a core op, please report a bug to PyTorch."); + } else { + is_write = alias_info->isWrite(); + } + } + } + + if (is_write.has_value() && !*is_write) { + // We assume that view operators automatically handle the math bit + // correctly by propagating the dispatch key in key_set. + // This is not necessarily always right, so you should test these cases. + op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack); + return; + } + + // Mutable inputs with math bit set to True and their clones + std::vector> mutable_inputs_with_their_clones; + for (const auto i : c10::irange(num_arguments)) { + auto& ivalue = (*stack)[stack_start + i]; + if (!(ivalue.isTensor() || ivalue.isTensorList())) { + continue; + } + const auto& argument = arguments[i]; + bool mut_arg = false; + if (argument.alias_info()) { + // Was already tested by is_write loop above + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(argument.alias_info()->isWrite()); + mut_arg = true; + } + if (ivalue.isTensor()) { + if (!is_bit_set(ivalue.toTensor())) { + continue; + } + auto tensor = std::move(ivalue).toTensor(); + auto resolved_tensor = at::clone(tensor); + if (mut_arg) { + TORCH_CHECK(mutable_inputs_with_their_clones.empty(), op_name, " fallback does not support operators with more than one mutable tensors with ", + op_name, "bit set to true."); + mutable_inputs_with_their_clones.emplace_back(std::move(tensor), resolved_tensor); + } + (*stack)[stack_start + i] = std::move(resolved_tensor); + } else if (ivalue.isTensorList()) { + auto tensors = std::move(ivalue).toTensorList(); + for(const auto j : c10::irange(tensors.size())) { + const auto& tensor = tensors[j]; + if (!is_bit_set(tensor)) { + continue; + } + TORCH_CHECK(!mut_arg, " fallback doesn't currently support mutable TensorLists with ", + op_name, " inputs. 
Please materialize all the ", op_name, " input tensor(s) in the mutable TensorList inputs before calling ", + op.schema().name()); + tensors[j] = at::clone(tensor); + } + (*stack)[stack_start + i] = std::move(tensors); + } + } + + op.redispatchBoxed(dispatch_keys & c10::DispatchKeySet(DispatchKeySet::FULL_AFTER, key), stack); + + TORCH_INTERNAL_ASSERT(mutable_inputs_with_their_clones.size() <= 1); + + for (std::pair mut_tensors: mutable_inputs_with_their_clones) { + auto& mutable_input = mut_tensors.first; + auto& cloned_mutable_input = mut_tensors.second; + auto& ivalue = (*stack)[stack_start]; + auto returned_output = std::move(ivalue).toTensor(); + + // sanity check to ensure that the tensor in stack aliases the cloned_mutable_input + TORCH_INTERNAL_ASSERT(cloned_mutable_input.is_same(returned_output)); + + // necessary for out= arg + at::native::resize_output(mutable_input, returned_output.sizes()); + + mutable_input.copy_(returned_output); + (*stack)[stack_start] = std::move(mutable_input); + } + } + + virtual ~MathOpFallback() = default; + + DispatchKey key; + string op_name; +}; + +} // namespace at::native diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h new file mode 100644 index 0000000000000000000000000000000000000000..1ba99e77b65c8d73bcab5a1ace75882682bb96ca --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/Normalization.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include + +namespace at::native { + +using renorm_scale_factor_fn = void (*) (TensorIteratorBase& iter, double maxnorm); +DECLARE_DISPATCH(renorm_scale_factor_fn, renorm_scale_factor_stub); + +enum class BatchNormBackend { + Native, + Cudnn, + Miopen, +}; + +TORCH_API BatchNormBackend _select_batch_norm_backend(const Tensor& input, const Tensor& weight, const Tensor& bias, const Tensor& running_mean, const Tensor& running_var, bool training, double eps); + +} // namespace at::native diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h new file mode 100644 index 0000000000000000000000000000000000000000..f3e54c2a40b425eb07fdecdb1164690bf78b4996 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/RNN.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include + +namespace at::native { + +using lstm_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool, bool); +using rnn_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool, bool); +using lstm_packed_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&, const Tensor&, TensorList, TensorList, bool, int64_t, double, bool, bool); +using rnn_packed_fn = void(*)(Tensor&, Tensor&, const Tensor&, const Tensor&, const Tensor&, TensorList, bool, int64_t, double, bool, bool); + +DECLARE_DISPATCH(lstm_fn, lstm_cudnn_stub); +DECLARE_DISPATCH(lstm_fn, lstm_miopen_stub); +DECLARE_DISPATCH(lstm_fn, lstm_mkldnn_stub); +DECLARE_DISPATCH(rnn_fn, gru_cudnn_stub); +DECLARE_DISPATCH(rnn_fn, gru_miopen_stub); +DECLARE_DISPATCH(rnn_fn, rnn_tanh_cudnn_stub); +DECLARE_DISPATCH(rnn_fn, rnn_tanh_miopen_stub); +DECLARE_DISPATCH(rnn_fn, rnn_relu_cudnn_stub); +DECLARE_DISPATCH(rnn_fn, rnn_relu_miopen_stub); +DECLARE_DISPATCH(lstm_packed_fn, lstm_packed_cudnn_stub); +DECLARE_DISPATCH(lstm_packed_fn, 
lstm_packed_miopen_stub); +DECLARE_DISPATCH(rnn_packed_fn, gru_packed_cudnn_stub); +DECLARE_DISPATCH(rnn_packed_fn, gru_packed_miopen_stub); +DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_cudnn_stub); +DECLARE_DISPATCH(rnn_packed_fn, rnn_tanh_packed_miopen_stub); +DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_cudnn_stub); +DECLARE_DISPATCH(rnn_packed_fn, rnn_relu_packed_miopen_stub); + +inline void check_attributes(const Tensor& input, const TensorList& params, const TensorList& hiddens, bool check_dtype=false) { + auto input_device = input.device(); + auto input_dtype = input.scalar_type(); + + auto check_tensors = [&](const std::string& name, const Tensor& t) { + if (!t.defined()) return; + auto t_device = t.device(); + TORCH_CHECK(input_device == t_device, + "Input and ", name, " tensors are not at the same device, found input tensor at ", + input_device, " and ", name, " tensor at ", t_device); + if (check_dtype) { + auto t_dtype = t.scalar_type(); + TORCH_CHECK(input_dtype == t_dtype, + "Input and ", name, " tensors are not the same dtype, found input tensor with ", + input_dtype, " and ", name, " tensor with ", t_dtype); + } + }; + + for (const auto& h : hiddens) check_tensors("hidden", h); + for (const auto& p : params) check_tensors("parameter", p); +} + +} // namespace at::native diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h new file mode 100644 index 0000000000000000000000000000000000000000..2d4845c758461c3435c83eaf7cafa3ddd6c9d784 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/native/TypeProperties.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include + +namespace at::native { + +struct ResultTypeState { + c10::ScalarType dimResult = ScalarType::Undefined; + c10::ScalarType wrappedResult = ScalarType::Undefined; + c10::ScalarType zeroResult = ScalarType::Undefined; +}; + +TORCH_API ResultTypeState update_result_type_state(const Tensor& tensor, const ResultTypeState& in_state); +TORCH_API ResultTypeState update_result_type_state(const Scalar& scalar, const ResultTypeState& in_state); +TORCH_API ScalarType result_type(const ResultTypeState& state); + +TORCH_API ScalarType result_type(ITensorListRef tensors); + +} // namespace at::native diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_ops.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..3a199140045dea9eec651952a05003f4f2af60e2 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
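The _neg_view op declared just below produces a lazily negated view; its negation bit is exactly what the MathBitsFallback above resolves. A small usage sketch, assuming a libtorch build:

#include <torch/torch.h>
#include <iostream>

int main() {
  auto t = torch::tensor({1.0, -2.0});
  auto v = at::_neg_view(t);        // lazily negated view: no data is copied
  std::cout << v.is_neg() << "\n";  // 1 - the negation bit is still set
  auto m = at::resolve_neg(v);      // materializes the negation and clears the bit
  std::cout << m.is_neg() << "\n";  // 0
  return 0;
}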
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _neg_view { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_neg_view") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_neg_view(Tensor(a) self) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_meta_dispatch.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bd91134d5d49a9cb0707aea4c47b86ea3d32cc37 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor minimum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & minimum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & minimum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..04f165216f879320fc86da80527d24984af47692 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_bessel_y1 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..93c8f776daf6f24878d81bd111c72552d7481a31 --- /dev/null +++ b/pllava/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cpu +} // namespace at
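The header above only declares the CPU dispatch entry points; user code normally calls the public ATen op, which the dispatcher routes to these kernels for CPU tensors. A usage sketch, assuming a libtorch build:

#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::linspace(-1.0, 1.0, 5);
  auto n = torch::full_like(x, 3.0);                  // evaluate the degree-3 polynomial
  auto y = at::special_chebyshev_polynomial_t(x, n);  // routed to the CPU kernel for CPU tensors
  std::cout << y << "\n";                             // T_3(x) = 4x^3 - 3x
  return 0;
}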