| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
grpc
|
grpc-master/src/csharp/Grpc.Tools.Tests/scripts/fakeprotoc.py
|
#!/usr/bin/env python3
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fake protobuf compiler for use in the Grpc.Tools MSBuild integration
# unit tests. Its purpose is to be called from the Grpc.Tools
# Google.Protobuf.Tools.targets MSBuild file instead of the actual protoc
# compiler. This script:
# - parses the command line arguments
# - generates the expected dependency file
# - generates dummy .cs files that are expected by the tests
# - writes a JSON results file containing the arguments passed in
# Configuration is done via environment variables as it is not possible
# to pass additional arguments when called from the MSBuild scripts under test.
#
# Environment variables:
# FAKEPROTOC_PROJECTDIR - project directory
# FAKEPROTOC_OUTDIR - output directory for generated files and output file
# FAKEPROTOC_GENERATE_EXPECTED - list of expected generated files in the format:
# file1.proto:csfile1.cs;csfile2.cs|file2.proto:csfile3.cs;csfile4.cs|...
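# As a concrete illustration (hypothetical file names), a value of
#   greet.proto:Greet.cs;GreetGrpc.cs|common.proto:Common.cs
# maps greet.proto to [Greet.cs, GreetGrpc.cs] and common.proto to [Common.cs].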
import datetime
import hashlib
import json
import os
import sys
# Set to True to write out debug messages from this script
_dbg = True
# file to which the debug log is written
_dbgfile = None
def _open_debug_log(filename):
"""Create debug file for this script."""
global _dbgfile
if _dbg:
# append mode since this script may be called multiple times
# during one build/test
_dbgfile = open(filename, "a")
def _close_debug_log():
"""Close the debug log file."""
if _dbgfile:
_dbgfile.close()
def _write_debug(msg):
"""Write to the debug log file if debug is enabled."""
if _dbg and _dbgfile:
print(msg, file=_dbgfile, flush=True)
def _read_protoc_arguments():
"""
Get the protoc arguments from the command line and
any response files specified on the command line.
Returns the list of arguments.
"""
_write_debug("\nread_protoc_arguments")
result = []
for arg in sys.argv[1:]:
_write_debug(" arg: " + arg)
if arg.startswith("@"):
# TODO(jtattermusch): inserting a "commented out" argument feels hacky
result.append("# RSP file: %s" % arg)
rsp_file_name = arg[1:]
result.extend(_read_rsp_file(rsp_file_name))
else:
result.append(arg)
return result
def _read_rsp_file(rspfile):
"""
Returns list of arguments from a response file.
"""
_write_debug("\nread_rsp_file: " + rspfile)
result = []
with open(rspfile, "r") as rsp:
for line in rsp:
line = line.strip()
_write_debug(" line: " + line)
result.append(line)
return result
def _parse_protoc_arguments(protoc_args, projectdir):
"""
Parse the protoc arguments from the provided list
"""
_write_debug("\nparse_protoc_arguments")
arg_dict = {}
for arg in protoc_args:
_write_debug("Parsing: %s" % arg)
# All arguments containing file or directory paths are
# normalized by converting all '\' characters to '/'
if arg.startswith("--"):
# Assumes that cmdline arguments are always passed in the
# "--somearg=argvalue" form, which happens to be the form that
# the msbuild integration uses, but it's not the only way.
(name, value) = arg.split("=", 1)
if (
name == "--dependency_out"
or name == "--grpc_out"
or name == "--csharp_out"
):
# For args that contain a path, make the path absolute and normalize it
# to make it easier to assert equality in tests.
value = _normalized_absolute_path(value)
if name == "--proto_path":
# for simplicity keep this one as relative path rather than absolute path
# since it is an input file that always lives near the project file
value = _normalized_relative_to_projectdir(value, projectdir)
_add_protoc_arg_to_dict(arg_dict, name, value)
elif arg.startswith("#"):
pass # ignore
else:
# arg represents a proto file name
arg = _normalized_relative_to_projectdir(arg, projectdir)
_add_protoc_arg_to_dict(arg_dict, "protofile", arg)
return arg_dict
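# A minimal sketch of the multi-dict this returns, assuming hypothetical
# arguments ["--csharp_out=obj", "--proto_path=.", "greet.proto"]: roughly
#   {"--csharp_out": ["<abs>/obj"], "--proto_path": ["."],
#    "protofile": ["greet.proto"]}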
def _add_protoc_arg_to_dict(arg_dict, name, value):
"""
Add an argument name/value pair to the multi-dictionary of arguments
"""
if name not in arg_dict:
arg_dict[name] = []
arg_dict[name].append(value)
def _normalized_relative_to_projectdir(file, projectdir):
"""Convert a file path to one relative to the project directory."""
try:
return _normalize_slashes(
os.path.relpath(os.path.abspath(file), projectdir)
)
except ValueError:
# On Windows, if the paths are on different drives we get this error.
# Just return the absolute path in that case.
return _normalize_slashes(os.path.abspath(file))
def _normalized_absolute_path(file):
"""Returns normalized absolute path to file."""
return _normalize_slashes(os.path.abspath(file))
def _normalize_slashes(path):
"""Change all backslashes to forward slashes to make comparing path strings easier."""
return path.replace("\\", "/")
def _write_or_update_results_json(log_dir, protofile, protoc_arg_dict):
"""Write or update the results JSON file"""
# Read existing json.
# Since protoc may be called more than once each build/test if there is
# more than one proto file, we read the existing data to add to it.
fname = os.path.abspath("%s/results.json" % log_dir)
if os.path.isfile(fname):
# Load the original contents.
with open(fname, "r") as forig:
results_json = json.load(forig)
else:
results_json = {}
results_json["Files"] = {}
results_json["Files"][protofile] = protoc_arg_dict
results_json["Metadata"] = {"timestamp": str(datetime.datetime.now())}
with open(fname, "w") as fout:
json.dump(results_json, fout, indent=4)
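# The resulting results.json has roughly this shape (hypothetical names):
#   {"Files": {"greet.proto": {"--csharp_out": [...], ...}},
#    "Metadata": {"timestamp": "..."}}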
def _parse_generate_expected(generate_expected_str):
"""
Parse FAKEPROTOC_GENERATE_EXPECTED that specifies the proto files
and the cs files to generate. We rely on the test to say what is
expected rather than trying to work it out in this script.
The format of the input is:
file1.proto:csfile1.cs;csfile2.cs|file2.proto:csfile3.cs;csfile4.cs|...
"""
_write_debug("\nparse_generate_expected")
result = {}
entries = generate_expected_str.split("|")
for entry in entries:
parts = entry.split(":")
pfile = _normalize_slashes(parts[0])
csfiles = parts[1].split(";")
result[pfile] = csfiles
_write_debug(pfile + " : " + str(csfiles))
return result
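# E.g. (hypothetical names) "a.proto:A.cs;AGrpc.cs|b.proto:B.cs" parses to
#   {"a.proto": ["A.cs", "AGrpc.cs"], "b.proto": ["B.cs"]}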
def _get_cs_files_to_generate(protofile, proto_to_generated):
"""Returns list of .cs files to generated based on FAKEPROTOC_GENERATE_EXPECTED env."""
protoname_normalized = _normalize_slashes(protofile)
cs_files_to_generate = proto_to_generated.get(protoname_normalized)
return cs_files_to_generate
def _is_grpc_out_file(csfile):
"""Return true if the file is one that would be generated by gRPC plugin"""
# This is using the heuristics of checking that the name of the file
# matches *Grpc.cs which is the name that the gRPC plugin would produce.
return csfile.endswith("Grpc.cs")
def _generate_cs_files(
protofile, cs_files_to_generate, grpc_out_dir, csharp_out_dir, projectdir
):
"""Create expected cs files."""
_write_debug("\ngenerate_cs_files")
if not cs_files_to_generate:
_write_debug("No .cs files matching proto file name %s" % protofile)
return
if not os.path.isabs(grpc_out_dir):
# if not absolute, it is relative to project directory
grpc_out_dir = os.path.abspath("%s/%s" % (projectdir, grpc_out_dir))
if not os.path.isabs(csharp_out_dir):
# if not absolute, it is relative to project directory
csharp_out_dir = os.path.abspath("%s/%s" % (projectdir, csharp_out_dir))
# Ensure directories exist
if not os.path.isdir(grpc_out_dir):
os.makedirs(grpc_out_dir)
if not os.path.isdir(csharp_out_dir):
os.makedirs(csharp_out_dir)
timestamp = str(datetime.datetime.now())
for csfile in cs_files_to_generate:
if _is_grpc_out_file(csfile):
csfile_fullpath = "%s/%s" % (grpc_out_dir, csfile)
else:
csfile_fullpath = "%s/%s" % (csharp_out_dir, csfile)
_write_debug("Creating: %s" % csfile_fullpath)
with open(csfile_fullpath, "w") as fout:
print("// Generated by fake protoc: %s" % timestamp, file=fout)
def _create_dependency_file(
protofile,
cs_files_to_generate,
dependencyfile,
grpc_out_dir,
csharp_out_dir,
):
"""Create the expected dependency file."""
_write_debug("\ncreate_dependency_file")
if not dependencyfile:
_write_debug("dependencyfile is not set.")
return
if not cs_files_to_generate:
_write_debug("No .cs files matching proto file name %s" % protofile)
return
_write_debug("Creating dependency file: %s" % dependencyfile)
with open(dependencyfile, "w") as out:
nfiles = len(cs_files_to_generate)
for i in range(0, nfiles):
csfile = cs_files_to_generate[i]
if _is_grpc_out_file(csfile):
cs_filename = os.path.join(grpc_out_dir, csfile)
else:
cs_filename = os.path.join(csharp_out_dir, csfile)
if i == nfiles - 1:
print("%s: %s" % (cs_filename, protofile), file=out)
else:
print("%s \\" % cs_filename, file=out)
def _getenv(name):
# Note there is a bug in .NET core 3.x that lowercases the environment
# variable names when they are added via Process.StartInfo, so we need to
# check both cases here (only an issue on Linux, which is case-sensitive).
value = os.getenv(name)
if value is None:
value = os.getenv(name.lower())
return value
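# E.g. _getenv("FAKEPROTOC_OUTDIR") also tries "fakeprotoc_outdir" before
# giving up.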
def _get_argument_last_occurrence_or_none(protoc_arg_dict, name):
# If argument was passed multiple times, take the last occurrence.
# If the argument was not passed at all, return None.
values = protoc_arg_dict.get(name)
if values is not None:
return values[-1]
return None
def main():
# Check environment variables for the additional arguments used in the tests.
projectdir = _getenv("FAKEPROTOC_PROJECTDIR")
if not projectdir:
print("FAKEPROTOC_PROJECTDIR not set")
sys.exit(1)
projectdir = os.path.abspath(projectdir)
# Output directory for generated files and output file
protoc_outdir = _getenv("FAKEPROTOC_OUTDIR")
if not protoc_outdir:
print("FAKEPROTOC_OUTDIR not set")
sys.exit(1)
protoc_outdir = os.path.abspath(protoc_outdir)
# Get list of expected generated files from env variable
generate_expected = _getenv("FAKEPROTOC_GENERATE_EXPECTED")
if not generate_expected:
print("FAKEPROTOC_GENERATE_EXPECTED not set")
sys.exit(1)
# Prepare the debug log
log_dir = os.path.join(protoc_outdir, "log")
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
_open_debug_log("%s/fakeprotoc_log.txt" % log_dir)
_write_debug(
(
"##### fakeprotoc called at %s\n"
+ "FAKEPROTOC_PROJECTDIR = %s\n"
+ "FAKEPROTOC_GENERATE_EXPECTED = %s\n"
)
% (datetime.datetime.now(), projectdir, generate_expected)
)
proto_to_generated = _parse_generate_expected(generate_expected)
protoc_args = _read_protoc_arguments()
protoc_arg_dict = _parse_protoc_arguments(protoc_args, projectdir)
# If argument was passed multiple times, take the last occurrence of it.
# TODO(jtattermusch): handle multiple occurrences of the same argument
dependencyfile = _get_argument_last_occurrence_or_none(
protoc_arg_dict, "--dependency_out"
)
grpcout = _get_argument_last_occurrence_or_none(
protoc_arg_dict, "--grpc_out"
)
csharpout = _get_argument_last_occurrence_or_none(
protoc_arg_dict, "--csharp_out"
)
# --grpc_out might not be set in which case use --csharp_out
if grpcout is None:
grpcout = csharpout
if len(protoc_arg_dict.get("protofile", [])) != 1:
# regular protoc can process multiple .proto files passed at once, but we know
# the Grpc.Tools msbuild integration only ever passes one .proto file per invocation.
print(
"Expecting to get exactly one .proto file argument per fakeprotoc"
" invocation."
)
sys.exit(1)
protofile = protoc_arg_dict.get("protofile")[0]
cs_files_to_generate = _get_cs_files_to_generate(
protofile=protofile, proto_to_generated=proto_to_generated
)
_create_dependency_file(
protofile=protofile,
cs_files_to_generate=cs_files_to_generate,
dependencyfile=dependencyfile,
grpc_out_dir=grpcout,
csharp_out_dir=csharpout,
)
_generate_cs_files(
protofile=protofile,
cs_files_to_generate=cs_files_to_generate,
grpc_out_dir=grpcout,
csharp_out_dir=csharpout,
projectdir=projectdir,
)
_write_or_update_results_json(
log_dir=log_dir, protofile=protofile, protoc_arg_dict=protoc_arg_dict
)
_close_debug_log()
if __name__ == "__main__":
main()
| 14,156
| 32.787589
| 93
|
py
|
grpc
|
grpc-master/src/benchmark/gen_build_yaml.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import yaml
os.chdir(os.path.dirname(sys.argv[0]) + "/../..")
out = {}
out["libs"] = [
{
"name": "benchmark",
"build": "private",
"language": "c++",
"secure": False,
"defaults": "benchmark",
"src": sorted(glob.glob("third_party/benchmark/src/*.cc")),
"headers": sorted(
glob.glob("third_party/benchmark/src/*.h")
+ glob.glob("third_party/benchmark/include/benchmark/*.h")
),
}
]
print(yaml.dump(out))
| 1,138
| 26.119048
| 74
|
py
|
grpc
|
grpc-master/src/boringssl/gen_build_yaml.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import yaml
run_dir = os.path.dirname(sys.argv[0])
sources_path = os.path.abspath(
os.path.join(run_dir, "../../third_party/boringssl-with-bazel/sources.json")
)
try:
with open(sources_path, "r") as s:
sources = json.load(s)
except IOError:
sources_path = os.path.abspath(
os.path.join(
run_dir, "../../../../third_party/openssl/boringssl/sources.json"
)
)
with open(sources_path, "r") as s:
sources = json.load(s)
def map_dir(filename):
return "third_party/boringssl-with-bazel/" + filename
class Grpc(object):
"""Adapter for boring-SSL json sources files."""
def __init__(self, sources):
self.yaml = None
self.WriteFiles(sources)
def WriteFiles(self, files):
test_binaries = ["ssl_test", "crypto_test"]
asm_outputs = {
key: value
for key, value in files.items()
if any(f.endswith(".S") or f.endswith(".asm") for f in value)
}
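# asm_outputs keeps only the per-platform file lists that contain assembly
# (.S/.asm) sources; e.g. a hypothetical key "crypto_linux_x86_64" mapping
# to ["src/.../foo.S", ...] survives the filter, pure-C lists do not.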
self.yaml = {
"#": "generated with src/boringssl/gen_build_yaml.py",
"raw_boringssl_build_output_for_debugging": {
"files": files,
},
"libs": [
{
"name": "boringssl",
"build": "private",
"language": "c",
"secure": False,
"src": sorted(
map_dir(f) for f in files["ssl"] + files["crypto"]
),
"asm_src": {
k: [map_dir(f) for f in value]
for k, value in asm_outputs.items()
},
"headers": sorted(
map_dir(f)
# We want to include files['fips_fragments'], but not build them as objects.
# See https://boringssl-review.googlesource.com/c/boringssl/+/16946
for f in files["ssl_headers"]
+ files["ssl_internal_headers"]
+ files["crypto_headers"]
+ files["crypto_internal_headers"]
+ files["fips_fragments"]
),
"boringssl": True,
"defaults": "boringssl",
},
{
"name": "boringssl_test_util",
"build": "private",
"language": "c++",
"secure": False,
"boringssl": True,
"defaults": "boringssl",
"src": [map_dir(f) for f in sorted(files["test_support"])],
},
],
"targets": [
{
"name": "boringssl_%s" % test,
"build": "test",
"run": False,
"secure": False,
"language": "c++",
"src": sorted(map_dir(f) for f in files[test]),
"vs_proj_dir": "test/boringssl",
"boringssl": True,
"defaults": "boringssl",
"deps": [
"boringssl_test_util",
"boringssl",
],
}
for test in test_binaries
],
"tests": [
{
"name": "boringssl_%s" % test,
"args": [],
"exclude_configs": ["asan", "ubsan"],
"ci_platforms": ["linux", "mac", "posix", "windows"],
"platforms": ["linux", "mac", "posix", "windows"],
"flaky": False,
"gtest": True,
"language": "c++",
"boringssl": True,
"defaults": "boringssl",
"cpu_cost": 1.0,
}
for test in test_binaries
],
}
grpc_platform = Grpc(sources)
print(yaml.dump(grpc_platform.yaml))
| 4,735
| 33.823529
| 100
|
py
|
grpc
|
grpc-master/src/zlib/gen_build_yaml.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import yaml
os.chdir(os.path.dirname(sys.argv[0]) + "/../..")
out = {}
try:
with open("third_party/zlib/CMakeLists.txt") as f:
cmake = f.read()
def cmpath(x):
return "third_party/zlib/%s" % x.replace(
"${CMAKE_CURRENT_BINARY_DIR}/", ""
)
def cmvar(name):
regex = r"set\(\s*"
regex += name
regex += r"([^)]*)\)"
return [cmpath(x) for x in re.search(regex, cmake).group(1).split()]
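# E.g. for a hypothetical CMake line "set(ZLIB_SRCS adler32.c compress.c)",
# cmvar("ZLIB_SRCS") returns
# ["third_party/zlib/adler32.c", "third_party/zlib/compress.c"].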
out["libs"] = [
{
"name": "z",
"zlib": True,
"defaults": "zlib",
"build": "private",
"language": "c",
"secure": False,
"src": sorted(cmvar("ZLIB_SRCS")),
"headers": sorted(
cmvar("ZLIB_PUBLIC_HDRS") + cmvar("ZLIB_PRIVATE_HDRS")
),
}
]
except Exception:
pass
print(yaml.dump(out))
| 1,524
| 24.847458
| 76
|
py
|
grpc
|
grpc-master/bazel/_gevent_test_main.py
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gevent
from gevent import monkey
monkey.patch_all()
threadpool = gevent.hub.get_hub().threadpool
# Currently, each channel corresponds to a single native thread in the
# gevent threadpool. Thus, when the unit test suite spins up hundreds of
# channels concurrently, some will be starved out, causing the test to
# increase in duration. We increase the max size here so this does not
# happen.
threadpool.maxsize = 1024
threadpool.size = 32
import traceback, signal
from typing import Sequence
import grpc.experimental.gevent
grpc.experimental.gevent.init_gevent()
import gevent
import greenlet
import datetime
import grpc
import unittest
import sys
import os
import pkgutil
def trace_callback(event, args):
if event in ("switch", "throw"):
origin, target = args
sys.stderr.write("{} Transfer from {} to {} with {}\n".format(datetime.datetime.now(), origin, target, event))
else:
sys.stderr.write("Unknown event {}.\n".format(event))
sys.stderr.flush()
if os.getenv("GREENLET_TRACE") is not None:
greenlet.settrace(trace_callback)
def debug(sig, frame):
d={'_frame':frame}
d.update(frame.f_globals)
d.update(frame.f_locals)
sys.stderr.write("Traceback:\n{}".format("\n".join(traceback.format_stack(frame))))
import gevent.util; gevent.util.print_run_info()
sys.stderr.flush()
signal.signal(signal.SIGTERM, debug)
class SingleLoader(object):
def __init__(self, pattern: str):
loader = unittest.TestLoader()
self.suite = unittest.TestSuite()
tests = []
for importer, module_name, is_package in pkgutil.walk_packages([os.path.dirname(os.path.relpath(__file__))]):
if pattern in module_name:
module = importer.find_module(module_name).load_module(module_name)
tests.append(loader.loadTestsFromModule(module))
if len(tests) != 1:
raise AssertionError("Expected only 1 test module. Found {}".format(tests))
self.suite.addTest(tests[0])
def loadTestsFromNames(self, names: Sequence[str], module: str = None) -> unittest.TestSuite:
return self.suite
if __name__ == "__main__":
if len(sys.argv) != 2:
print(f"USAGE: {sys.argv[0]} TARGET_MODULE", file=sys.stderr)
target_module = sys.argv[1]
loader = SingleLoader(target_module)
runner = unittest.TextTestRunner()
result = gevent.spawn(runner.run, loader.suite)
result.join()
if not result.value.wasSuccessful():
sys.exit("Test failure.")
| 3,115
| 30.16
| 118
|
py
|
grpc
|
grpc-master/test/distrib/python/distribtest.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
# This code doesn't do much but makes sure the native extension is loaded
# which is what we are testing here.
channel = grpc.insecure_channel("localhost:1000")
del channel
print("Success!")
| 782
| 34.590909
| 74
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/import_from_this_package.py
|
# Copyright 2021 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A trivial executable that imports Protobuf generated code where the
proto_library and py_proto_library are in different Bazel packages.
"""
import subpackage_pb2
| 747
| 38.368421
| 74
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/helloworld_moved.py
|
# Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from concurrent import futures
import contextlib
import datetime
import logging
import unittest
# TODO(https://github.com/grpc/grpc/issues/29284)
# isort: off
import grpc
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from google.cloud import helloworld_pb2
from google.cloud import helloworld_pb2_grpc
# isort: on
_HOST = "localhost"
_SERVER_ADDRESS = "{}:0".format(_HOST)
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
request_in_flight = (
datetime.datetime.now() - request.request_initiation.ToDatetime()
)
request_duration = duration_pb2.Duration()
request_duration.FromTimedelta(request_in_flight)
return helloworld_pb2.HelloReply(
message="Hello, %s!" % request.name,
request_duration=request_duration,
)
@contextlib.contextmanager
def _listening_server():
server = grpc.server(futures.ThreadPoolExecutor())
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
port = server.add_insecure_port(_SERVER_ADDRESS)
server.start()
try:
yield port
finally:
server.stop(0)
class ImportTest(unittest.TestCase):
def test_import(self):
with _listening_server() as port:
with grpc.insecure_channel("{}:{}".format(_HOST, port)) as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
request_timestamp = timestamp_pb2.Timestamp()
request_timestamp.GetCurrentTime()
response = stub.SayHello(
helloworld_pb2.HelloRequest(
name="you",
request_initiation=request_timestamp,
),
wait_for_ready=True,
)
self.assertEqual(response.message, "Hello, you!")
self.assertGreater(response.request_duration.nanos, 0)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
| 2,703
| 31.97561
| 79
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/import_from_proto_library_package.py
|
# Copyright 2021 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A trivial executable that imports Protobuf generated code where the
proto_library and py_proto_library are in different Bazel packages.
"""
import in_subpackage.subpackage_pb2
| 761
| 39.105263
| 74
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/transitive_proto_dep.py
|
# Copyright 2021 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A trivial executable that imports helloworld_pb2, but NOT its transitive
dependencies."""
import helloworld_pb2
| 697
| 37.777778
| 75
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/helloworld.py
|
# Copyright 2019 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from concurrent import futures
import contextlib
import datetime
import logging
import unittest
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
_HOST = "localhost"
_SERVER_ADDRESS = "{}:0".format(_HOST)
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
request_in_flight = (
datetime.datetime.now() - request.request_initiation.ToDatetime()
)
request_duration = duration_pb2.Duration()
request_duration.FromTimedelta(request_in_flight)
return helloworld_pb2.HelloReply(
message="Hello, %s!" % request.name,
request_duration=request_duration,
)
@contextlib.contextmanager
def _listening_server():
server = grpc.server(futures.ThreadPoolExecutor())
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
port = server.add_insecure_port(_SERVER_ADDRESS)
server.start()
try:
yield port
finally:
server.stop(0)
class ImportTest(unittest.TestCase):
def test_import(self):
with _listening_server() as port:
with grpc.insecure_channel("{}:{}".format(_HOST, port)) as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
request_timestamp = timestamp_pb2.Timestamp()
request_timestamp.GetCurrentTime()
response = stub.SayHello(
helloworld_pb2.HelloRequest(
name="you",
request_initiation=request_timestamp,
),
wait_for_ready=True,
)
self.assertEqual(response.message, "Hello, you!")
self.assertGreater(response.request_duration.nanos, 0)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
| 2,591
| 32.230769
| 79
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/namespaced/upper/example/no_import_no_strip_test.py
|
# Copyright 2020 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
class ImportTest(unittest.TestCase):
def test_import(self):
from namespaced.upper.example.namespaced_example_pb2 import (
NamespacedExample,
)
namespaced_example = NamespacedExample()
namespaced_example.value = "hello"
# Superfluous assert; the important part is that the namespaced example was imported.
self.assertEqual(namespaced_example.value, "hello")
def test_grpc(self):
from namespaced.upper.example.namespaced_example_pb2_grpc import (
NamespacedServiceStub,
)
# No error from import
self.assertEqual(1, 1)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
| 1,308
| 30.166667
| 80
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/namespaced/upper/example/import_strip_test.py
|
# Copyright 2020 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
class ImportTest(unittest.TestCase):
def test_import(self):
from foo.bar.namespaced_example_pb2 import NamespacedExample
namespaced_example = NamespacedExample()
namespaced_example.value = "hello"
# Superfluous assert; the important part is that the namespaced example was imported.
self.assertEqual(namespaced_example.value, "hello")
def test_grpc(self):
from foo.bar.namespaced_example_pb2_grpc import NamespacedServiceStub
# No error from import
self.assertEqual(1, 1)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
| 1,224
| 31.236842
| 80
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/namespaced/upper/example/import_no_strip_test.py
|
# Copyright 2020 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
class ImportTest(unittest.TestCase):
def test_import(self):
from foo.bar.namespaced.upper.example.namespaced_example_pb2 import (
NamespacedExample,
)
namespaced_example = NamespacedExample()
namespaced_example.value = "hello"
# Superfluous assert; the important part is that the namespaced example was imported.
self.assertEqual(namespaced_example.value, "hello")
def test_grpc(self):
from foo.bar.namespaced.upper.example.namespaced_example_pb2_grpc import (
NamespacedServiceStub,
)
# No error from import
self.assertEqual(1, 1)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
| 1,324
| 30.547619
| 82
|
py
|
grpc
|
grpc-master/test/distrib/bazel/python/namespaced/upper/example/no_import_strip_test.py
|
# Copyright 2020 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
class ImportTest(unittest.TestCase):
def test_import(self):
from namespaced_example_pb2 import NamespacedExample
namespaced_example = NamespacedExample()
namespaced_example.value = "hello"
# Superfluous assert; the important part is that the namespaced example was imported.
self.assertEqual(namespaced_example.value, "hello")
def test_grpc(self):
from namespaced_example_pb2_grpc import NamespacedServiceStub
# No error from import
self.assertEqual(1, 1)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
| 1,208
| 30.815789
| 80
|
py
|
grpc
|
grpc-master/test/distrib/gcf/python/main.py
|
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functions_framework
from google.cloud import pubsub_v1
ps_client = pubsub_v1.PublisherClient()
_PROJECT_ID = "grpc-testing"
_PUBSUB_TOPIC = "gcf-distribtest-topic"
@functions_framework.http
def test_publish(request):
topic_path = ps_client.topic_path(_PROJECT_ID, _PUBSUB_TOPIC)
message = '{"function": "TEST"}'
message_bytes = message.encode("utf-8")
for _ in range(100):
future = ps_client.publish(topic_path, data=message_bytes)
return "ok", 200
| 1,067
| 31.363636
| 74
|
py
|
grpc
|
grpc-master/test/core/end2end/fuzzers/generate_client_examples_of_bad_closing_streams.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
os.chdir(os.path.dirname(sys.argv[0]))
streams = {
"server_hanging_response_1_header": (
[0, 0, 0, 4, 0, 0, 0, 0, 0]  # settings frame
+ [0, 0, 0, 1, 5, 0, 0, 0, 1]  # trailers
),
"server_hanging_response_2_header2": (
[0, 0, 0, 4, 0, 0, 0, 0, 0]  # settings frame
+ [0, 0, 0, 1, 4, 0, 0, 0, 1]  # headers
+ [0, 0, 0, 1, 5, 0, 0, 0, 1]  # trailers
),
}
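# Each 9-byte group above is an HTTP/2 frame header: a 3-byte length
# (0 here, so no payload), a 1-byte type (0x4 = SETTINGS, 0x1 = HEADERS),
# a 1-byte flags field (0x4 = END_HEADERS, 0x1 = END_STREAM, so 0x5 marks
# trailers), and a 4-byte stream id.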
for name, stream in streams.items():
open("client_fuzzer_corpus/%s" % name, "w").write(bytearray(stream))
| 1,146
| 31.771429
| 74
|
py
|
grpc
|
grpc-master/test/core/http/test_server.py
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Server for httpcli_test"""
import argparse
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
import os
import ssl
import sys
_PEM = os.path.abspath(
os.path.join(
os.path.dirname(sys.argv[0]),
"../../..",
"src/core/tsi/test_creds/server1.pem",
)
)
_KEY = os.path.abspath(
os.path.join(
os.path.dirname(sys.argv[0]),
"../../..",
"src/core/tsi/test_creds/server1.key",
)
)
print(_PEM)
open(_PEM).close()
argp = argparse.ArgumentParser(description="Server for httpcli_test")
argp.add_argument("-p", "--port", default=10080, type=int)
argp.add_argument("-s", "--ssl", default=False, action="store_true")
args = argp.parse_args()
print("server running on port %d" % args.port)
class Handler(BaseHTTPRequestHandler):
def good(self):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write(
"<html><head><title>Hello world!</title></head>".encode("ascii")
)
self.wfile.write(
"<body><p>This is a test</p></body></html>".encode("ascii")
)
def do_GET(self):
if self.path == "/get":
self.good()
def do_POST(self):
content_len = self.headers.get("content-length")
content = self.rfile.read(int(content_len)).decode("ascii")
if self.path == "/post" and content == "hello":
self.good()
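# For reference: a "GET /get" request (e.g. from the httpcli test client)
# receives the HTML body above, and a "POST /post" with body "hello" gets
# the same 200 response.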
httpd = HTTPServer(("localhost", args.port), Handler)
if args.ssl:
ctx = ssl.SSLContext()
ctx.load_cert_chain(certfile=_PEM, keyfile=_KEY)
httpd.socket = ctx.wrap_socket(httpd.socket, server_side=True)
httpd.serve_forever()
| 2,316
| 28.705128
| 76
|
py
|
grpc
|
grpc-master/test/cpp/naming/manual_run_resolver_component_test.py
|
#!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
# The c-ares test suite doesn't get run regularly on Windows, but
# this script provides a way to run a lot of the tests manually.
_MSBUILD_CONFIG = os.environ["CONFIG"]
os.chdir(os.path.join("..", "..", os.getcwd()))
# This port is arbitrary, but it needs to be available.
_DNS_SERVER_PORT = 15353
subprocess.call(
[
sys.executable,
"test\\cpp\\naming\\resolver_component_tests_runner.py",
"--test_bin_path",
"cmake\\build\\%s\\resolver_component_test.exe" % _MSBUILD_CONFIG,
"--dns_server_bin_path",
"test\\cpp\\naming\\utils\\dns_server.py",
"--records_config_path",
"test\\cpp\\naming\\resolver_test_record_groups.yaml",
"--dns_server_port",
str(_DNS_SERVER_PORT),
"--dns_resolver_bin_path",
"test\\cpp\\naming\\utils\\dns_resolver.py",
"--tcp_connect_bin_path",
"test\\cpp\\naming\\utils\\tcp_connect.py",
]
)
| 1,576
| 34.044444
| 74
|
py
|
grpc
|
grpc-master/test/cpp/naming/gen_build_yaml.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the naming tests."""
import collections
import hashlib
import json
import yaml
_LOCAL_DNS_SERVER_ADDRESS = "127.0.0.1:15353"
def _append_zone_name(name, zone_name):
return "%s.%s" % (name, zone_name)
def _build_expected_addrs_cmd_arg(expected_addrs):
out = []
for addr in expected_addrs:
out.append("%s,%s" % (addr["address"], str(addr["is_balancer"])))
return ";".join(out)
def _resolver_test_cases(resolver_component_data):
out = []
for test_case in resolver_component_data["resolver_component_tests"]:
target_name = _append_zone_name(
test_case["record_to_resolve"],
resolver_component_data["resolver_tests_common_zone_name"],
)
out.append(
{
"test_title": target_name,
"arg_names_and_values": [
("target_name", target_name),
(
"do_ordered_address_comparison",
test_case["do_ordered_address_comparison"],
),
(
"expected_addrs",
_build_expected_addrs_cmd_arg(
test_case["expected_addrs"]
),
),
(
"expected_chosen_service_config",
(test_case["expected_chosen_service_config"] or ""),
),
(
"expected_service_config_error",
(test_case["expected_service_config_error"] or ""),
),
(
"expected_lb_policy",
(test_case["expected_lb_policy"] or ""),
),
("enable_srv_queries", test_case["enable_srv_queries"]),
("enable_txt_queries", test_case["enable_txt_queries"]),
(
"inject_broken_nameserver_list",
test_case["inject_broken_nameserver_list"],
),
],
}
)
return out
def main():
resolver_component_data = ""
with open("test/cpp/naming/resolver_test_record_groups.yaml") as f:
resolver_component_data = yaml.safe_load(f)
json_out = {
"resolver_tests_common_zone_name": resolver_component_data[
"resolver_tests_common_zone_name"
],
# this data is required by the resolver_component_tests_runner.py.template
"resolver_component_test_cases": _resolver_test_cases(
resolver_component_data
),
}
print(yaml.safe_dump(json_out))
if __name__ == "__main__":
main()
| 3,410
| 32.116505
| 82
|
py
|
grpc
|
grpc-master/test/cpp/naming/resolver_component_tests_runner.py
|
#!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is auto-generated
import argparse
import os
import platform
import signal
import subprocess
import sys
import tempfile
import time
argp = argparse.ArgumentParser(description='Run c-ares resolver tests')
argp.add_argument('--test_bin_path', default=None, type=str,
help='Path to gtest test binary to invoke.')
argp.add_argument('--dns_server_bin_path', default=None, type=str,
help='Path to local DNS server python script.')
argp.add_argument('--records_config_path', default=None, type=str,
help=('Path to DNS records yaml file that '
'specifies records for the DNS server. '))
argp.add_argument('--dns_server_port', default=None, type=int,
help=('Port that local DNS server is listening on.'))
argp.add_argument('--dns_resolver_bin_path', default=None, type=str,
help=('Path to the DNS health check utility.'))
argp.add_argument('--tcp_connect_bin_path', default=None, type=str,
help=('Path to the TCP health check utility.'))
argp.add_argument('--extra_args', default='', type=str,
help=('Comma-separated list of command args to '
'plumb through to --test_bin_path'))
args = argp.parse_args()
def test_runner_log(msg):
sys.stderr.write('\n%s: %s\n' % (__file__, msg))
def python_args(arg_list):
if platform.system() == 'Windows':
return [sys.executable] + arg_list
return arg_list
cur_resolver = os.environ.get('GRPC_DNS_RESOLVER')
if cur_resolver and cur_resolver != 'ares':
test_runner_log(('WARNING: cur resolver set to %s. This set of tests '
'needs to use GRPC_DNS_RESOLVER=ares.') % cur_resolver)
test_runner_log('Exit 1 without running tests.')
sys.exit(1)
os.environ.update({'GRPC_TRACE': 'cares_resolver,cares_address_sorting'})
def wait_until_dns_server_is_up(args,
dns_server_subprocess,
dns_server_subprocess_output):
for i in range(0, 30):
test_runner_log('Health check: attempt to connect to DNS server over TCP.')
tcp_connect_subprocess = subprocess.Popen(python_args([
args.tcp_connect_bin_path,
'--server_host', '127.0.0.1',
'--server_port', str(args.dns_server_port),
'--timeout', str(1)]))
tcp_connect_subprocess.communicate()
if tcp_connect_subprocess.returncode == 0:
test_runner_log(('Health check: attempt to make an A-record '
'query to DNS server.'))
dns_resolver_subprocess = subprocess.Popen(python_args([
args.dns_resolver_bin_path,
'--qname', 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp',
'--server_host', '127.0.0.1',
'--server_port', str(args.dns_server_port)]),
stdout=subprocess.PIPE)
dns_resolver_stdout, _ = dns_resolver_subprocess.communicate(str.encode('ascii'))
if dns_resolver_subprocess.returncode == 0:
if '123.123.123.123'.encode('ascii') in dns_resolver_stdout:
test_runner_log(('DNS server is up! '
'Successfully reached it over UDP and TCP.'))
return
time.sleep(0.1)
dns_server_subprocess.kill()
dns_server_subprocess.wait()
test_runner_log(('Failed to reach DNS server over TCP and/or UDP. '
'Exiting without running tests.'))
test_runner_log('======= DNS server stdout '
'(merged stdout and stderr) =============')
with open(dns_server_subprocess_output, 'r') as l:
test_runner_log(l.read())
test_runner_log('======= end DNS server output=========')
sys.exit(1)
dns_server_subprocess_output = tempfile.mktemp()
with open(dns_server_subprocess_output, 'w') as l:
dns_server_subprocess = subprocess.Popen(python_args([
args.dns_server_bin_path,
'--port', str(args.dns_server_port),
'--records_config_path', args.records_config_path]),
stdin=subprocess.PIPE,
stdout=l,
stderr=l)
def _quit_on_signal(signum, _frame):
test_runner_log('Received signal: %d' % signum)
dns_server_subprocess.kill()
dns_server_subprocess.wait()
sys.exit(1)
signal.signal(signal.SIGINT, _quit_on_signal)
signal.signal(signal.SIGTERM, _quit_on_signal)
wait_until_dns_server_is_up(args,
dns_server_subprocess,
dns_server_subprocess_output)
num_test_failures = 0
test_runner_log('Run test with target: %s' % 'no-srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'no-srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '5.5.5.5:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-single-target.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:1234,True',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-multi-target.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.5:1234,True;1.2.3.6:1234,True;1.2.3.7:1234,True',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv6-single-target.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '[2607:f8b0:400a:801::1001]:1234,True',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv6-multi-target.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1003]:1234,True;[2607:f8b0:400a:801::1004]:1234,True',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-simple-service-config.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:1234,True',
'--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService"}],"waitForReady":true}]}',
'--expected_service_config_error', '',
'--expected_lb_policy', 'round_robin',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-no-srv-simple-service-config.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NoSrvSimpleService"}],"waitForReady":true}]}',
'--expected_service_config_error', '',
'--expected_lb_policy', 'round_robin',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-no-config-for-cpp.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-cpp-config-has-zero-percentage.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-second-language-is-cpp.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService"}],"waitForReady":true}]}',
'--expected_service_config_error', '',
'--expected_lb_policy', 'round_robin',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-config-with-percentages.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"AlwaysPickedService"}],"waitForReady":true}]}',
'--expected_service_config_error', '',
'--expected_lb_policy', 'round_robin',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:1234,True;1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1002]:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-config-causing-fallback-to-tcp.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-config-causing-fallback-to-tcp.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTwo","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooThree","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooFour","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooFive","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooSix","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooSeven","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooEight","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooNine","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTen","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooEleven","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooTwelve","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooThirteen","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooFourteen","service":"SimpleService"}],"waitForReady":true},{"name":[{"method":"FooFifteen","service":"SimpleService"}],"waitForReady":true}]}',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '2.3.4.5:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'False',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '9.2.3.5:443,False;9.2.3.6:443,False;9.2.3.7:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'False',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv6-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv6-single-target-srv-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '[2600::1001]:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'False',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv6-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv6-multi-target-srv-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '[2600::1002]:443,False;[2600::1003]:443,False;[2600::1004]:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'False',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-simple-service-config-srv-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-simple-service-config-srv-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '5.5.3.4:443,False',
'--expected_chosen_service_config', '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService"}],"waitForReady":true}]}',
'--expected_service_config_error', '',
'--expected_lb_policy', 'round_robin',
'--enable_srv_queries', 'False',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'srv-ipv4-simple-service-config-txt-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'srv-ipv4-simple-service-config-txt-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:1234,True',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'False',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-cpp-config-has-zero-percentage-txt-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-cpp-config-has-zero-percentage-txt-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'False',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-second-language-is-cpp-txt-disabled.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-second-language-is-cpp-txt-disabled.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'False',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_json.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-svc_cfg_bad_json.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', 'JSON parse error',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_client_language.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-svc_cfg_bad_client_language.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', 'clientLanguage error:is not an array',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_percentage.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-svc_cfg_bad_percentage.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', 'percentage error:failed to parse number',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-svc_cfg_bad_wait_for_ready.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-svc_cfg_bad_wait_for_ready.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', 'field:methodConfig[0].waitForReady error:is not a boolean',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'no-srv-ipv4-single-target-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'no-srv-ipv4-single-target-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '5.5.5.5:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'True',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'ipv4-config-causing-fallback-to-tcp-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'ipv4-config-causing-fallback-to-tcp-inject-broken-nameservers.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'False',
'--expected_addrs', '1.2.3.4:443,False',
'--expected_chosen_service_config', '',
'--expected_service_config_error', 'field:loadBalancingPolicy error:is not a string',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'True',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('Run test with target: %s' % 'load-balanced-name-with-dualstack-balancer.resolver-tests-version-4.grpctestingexp.')
current_test_subprocess = subprocess.Popen([
args.test_bin_path,
'--target_name', 'load-balanced-name-with-dualstack-balancer.resolver-tests-version-4.grpctestingexp.',
'--do_ordered_address_comparison', 'True',
'--expected_addrs', '[::1]:1234,True;[2002::1111]:1234,True',
'--expected_chosen_service_config', '',
'--expected_service_config_error', '',
'--expected_lb_policy', '',
'--enable_srv_queries', 'True',
'--enable_txt_queries', 'True',
'--inject_broken_nameserver_list', 'False',
'--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port
] + args.extra_args.split(','))
current_test_subprocess.communicate()
if current_test_subprocess.returncode != 0:
num_test_failures += 1
test_runner_log('now kill DNS server')
dns_server_subprocess.kill()
dns_server_subprocess.wait()
test_runner_log('%d tests failed.' % num_test_failures)
sys.exit(num_test_failures)
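The generated blocks above all follow one shape; purely as an illustration (a hypothetical refactoring, not part of the generated runner), the same invocation can be captured in a helper that reuses the args object, subprocess module, and failure counting already in scope:
def run_one_resolver_test(args, target_name, expected_addrs, ordered='False',
                          expected_config='', expected_error='',
                          expected_lb='', srv='True', txt='True',
                          broken_ns='False'):
    # Mirrors one generated block: spawn the test binary and return its exit code.
    cmd = [
        args.test_bin_path,
        '--target_name', target_name,
        '--do_ordered_address_comparison', ordered,
        '--expected_addrs', expected_addrs,
        '--expected_chosen_service_config', expected_config,
        '--expected_service_config_error', expected_error,
        '--expected_lb_policy', expected_lb,
        '--enable_srv_queries', srv,
        '--enable_txt_queries', txt,
        '--inject_broken_nameserver_list', broken_ns,
        '--local_dns_server_address', '127.0.0.1:%d' % args.dns_server_port,
    ] + args.extra_args.split(',')
    return subprocess.call(cmd)
Each generated block is then equivalent to incrementing num_test_failures whenever run_one_resolver_test(...) returns non-zero.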
| 31,352
| 47.235385
| 1,276
|
py
|
grpc
|
grpc-master/test/cpp/naming/utils/tcp_connect.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Opens a TCP connection to a specified server and then exits."""
import argparse
import socket
import sys
import threading
import time
def main():
argp = argparse.ArgumentParser(
description="Open a TCP handshake to a server"
)
argp.add_argument(
"-s",
"--server_host",
default=None,
type=str,
help="Server host name or IP.",
)
argp.add_argument(
"-p",
"--server_port",
default=0,
type=int,
help="Port that the server is listening on.",
)
argp.add_argument(
"-t",
"--timeout",
default=1,
type=int,
        help="Fail and exit if the connection is not established within this many seconds.",
)
args = argp.parse_args()
    socket.create_connection(
        (args.server_host, args.server_port), timeout=args.timeout
    )
if __name__ == "__main__":
main()
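The script relies on process exit to close the socket; a variant that closes explicitly and reports the outcome as a boolean (a minimal sketch under the same semantics, not part of the test suite):
import socket
def can_connect(host, port, timeout=1):
    # Returns True if a TCP handshake to (host, port) completes within the timeout.
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
        sock.close()
        return True
    except socket.error:
        return False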
| 1,496
| 25.263158
| 74
|
py
|
grpc
|
grpc-master/test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import signal
import subprocess
import sys
import tempfile
import time
import yaml
argp = argparse.ArgumentParser(
description="Runs a DNS server for LB interop tests"
)
argp.add_argument(
"-l",
"--grpclb_ips",
default=None,
type=str,
help="Comma-separated list of IP addresses of balancers",
)
argp.add_argument(
"-f",
"--fallback_ips",
default=None,
type=str,
help="Comma-separated list of IP addresses of fallback servers",
)
argp.add_argument(
"-c",
"--cause_no_error_no_data_for_balancer_a_record",
default=False,
action="store_const",
const=True,
help=(
"Used for testing the case in which the grpclb "
"balancer A record lookup results in a DNS NOERROR response "
"but with no ANSWER section i.e. no addresses"
),
)
args = argp.parse_args()
balancer_records = []
grpclb_ips = (args.grpclb_ips or "").split(",")
if grpclb_ips[0]:
for ip in grpclb_ips:
balancer_records.append(
{
"TTL": "2100",
"data": ip,
"type": "A",
}
)
fallback_records = []
fallback_ips = (args.fallback_ips or "").split(",")
if fallback_ips[0]:
for ip in fallback_ips:
fallback_records.append(
{
"TTL": "2100",
"data": ip,
"type": "A",
}
)
records_config_yaml = {
"resolver_tests_common_zone_name": "test.google.fr.",
"resolver_component_tests": [
{
"records": {
"_grpclb._tcp.server": [
{
"TTL": "2100",
"data": "0 0 12000 balancer",
"type": "SRV",
},
],
"balancer": balancer_records,
"server": fallback_records,
}
}
],
}
if args.cause_no_error_no_data_for_balancer_a_record:
balancer_records = records_config_yaml["resolver_component_tests"][0][
"records"
]["balancer"]
assert not balancer_records
# Insert a TXT record at the balancer.test.google.fr. domain.
# This TXT record won't actually be resolved or used by gRPC clients;
    # inserting this record is just a way to get the balancer.test.google.fr.
# A record queries to return NOERROR DNS responses that also have no
# ANSWER section, in order to simulate this failure case.
balancer_records.append(
{
"TTL": "2100",
"data": "arbitrary string that wont actually be resolved",
"type": "TXT",
}
)
# Generate the actual DNS server records config file
records_config_fd, records_config_path = tempfile.mkstemp()
os.close(records_config_fd)
with open(records_config_path, "w") as records_config_generated:
records_config_generated.write(yaml.dump(records_config_yaml))
with open(records_config_path, "r") as records_config_generated:
sys.stderr.write("===== DNS server records config: =====\n")
sys.stderr.write(records_config_generated.read())
sys.stderr.write("======================================\n")
# Run the DNS server
# Note that we need to add the extra
# A record for metadata.google.internal in order for compute engine
# OAuth creds and ALTS creds to work.
# TODO(apolcyn): should metadata.google.internal always resolve
# to 169.254.169.254?
subprocess.check_output(
[
"/var/local/git/grpc/test/cpp/naming/utils/dns_server.py",
"--port=53",
"--records_config_path",
records_config_path,
"--add_a_record=metadata.google.internal:169.254.169.254",
]
)
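For reference, with hypothetical flags -l 10.0.0.1 -f 10.0.0.2 the records config written above comes out roughly as the following YAML (exact key order and flow style depend on yaml.dump):
resolver_tests_common_zone_name: test.google.fr.
resolver_component_tests:
- records:
    _grpclb._tcp.server:
    - {TTL: '2100', data: 0 0 12000 balancer, type: SRV}
    balancer:
    - {TTL: '2100', data: 10.0.0.1, type: A}
    server:
    - {TTL: '2100', data: 10.0.0.2, type: A}
so server.test.google.fr. resolves to the fallback address while the SRV record directs grpclb balancer lookups to balancer.test.google.fr. on port 12000.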
| 4,234
| 29.467626
| 74
|
py
|
grpc
|
grpc-master/test/cpp/naming/utils/dns_server.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts a local DNS server for use in tests"""
import argparse
import os
import platform
import signal
import sys
import threading
import time
import twisted
import twisted.internet
import twisted.internet.defer
import twisted.internet.protocol
import twisted.internet.reactor
import twisted.internet.threads
import twisted.names
from twisted.names import authority
from twisted.names import client
from twisted.names import common
from twisted.names import dns
from twisted.names import server
import twisted.names.client
import twisted.names.dns
import twisted.names.server
import yaml
_SERVER_HEALTH_CHECK_RECORD_NAME = ( # missing end '.' for twisted syntax
"health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp"
)
_SERVER_HEALTH_CHECK_RECORD_DATA = "123.123.123.123"
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# skip FileAuthority
common.ResolverBase.__init__(self)
self.soa = soa
self.records = records
def start_local_dns_server(args):
all_records = {}
def _push_record(name, r):
name = name.encode("ascii")
print("pushing record: |%s|" % name)
if all_records.get(name) is not None:
all_records[name].append(r)
return
all_records[name] = [r]
def _maybe_split_up_txt_data(name, txt_data, r_ttl):
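        # DNS TXT record data is carried in character strings of at most 255
        # bytes each (RFC 1035), so longer payloads are split into 255-byte
        # chunks below.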
txt_data = txt_data.encode("ascii")
start = 0
txt_data_list = []
while len(txt_data[start:]) > 0:
next_read = len(txt_data[start:])
if next_read > 255:
next_read = 255
txt_data_list.append(txt_data[start : start + next_read])
start += next_read
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))
with open(args.records_config_path) as config:
test_records_config = yaml.safe_load(config)
common_zone_name = test_records_config["resolver_tests_common_zone_name"]
for group in test_records_config["resolver_component_tests"]:
for name in group["records"].keys():
for record in group["records"][name]:
r_type = record["type"]
r_data = record["data"]
r_ttl = int(record["TTL"])
record_full_name = "%s.%s" % (name, common_zone_name)
assert record_full_name[-1] == "."
record_full_name = record_full_name[:-1]
if r_type == "A":
_push_record(
record_full_name, dns.Record_A(r_data, ttl=r_ttl)
)
if r_type == "AAAA":
_push_record(
record_full_name, dns.Record_AAAA(r_data, ttl=r_ttl)
)
if r_type == "SRV":
p, w, port, target = r_data.split(" ")
p = int(p)
w = int(w)
port = int(port)
target_full_name = (
"%s.%s" % (target, common_zone_name)
).encode("ascii")
_push_record(
record_full_name,
dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl),
)
if r_type == "TXT":
_maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
    # Add an optional IPv4 record if specified
if args.add_a_record:
extra_host, extra_host_ipv4 = args.add_a_record.split(":")
_push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
# Server health check record
_push_record(
_SERVER_HEALTH_CHECK_RECORD_NAME,
dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0),
)
soa_record = dns.Record_SOA(mname=common_zone_name.encode("ascii"))
test_domain_com = NoFileAuthority(
soa=(common_zone_name.encode("ascii"), soa_record),
records=all_records,
)
server = twisted.names.server.DNSServerFactory(
authorities=[test_domain_com], verbose=2
)
server.noisy = 2
twisted.internet.reactor.listenTCP(args.port, server)
dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
dns_proto.noisy = 2
twisted.internet.reactor.listenUDP(args.port, dns_proto)
print("starting local dns server on 127.0.0.1:%s" % args.port)
print("starting twisted.internet.reactor")
twisted.internet.reactor.suggestThreadPoolSize(1)
twisted.internet.reactor.run()
def _quit_on_signal(signum, _frame):
print("Received SIGNAL %d. Quitting with exit code 0" % signum)
twisted.internet.reactor.stop()
sys.stdout.flush()
sys.exit(0)
def flush_stdout_loop():
num_timeouts_so_far = 0
sleep_time = 1
# Prevent zombies. Tests that use this server are short-lived.
max_timeouts = 60 * 10
while num_timeouts_so_far < max_timeouts:
sys.stdout.flush()
time.sleep(sleep_time)
num_timeouts_so_far += 1
    print("Process timeout reached, or cancelled. Exiting 0.")
os.kill(os.getpid(), signal.SIGTERM)
def main():
argp = argparse.ArgumentParser(
description="Local DNS Server for resolver tests"
)
argp.add_argument(
"-p",
"--port",
default=None,
type=int,
help="Port for DNS server to listen on for TCP and UDP.",
)
argp.add_argument(
"-r",
"--records_config_path",
default=None,
type=str,
        help=(
            "Path to the resolver_test_record_groups.yaml file. "
            "Defaults to the path needed when the test is invoked as part "
            "of run_tests.py."
        ),
)
argp.add_argument(
"--add_a_record",
default=None,
type=str,
        help=(
            "Add an A record via the command line. Useful for when we "
            "need to serve a one-off A record that is under a "
            "different domain than the rest of the records configured in "
            "--records_config_path (which all need to be under the "
            "same domain). Format: <name>:<ipv4 address>"
        ),
)
args = argp.parse_args()
signal.signal(signal.SIGTERM, _quit_on_signal)
signal.signal(signal.SIGINT, _quit_on_signal)
output_flush_thread = threading.Thread(target=flush_stdout_loop)
    output_flush_thread.daemon = True
output_flush_thread.start()
start_local_dns_server(args)
if __name__ == "__main__":
main()
| 7,096
| 33.451456
| 80
|
py
|
grpc
|
grpc-master/test/cpp/naming/utils/dns_resolver.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Makes DNS queries for A records to specified servers"""
import argparse
import threading
import time
import twisted.internet.reactor as reactor
import twisted.internet.task as task
import twisted.names.client as client
def main():
argp = argparse.ArgumentParser(description="Make DNS queries for A records")
argp.add_argument(
"-s",
"--server_host",
default="127.0.0.1",
type=str,
help="Host for DNS server to listen on for TCP and UDP.",
)
argp.add_argument(
"-p",
"--server_port",
default=53,
type=int,
help="Port that the DNS server is listening on.",
)
argp.add_argument(
"-n",
"--qname",
default=None,
type=str,
        help="Name of the record to query for.",
)
argp.add_argument(
"-t",
"--timeout",
default=1,
type=int,
help="Force process exit after this number of seconds.",
)
args = argp.parse_args()
def OnResolverResultAvailable(result):
answers, authority, additional = result
for a in answers:
print(a.payload)
def BeginQuery(reactor, qname):
servers = [(args.server_host, args.server_port)]
resolver = client.Resolver(servers=servers)
        deferred_result = resolver.lookupAddress(qname)
deferred_result.addCallback(OnResolverResultAvailable)
return deferred_result
task.react(BeginQuery, [args.qname])
if __name__ == "__main__":
main()
| 2,143
| 27.586667
| 80
|
py
|
grpc
|
grpc-master/test/cpp/qps/json_run_localhost_scenario_gen.py
|
#!/usr/bin/env python3
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(script_dir)
import scenario_generator_helper as gen
gen.generate_scenarios_bzl(
gen.generate_json_run_localhost_scenarios(),
os.path.join(script_dir, "json_run_localhost_scenarios.bzl"),
"JSON_RUN_LOCALHOST_SCENARIOS",
)
| 931
| 30.066667
| 74
|
py
|
grpc
|
grpc-master/test/cpp/qps/qps_json_driver_scenario_gen.py
|
#!/usr/bin/env python3
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(script_dir)
import scenario_generator_helper as gen
gen.generate_scenarios_bzl(
gen.generate_qps_json_driver_scenarios(),
os.path.join(script_dir, "qps_json_driver_scenarios.bzl"),
"QPS_JSON_DRIVER_SCENARIOS",
)
| 922
| 29.766667
| 74
|
py
|
grpc
|
grpc-master/test/cpp/qps/scenario_generator_helper.py
|
#!/usr/bin/env python3
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import os
import sys
import yaml
run_tests_root = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), "../../../tools/run_tests")
)
sys.path.append(run_tests_root)
import performance.scenario_config as scenario_config
_COPYRIGHT = """# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
def _mutate_scenario(scenario_json):
"""Modifies vanilla benchmark scenario config to make it more suitable for running as a unit test."""
# tweak parameters to get fast test times
scenario_json = dict(scenario_json)
scenario_json["warmup_seconds"] = 0
scenario_json["benchmark_seconds"] = 1
outstanding_rpcs_divisor = 1
if (
scenario_json["client_config"]["client_type"] == "SYNC_CLIENT"
or scenario_json["server_config"]["server_type"] == "SYNC_SERVER"
):
# reduce the number of threads needed for scenarios that use synchronous API
outstanding_rpcs_divisor = 10
scenario_json["client_config"]["outstanding_rpcs_per_channel"] = max(
1,
scenario_json["client_config"]["outstanding_rpcs_per_channel"]
// outstanding_rpcs_divisor,
)
# Some scenarios use high channel count since when actually
# benchmarking, we want to saturate the machine that runs the benchmark.
    # For a unit test, this is overkill.
max_client_channels = 16
if scenario_json["client_config"]["rpc_type"] == "STREAMING_FROM_SERVER":
# streaming from server scenarios tend to have trouble shutting down
# quickly if there are too many channels.
max_client_channels = 4
scenario_json["client_config"]["client_channels"] = min(
max_client_channels, scenario_json["client_config"]["client_channels"]
)
return scenario_config.remove_nonproto_fields(scenario_json)
def generate_json_run_localhost_scenarios():
return [
_mutate_scenario(scenario_json)
for scenario_json in scenario_config.CXXLanguage().scenarios()
if "scalable" in scenario_json.get("CATEGORIES", [])
]
def generate_qps_json_driver_scenarios():
return [
_mutate_scenario(scenario_json)
for scenario_json in scenario_config.CXXLanguage().scenarios()
if "inproc" in scenario_json.get("CATEGORIES", [])
]
def generate_scenarios_bzl(json_scenarios, bzl_filename, bzl_variablename):
"""Generate .bzl file that defines a variable with JSON scenario configs."""
all_scenarios = []
for scenario in json_scenarios:
scenario_name = scenario["name"]
# argument will be passed as "--scenarios_json" to the test binary
# the string needs to be quoted in \' to ensure it gets passed as a single argument in shell
scenarios_json_arg_str = "\\'%s\\'" % json.dumps(
{"scenarios": [scenario]}
)
all_scenarios.append((scenario_name, scenarios_json_arg_str))
with open(bzl_filename, "w") as f:
f.write(_COPYRIGHT)
f.write(
'"""AUTOGENERATED: configuration of benchmark scenarios to be run'
' as bazel test"""\n\n'
)
f.write("%s = {\n" % bzl_variablename)
for scenario in all_scenarios:
f.write(" \"%s\": '%s',\n" % (scenario[0], scenario[1]))
f.write("}\n")
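As a concrete illustration, a single hypothetical scenario named cpp_example would make generate_scenarios_bzl emit a file shaped like (copyright header elided):
"""AUTOGENERATED: configuration of benchmark scenarios to be run as bazel test"""
QPS_JSON_DRIVER_SCENARIOS = {
    "cpp_example": '\'{"scenarios": [{"name": "cpp_example"}]}\'',
}
The escaped \' quoting survives into the shell so that the JSON reaches the test binary as a single --scenarios_json argument.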
| 4,455
| 36.133333
| 105
|
py
|
grpc
|
grpc-master/test/http2_test/http2_test_server.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTTP2 Test Server"""
import argparse
import logging
import sys
import http2_base_server
import test_data_frame_padding
import test_goaway
import test_max_streams
import test_ping
import test_rst_after_data
import test_rst_after_header
import test_rst_during_data
import twisted
import twisted.internet
import twisted.internet.endpoints
import twisted.internet.protocol
import twisted.internet.reactor
_TEST_CASE_MAPPING = {
"rst_after_header": test_rst_after_header.TestcaseRstStreamAfterHeader,
"rst_after_data": test_rst_after_data.TestcaseRstStreamAfterData,
"rst_during_data": test_rst_during_data.TestcaseRstStreamDuringData,
"goaway": test_goaway.TestcaseGoaway,
"ping": test_ping.TestcasePing,
"max_streams": test_max_streams.TestcaseSettingsMaxStreams,
# Positive tests below:
"data_frame_padding": test_data_frame_padding.TestDataFramePadding,
"no_df_padding_sanity_test": test_data_frame_padding.TestDataFramePadding,
}
_exit_code = 0
class H2Factory(twisted.internet.protocol.Factory):
def __init__(self, testcase):
logging.info("Creating H2Factory for new connection (%s)", testcase)
self._num_streams = 0
self._testcase = testcase
def buildProtocol(self, addr):
self._num_streams += 1
logging.info("New Connection: %d" % self._num_streams)
        if self._testcase not in _TEST_CASE_MAPPING:
logging.error("Unknown test case: %s" % self._testcase)
assert 0
else:
t = _TEST_CASE_MAPPING[self._testcase]
if self._testcase == "goaway":
return t(self._num_streams).get_base_server()
elif self._testcase == "no_df_padding_sanity_test":
return t(use_padding=False).get_base_server()
else:
return t().get_base_server()
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--base_port",
type=int,
default=8080,
help=(
"base port to run the servers (default: 8080). One test server is "
"started on each incrementing port, beginning with base_port, in"
" the "
"following order: data_frame_padding,goaway,max_streams,"
"no_df_padding_sanity_test,ping,rst_after_data,rst_after_header,"
"rst_during_data"
),
)
return parser.parse_args()
def listen(endpoint, test_case):
deferred = endpoint.listen(H2Factory(test_case))
def listen_error(reason):
# If listening fails, we stop the reactor and exit the program
# with exit code 1.
global _exit_code
_exit_code = 1
logging.error("Listening failed: %s" % reason.value)
twisted.internet.reactor.stop()
deferred.addErrback(listen_error)
def start_test_servers(base_port):
"""Start one server per test case on incrementing port numbers
beginning with base_port"""
index = 0
for test_case in sorted(_TEST_CASE_MAPPING.keys()):
portnum = base_port + index
logging.warning("serving on port %d : %s" % (portnum, test_case))
endpoint = twisted.internet.endpoints.TCP4ServerEndpoint(
twisted.internet.reactor, portnum, backlog=128
)
# Wait until the reactor is running before calling endpoint.listen().
twisted.internet.reactor.callWhenRunning(listen, endpoint, test_case)
index += 1
if __name__ == "__main__":
logging.basicConfig(
format=(
"%(levelname) -10s %(asctime)s %(module)s:%(lineno)s | %(message)s"
),
level=logging.INFO,
)
args = parse_arguments()
start_test_servers(args.base_port)
twisted.internet.reactor.run()
sys.exit(_exit_code)
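Because start_test_servers walks sorted(_TEST_CASE_MAPPING), the port assignment is deterministic and reproduces the order promised in the --base_port help text; with the default base port:
for offset, name in enumerate(sorted(_TEST_CASE_MAPPING)):
    print("%d -> %s" % (8080 + offset, name))
# 8080 data_frame_padding, 8081 goaway, 8082 max_streams,
# 8083 no_df_padding_sanity_test, 8084 ping, 8085 rst_after_data,
# 8086 rst_after_header, 8087 rst_during_data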
| 4,304
| 32.115385
| 79
|
py
|
grpc
|
grpc-master/test/http2_test/test_rst_during_data.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http2_base_server
class TestcaseRstStreamDuringData(object):
"""
In response to an incoming request, this test sends headers, followed by
some data, followed by a reset stream frame. Client asserts that the RPC
failed and does not deliver the message to the application.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers["DataReceived"] = self.on_data_received
self._base_server._handlers["SendDone"] = self.on_send_done
def get_base_server(self):
return self._base_server
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
response_data = self._base_server.default_response_data(
sr.response_size
)
self._ready_to_send = True
response_len = len(response_data)
truncated_response_data = response_data[0 : response_len / 2]
self._base_server.setup_send(
truncated_response_data, event.stream_id
)
def on_send_done(self, stream_id):
self._base_server.send_reset_stream()
self._base_server._stream_status[stream_id] = False
| 1,900
| 37.02
| 76
|
py
|
grpc
|
grpc-master/test/http2_test/http2_server_health_check.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import hyper
# Utility to healthcheck the http2 server. Used when starting the server to
# verify that the server is live before tests begin.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--server_host", type=str, default="localhost")
parser.add_argument("--server_port", type=int, default=8080)
args = parser.parse_args()
server_host = args.server_host
server_port = args.server_port
conn = hyper.HTTP20Connection("%s:%d" % (server_host, server_port))
conn.request("POST", "/grpc.testing.TestService/UnaryCall")
resp = conn.get_response()
if resp.headers.get("grpc-encoding") is None:
sys.exit(1)
else:
sys.exit(0)
| 1,320
| 35.694444
| 75
|
py
|
grpc
|
grpc-master/test/http2_test/test_rst_after_data.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http2_base_server
class TestcaseRstStreamAfterData(object):
"""
In response to an incoming request, this test sends headers, followed by
data, followed by a reset stream frame. Client asserts that the RPC failed.
Client needs to deliver the complete message to the application layer.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers["DataReceived"] = self.on_data_received
self._base_server._handlers["SendDone"] = self.on_send_done
def get_base_server(self):
return self._base_server
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
response_data = self._base_server.default_response_data(
sr.response_size
)
self._ready_to_send = True
self._base_server.setup_send(response_data, event.stream_id)
# send reset stream
def on_send_done(self, stream_id):
self._base_server.send_reset_stream()
self._base_server._stream_status[stream_id] = False
| 1,785
| 37
| 79
|
py
|
grpc
|
grpc-master/test/http2_test/http2_base_server.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
import h2
import h2.connection
import h2.events
import h2.exceptions
import messages_pb2
import twisted
import twisted.internet
import twisted.internet.protocol
_READ_CHUNK_SIZE = 16384
_GRPC_HEADER_SIZE = 5
_MIN_SETTINGS_MAX_FRAME_SIZE = 16384
class H2ProtocolBaseServer(twisted.internet.protocol.Protocol):
def __init__(self):
self._conn = h2.connection.H2Connection(client_side=False)
self._recv_buffer = {}
self._handlers = {}
self._handlers["ConnectionMade"] = self.on_connection_made_default
self._handlers["DataReceived"] = self.on_data_received_default
self._handlers["WindowUpdated"] = self.on_window_update_default
self._handlers["RequestReceived"] = self.on_request_received_default
self._handlers["SendDone"] = self.on_send_done_default
self._handlers["ConnectionLost"] = self.on_connection_lost
self._handlers["PingAcknowledged"] = self.on_ping_acknowledged_default
self._stream_status = {}
self._send_remaining = {}
self._outstanding_pings = 0
def set_handlers(self, handlers):
self._handlers = handlers
def connectionMade(self):
self._handlers["ConnectionMade"]()
def connectionLost(self, reason):
self._handlers["ConnectionLost"](reason)
def on_connection_made_default(self):
logging.info("Connection Made")
self._conn.initiate_connection()
self.transport.setTcpNoDelay(True)
self.transport.write(self._conn.data_to_send())
def on_connection_lost(self, reason):
logging.info("Disconnected %s" % reason)
def dataReceived(self, data):
try:
events = self._conn.receive_data(data)
except h2.exceptions.ProtocolError:
# this try/except block catches exceptions due to race between sending
# GOAWAY and processing a response in flight.
return
        # data_to_send() drains the outbound buffer, so call it once and reuse
        # the result; truth-testing the bound method itself is always True.
        pending = self._conn.data_to_send()
        if pending:
            self.transport.write(pending)
for event in events:
            if isinstance(
                event, h2.events.RequestReceived
            ) and "RequestReceived" in self._handlers:
logging.info(
"RequestReceived Event for stream: %d" % event.stream_id
)
self._handlers["RequestReceived"](event)
            elif isinstance(
                event, h2.events.DataReceived
            ) and "DataReceived" in self._handlers:
logging.info(
"DataReceived Event for stream: %d" % event.stream_id
)
self._handlers["DataReceived"](event)
            elif isinstance(
                event, h2.events.WindowUpdated
            ) and "WindowUpdated" in self._handlers:
logging.info(
"WindowUpdated Event for stream: %d" % event.stream_id
)
self._handlers["WindowUpdated"](event)
            elif isinstance(
                event, h2.events.PingAcknowledged
            ) and "PingAcknowledged" in self._handlers:
logging.info("PingAcknowledged Event")
self._handlers["PingAcknowledged"](event)
self.transport.write(self._conn.data_to_send())
def on_ping_acknowledged_default(self, event):
logging.info("ping acknowledged")
self._outstanding_pings -= 1
def on_data_received_default(self, event):
self._conn.acknowledge_received_data(len(event.data), event.stream_id)
self._recv_buffer[event.stream_id] += event.data
def on_request_received_default(self, event):
self._recv_buffer[event.stream_id] = ""
self._stream_id = event.stream_id
self._stream_status[event.stream_id] = True
self._conn.send_headers(
stream_id=event.stream_id,
headers=[
(":status", "200"),
("content-type", "application/grpc"),
("grpc-encoding", "identity"),
("grpc-accept-encoding", "identity,deflate,gzip"),
],
)
self.transport.write(self._conn.data_to_send())
def on_window_update_default(
self, _, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE
):
# try to resume sending on all active streams (update might be for connection)
for stream_id in self._send_remaining:
self.default_send(
stream_id,
pad_length=pad_length,
read_chunk_size=read_chunk_size,
)
def send_reset_stream(self):
self._conn.reset_stream(self._stream_id)
self.transport.write(self._conn.data_to_send())
def setup_send(
self,
data_to_send,
stream_id,
pad_length=None,
read_chunk_size=_READ_CHUNK_SIZE,
):
logging.info("Setting up data to send for stream_id: %d" % stream_id)
self._send_remaining[stream_id] = len(data_to_send)
self._send_offset = 0
self._data_to_send = data_to_send
self.default_send(
stream_id, pad_length=pad_length, read_chunk_size=read_chunk_size
)
def default_send(
self, stream_id, pad_length=None, read_chunk_size=_READ_CHUNK_SIZE
):
        if stream_id not in self._send_remaining:
# not setup to send data yet
return
while self._send_remaining[stream_id] > 0:
lfcw = self._conn.local_flow_control_window(stream_id)
padding_bytes = pad_length + 1 if pad_length is not None else 0
if lfcw - padding_bytes <= 0:
logging.info(
"Stream %d. lfcw: %d. padding bytes: %d. not enough"
" quota yet" % (stream_id, lfcw, padding_bytes)
)
break
chunk_size = min(lfcw - padding_bytes, read_chunk_size)
bytes_to_send = min(chunk_size, self._send_remaining[stream_id])
logging.info(
"flow_control_window = %d. sending [%d:%d] stream_id %d."
" includes %d total padding bytes"
% (
lfcw,
self._send_offset,
self._send_offset + bytes_to_send + padding_bytes,
stream_id,
padding_bytes,
)
)
# The receiver might allow sending frames larger than the http2 minimum
# max frame size (16384), but this test should never send more than 16384
# for simplicity (which is always legal).
if bytes_to_send + padding_bytes > _MIN_SETTINGS_MAX_FRAME_SIZE:
raise ValueError(
"overload: sending %d" % (bytes_to_send + padding_bytes)
)
data = self._data_to_send[
self._send_offset : self._send_offset + bytes_to_send
]
try:
self._conn.send_data(
stream_id, data, end_stream=False, pad_length=pad_length
)
except h2.exceptions.ProtocolError:
logging.info("Stream %d is closed" % stream_id)
break
self._send_remaining[stream_id] -= bytes_to_send
self._send_offset += bytes_to_send
if self._send_remaining[stream_id] == 0:
self._handlers["SendDone"](stream_id)
def default_ping(self):
logging.info("sending ping")
self._outstanding_pings += 1
self._conn.ping(b"\x00" * 8)
self.transport.write(self._conn.data_to_send())
def on_send_done_default(self, stream_id):
if self._stream_status[stream_id]:
self._stream_status[stream_id] = False
self.default_send_trailer(stream_id)
else:
logging.error("Stream %d is already closed" % stream_id)
def default_send_trailer(self, stream_id):
logging.info("Sending trailer for stream id %d" % stream_id)
self._conn.send_headers(
stream_id, headers=[("grpc-status", "0")], end_stream=True
)
self.transport.write(self._conn.data_to_send())
@staticmethod
def default_response_data(response_size):
sresp = messages_pb2.SimpleResponse()
sresp.payload.body = b"\x00" * response_size
serialized_resp_proto = sresp.SerializeToString()
response_data = (
b"\x00"
+ struct.pack("i", len(serialized_resp_proto))[::-1]
+ serialized_resp_proto
)
return response_data
def parse_received_data(self, stream_id):
"""returns a grpc framed string of bytes containing response proto of the size
asked in request"""
recv_buffer = self._recv_buffer[stream_id]
grpc_msg_size = struct.unpack("i", recv_buffer[1:5][::-1])[0]
if len(recv_buffer) != _GRPC_HEADER_SIZE + grpc_msg_size:
return None
req_proto_str = recv_buffer[5 : 5 + grpc_msg_size]
sr = messages_pb2.SimpleRequest()
sr.ParseFromString(req_proto_str)
logging.info("Parsed simple request for stream %d" % stream_id)
return sr
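default_response_data and parse_received_data together implement gRPC's length-prefixed message framing: one compressed-flag byte, a 4-byte big-endian message length, then the serialized proto. A standalone sketch using an explicit big-endian pack, which for the message sizes used here is equivalent to the struct.pack("i", n)[::-1] byte reversal above:
import struct
def grpc_frame(message_bytes):
    # flag byte (0 = uncompressed) + 4-byte big-endian length + message
    return b"\x00" + struct.pack(">I", len(message_bytes)) + message_bytes
def grpc_unframe(frame):
    (length,) = struct.unpack(">I", frame[1:5])
    return frame[5:5 + length]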
| 9,814
| 38.26
| 86
|
py
|
grpc
|
grpc-master/test/http2_test/test_goaway.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import http2_base_server
class TestcaseGoaway(object):
"""
This test does the following:
Process incoming request normally, i.e. send headers, data and trailers.
Then send a GOAWAY frame with the stream id of the processed request.
It checks that the next request is made on a different TCP connection.
"""
def __init__(self, iteration):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers[
"RequestReceived"
] = self.on_request_received
self._base_server._handlers["DataReceived"] = self.on_data_received
self._base_server._handlers["SendDone"] = self.on_send_done
self._base_server._handlers["ConnectionLost"] = self.on_connection_lost
self._ready_to_send = False
self._iteration = iteration
def get_base_server(self):
return self._base_server
def on_connection_lost(self, reason):
logging.info("Disconnect received. Count %d" % self._iteration)
# _iteration == 2 => Two different connections have been used.
if self._iteration == 2:
self._base_server.on_connection_lost(reason)
def on_send_done(self, stream_id):
self._base_server.on_send_done_default(stream_id)
logging.info("Sending GOAWAY for stream %d:" % stream_id)
self._base_server._conn.close_connection(
error_code=0, additional_data=None, last_stream_id=stream_id
)
self._base_server._stream_status[stream_id] = False
def on_request_received(self, event):
self._ready_to_send = False
self._base_server.on_request_received_default(event)
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info("Creating response size = %s" % sr.response_size)
response_data = self._base_server.default_response_data(
sr.response_size
)
self._ready_to_send = True
self._base_server.setup_send(response_data, event.stream_id)
| 2,772
| 38.056338
| 79
|
py
|
grpc
|
grpc-master/test/http2_test/test_ping.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import http2_base_server
class TestcasePing(object):
"""
This test injects PING frames before and after header and data. Keeps count
of outstanding ping response and asserts when the count is non-zero at the
end of the test.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers[
"RequestReceived"
] = self.on_request_received
self._base_server._handlers["DataReceived"] = self.on_data_received
self._base_server._handlers["ConnectionLost"] = self.on_connection_lost
def get_base_server(self):
return self._base_server
def on_request_received(self, event):
self._base_server.default_ping()
self._base_server.on_request_received_default(event)
self._base_server.default_ping()
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info("Creating response size = %s" % sr.response_size)
response_data = self._base_server.default_response_data(
sr.response_size
)
self._base_server.default_ping()
self._base_server.setup_send(response_data, event.stream_id)
self._base_server.default_ping()
def on_connection_lost(self, reason):
logging.info(
"Disconnect received. Ping Count %d"
% self._base_server._outstanding_pings
)
assert self._base_server._outstanding_pings == 0
self._base_server.on_connection_lost(reason)
| 2,270
| 35.629032
| 79
|
py
|
grpc
|
grpc-master/test/http2_test/test_data_frame_padding.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import http2_base_server
import messages_pb2
# Set the number of padding bytes per data frame to be very large
# relative to the number of data bytes for each data frame sent.
_LARGE_PADDING_LENGTH = 255
_SMALL_READ_CHUNK_SIZE = 5
class TestDataFramePadding(object):
"""
    In response to an incoming request, this test sends the response split into
    many small data frames, each carrying a large amount of padding relative to
    its data bytes. The client must account for the padding in flow control and
    deliver the complete message to the application layer.
"""
def __init__(self, use_padding=True):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers["DataReceived"] = self.on_data_received
self._base_server._handlers["WindowUpdated"] = self.on_window_update
self._base_server._handlers[
"RequestReceived"
] = self.on_request_received
# _total_updates maps stream ids to total flow control updates received
self._total_updates = {}
# zero window updates so far for connection window (stream id '0')
self._total_updates[0] = 0
self._read_chunk_size = _SMALL_READ_CHUNK_SIZE
if use_padding:
self._pad_length = _LARGE_PADDING_LENGTH
else:
self._pad_length = None
def get_base_server(self):
return self._base_server
def on_data_received(self, event):
logging.info(
"on data received. Stream id: %d. Data length: %d"
% (event.stream_id, len(event.data))
)
self._base_server.on_data_received_default(event)
if len(event.data) == 0:
return
sr = self._base_server.parse_received_data(event.stream_id)
        # Check if a full gRPC message has been read into the receive buffer yet.
if sr:
response_data = self._base_server.default_response_data(
sr.response_size
)
logging.info(
"Stream id: %d. total resp size: %d"
% (event.stream_id, len(response_data))
)
# Begin sending the response. Add ``self._pad_length`` padding to each
# data frame and split the whole message into data frames each carrying
# only self._read_chunk_size of data.
# The purpose is to have the majority of the data frame response bytes
# be padding bytes, since ``self._pad_length`` >> ``self._read_chunk_size``.
self._base_server.setup_send(
response_data,
event.stream_id,
pad_length=self._pad_length,
read_chunk_size=self._read_chunk_size,
)
def on_request_received(self, event):
self._base_server.on_request_received_default(event)
logging.info("on request received. Stream id: %s." % event.stream_id)
self._total_updates[event.stream_id] = 0
# Log debug info and try to resume sending on all currently active streams.
def on_window_update(self, event):
logging.info(
"on window update. Stream id: %s. Delta: %s"
% (event.stream_id, event.delta)
)
self._total_updates[event.stream_id] += event.delta
total = self._total_updates[event.stream_id]
logging.info(
"... - total updates for stream %d : %d" % (event.stream_id, total)
)
self._base_server.on_window_update_default(
event,
pad_length=self._pad_length,
read_chunk_size=self._read_chunk_size,
)
| 4,185
| 38.121495
| 88
|
py
|
grpc
|
grpc-master/test/http2_test/messages_pb2.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messages.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='messages.proto',
package='grpc.testing',
syntax='proto3',
serialized_pb=_b('\n\x0emessages.proto\x12\x0cgrpc.testing\"\x1a\n\tBoolValue\x12\r\n\x05value\x18\x01 \x01(\x08\"@\n\x07Payload\x12\'\n\x04type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\"+\n\nEchoStatus\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"\xce\x02\n\rSimpleRequest\x12\x30\n\rresponse_type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12\x15\n\rresponse_size\x18\x02 \x01(\x05\x12&\n\x07payload\x18\x03 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x15\n\rfill_username\x18\x04 \x01(\x08\x12\x18\n\x10\x66ill_oauth_scope\x18\x05 \x01(\x08\x12\x34\n\x13response_compressed\x18\x06 \x01(\x0b\x32\x17.grpc.testing.BoolValue\x12\x31\n\x0fresponse_status\x18\x07 \x01(\x0b\x32\x18.grpc.testing.EchoStatus\x12\x32\n\x11\x65xpect_compressed\x18\x08 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"_\n\x0eSimpleResponse\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x10\n\x08username\x18\x02 \x01(\t\x12\x13\n\x0boauth_scope\x18\x03 \x01(\t\"w\n\x19StreamingInputCallRequest\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x32\n\x11\x65xpect_compressed\x18\x02 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"=\n\x1aStreamingInputCallResponse\x12\x1f\n\x17\x61ggregated_payload_size\x18\x01 \x01(\x05\"d\n\x12ResponseParameters\x12\x0c\n\x04size\x18\x01 \x01(\x05\x12\x13\n\x0binterval_us\x18\x02 \x01(\x05\x12+\n\ncompressed\x18\x03 \x01(\x0b\x32\x17.grpc.testing.BoolValue\"\xe8\x01\n\x1aStreamingOutputCallRequest\x12\x30\n\rresponse_type\x18\x01 \x01(\x0e\x32\x19.grpc.testing.PayloadType\x12=\n\x13response_parameters\x18\x02 \x03(\x0b\x32 .grpc.testing.ResponseParameters\x12&\n\x07payload\x18\x03 \x01(\x0b\x32\x15.grpc.testing.Payload\x12\x31\n\x0fresponse_status\x18\x07 \x01(\x0b\x32\x18.grpc.testing.EchoStatus\"E\n\x1bStreamingOutputCallResponse\x12&\n\x07payload\x18\x01 \x01(\x0b\x32\x15.grpc.testing.Payload\"3\n\x0fReconnectParams\x12 \n\x18max_reconnect_backoff_ms\x18\x01 \x01(\x05\"3\n\rReconnectInfo\x12\x0e\n\x06passed\x18\x01 \x01(\x08\x12\x12\n\nbackoff_ms\x18\x02 \x03(\x05*\x1f\n\x0bPayloadType\x12\x10\n\x0c\x43OMPRESSABLE\x10\x00\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PAYLOADTYPE = _descriptor.EnumDescriptor(
name='PayloadType',
full_name='grpc.testing.PayloadType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='COMPRESSABLE', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1303,
serialized_end=1334,
)
_sym_db.RegisterEnumDescriptor(_PAYLOADTYPE)
PayloadType = enum_type_wrapper.EnumTypeWrapper(_PAYLOADTYPE)
COMPRESSABLE = 0
_BOOLVALUE = _descriptor.Descriptor(
name='BoolValue',
full_name='grpc.testing.BoolValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='grpc.testing.BoolValue.value', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=58,
)
_PAYLOAD = _descriptor.Descriptor(
name='Payload',
full_name='grpc.testing.Payload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='grpc.testing.Payload.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='body', full_name='grpc.testing.Payload.body', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=124,
)
_ECHOSTATUS = _descriptor.Descriptor(
name='EchoStatus',
full_name='grpc.testing.EchoStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='grpc.testing.EchoStatus.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='grpc.testing.EchoStatus.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=126,
serialized_end=169,
)
_SIMPLEREQUEST = _descriptor.Descriptor(
name='SimpleRequest',
full_name='grpc.testing.SimpleRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response_type', full_name='grpc.testing.SimpleRequest.response_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_size', full_name='grpc.testing.SimpleRequest.response_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payload', full_name='grpc.testing.SimpleRequest.payload', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fill_username', full_name='grpc.testing.SimpleRequest.fill_username', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fill_oauth_scope', full_name='grpc.testing.SimpleRequest.fill_oauth_scope', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_compressed', full_name='grpc.testing.SimpleRequest.response_compressed', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_status', full_name='grpc.testing.SimpleRequest.response_status', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expect_compressed', full_name='grpc.testing.SimpleRequest.expect_compressed', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=172,
serialized_end=506,
)
_SIMPLERESPONSE = _descriptor.Descriptor(
name='SimpleResponse',
full_name='grpc.testing.SimpleResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='grpc.testing.SimpleResponse.payload', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='username', full_name='grpc.testing.SimpleResponse.username', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oauth_scope', full_name='grpc.testing.SimpleResponse.oauth_scope', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=508,
serialized_end=603,
)
_STREAMINGINPUTCALLREQUEST = _descriptor.Descriptor(
name='StreamingInputCallRequest',
full_name='grpc.testing.StreamingInputCallRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='grpc.testing.StreamingInputCallRequest.payload', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expect_compressed', full_name='grpc.testing.StreamingInputCallRequest.expect_compressed', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=605,
serialized_end=724,
)
_STREAMINGINPUTCALLRESPONSE = _descriptor.Descriptor(
name='StreamingInputCallResponse',
full_name='grpc.testing.StreamingInputCallResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aggregated_payload_size', full_name='grpc.testing.StreamingInputCallResponse.aggregated_payload_size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=726,
serialized_end=787,
)
_RESPONSEPARAMETERS = _descriptor.Descriptor(
name='ResponseParameters',
full_name='grpc.testing.ResponseParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='size', full_name='grpc.testing.ResponseParameters.size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='interval_us', full_name='grpc.testing.ResponseParameters.interval_us', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='compressed', full_name='grpc.testing.ResponseParameters.compressed', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=789,
serialized_end=889,
)
_STREAMINGOUTPUTCALLREQUEST = _descriptor.Descriptor(
name='StreamingOutputCallRequest',
full_name='grpc.testing.StreamingOutputCallRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response_type', full_name='grpc.testing.StreamingOutputCallRequest.response_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_parameters', full_name='grpc.testing.StreamingOutputCallRequest.response_parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payload', full_name='grpc.testing.StreamingOutputCallRequest.payload', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_status', full_name='grpc.testing.StreamingOutputCallRequest.response_status', index=3,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=892,
serialized_end=1124,
)
_STREAMINGOUTPUTCALLRESPONSE = _descriptor.Descriptor(
name='StreamingOutputCallResponse',
full_name='grpc.testing.StreamingOutputCallResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='grpc.testing.StreamingOutputCallResponse.payload', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1126,
serialized_end=1195,
)
_RECONNECTPARAMS = _descriptor.Descriptor(
name='ReconnectParams',
full_name='grpc.testing.ReconnectParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_reconnect_backoff_ms', full_name='grpc.testing.ReconnectParams.max_reconnect_backoff_ms', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1197,
serialized_end=1248,
)
_RECONNECTINFO = _descriptor.Descriptor(
name='ReconnectInfo',
full_name='grpc.testing.ReconnectInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='passed', full_name='grpc.testing.ReconnectInfo.passed', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='backoff_ms', full_name='grpc.testing.ReconnectInfo.backoff_ms', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1250,
serialized_end=1301,
)
_PAYLOAD.fields_by_name['type'].enum_type = _PAYLOADTYPE
_SIMPLEREQUEST.fields_by_name['response_type'].enum_type = _PAYLOADTYPE
_SIMPLEREQUEST.fields_by_name['payload'].message_type = _PAYLOAD
_SIMPLEREQUEST.fields_by_name['response_compressed'].message_type = _BOOLVALUE
_SIMPLEREQUEST.fields_by_name['response_status'].message_type = _ECHOSTATUS
_SIMPLEREQUEST.fields_by_name['expect_compressed'].message_type = _BOOLVALUE
_SIMPLERESPONSE.fields_by_name['payload'].message_type = _PAYLOAD
_STREAMINGINPUTCALLREQUEST.fields_by_name['payload'].message_type = _PAYLOAD
_STREAMINGINPUTCALLREQUEST.fields_by_name['expect_compressed'].message_type = _BOOLVALUE
_RESPONSEPARAMETERS.fields_by_name['compressed'].message_type = _BOOLVALUE
_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_type'].enum_type = _PAYLOADTYPE
_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_parameters'].message_type = _RESPONSEPARAMETERS
_STREAMINGOUTPUTCALLREQUEST.fields_by_name['payload'].message_type = _PAYLOAD
_STREAMINGOUTPUTCALLREQUEST.fields_by_name['response_status'].message_type = _ECHOSTATUS
_STREAMINGOUTPUTCALLRESPONSE.fields_by_name['payload'].message_type = _PAYLOAD
DESCRIPTOR.message_types_by_name['BoolValue'] = _BOOLVALUE
DESCRIPTOR.message_types_by_name['Payload'] = _PAYLOAD
DESCRIPTOR.message_types_by_name['EchoStatus'] = _ECHOSTATUS
DESCRIPTOR.message_types_by_name['SimpleRequest'] = _SIMPLEREQUEST
DESCRIPTOR.message_types_by_name['SimpleResponse'] = _SIMPLERESPONSE
DESCRIPTOR.message_types_by_name['StreamingInputCallRequest'] = _STREAMINGINPUTCALLREQUEST
DESCRIPTOR.message_types_by_name['StreamingInputCallResponse'] = _STREAMINGINPUTCALLRESPONSE
DESCRIPTOR.message_types_by_name['ResponseParameters'] = _RESPONSEPARAMETERS
DESCRIPTOR.message_types_by_name['StreamingOutputCallRequest'] = _STREAMINGOUTPUTCALLREQUEST
DESCRIPTOR.message_types_by_name['StreamingOutputCallResponse'] = _STREAMINGOUTPUTCALLRESPONSE
DESCRIPTOR.message_types_by_name['ReconnectParams'] = _RECONNECTPARAMS
DESCRIPTOR.message_types_by_name['ReconnectInfo'] = _RECONNECTINFO
DESCRIPTOR.enum_types_by_name['PayloadType'] = _PAYLOADTYPE
BoolValue = _reflection.GeneratedProtocolMessageType('BoolValue', (_message.Message,), dict(
DESCRIPTOR = _BOOLVALUE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.BoolValue)
))
_sym_db.RegisterMessage(BoolValue)
Payload = _reflection.GeneratedProtocolMessageType('Payload', (_message.Message,), dict(
DESCRIPTOR = _PAYLOAD,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.Payload)
))
_sym_db.RegisterMessage(Payload)
EchoStatus = _reflection.GeneratedProtocolMessageType('EchoStatus', (_message.Message,), dict(
DESCRIPTOR = _ECHOSTATUS,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.EchoStatus)
))
_sym_db.RegisterMessage(EchoStatus)
SimpleRequest = _reflection.GeneratedProtocolMessageType('SimpleRequest', (_message.Message,), dict(
DESCRIPTOR = _SIMPLEREQUEST,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.SimpleRequest)
))
_sym_db.RegisterMessage(SimpleRequest)
SimpleResponse = _reflection.GeneratedProtocolMessageType('SimpleResponse', (_message.Message,), dict(
DESCRIPTOR = _SIMPLERESPONSE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.SimpleResponse)
))
_sym_db.RegisterMessage(SimpleResponse)
StreamingInputCallRequest = _reflection.GeneratedProtocolMessageType('StreamingInputCallRequest', (_message.Message,), dict(
DESCRIPTOR = _STREAMINGINPUTCALLREQUEST,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.StreamingInputCallRequest)
))
_sym_db.RegisterMessage(StreamingInputCallRequest)
StreamingInputCallResponse = _reflection.GeneratedProtocolMessageType('StreamingInputCallResponse', (_message.Message,), dict(
DESCRIPTOR = _STREAMINGINPUTCALLRESPONSE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.StreamingInputCallResponse)
))
_sym_db.RegisterMessage(StreamingInputCallResponse)
ResponseParameters = _reflection.GeneratedProtocolMessageType('ResponseParameters', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEPARAMETERS,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.ResponseParameters)
))
_sym_db.RegisterMessage(ResponseParameters)
StreamingOutputCallRequest = _reflection.GeneratedProtocolMessageType('StreamingOutputCallRequest', (_message.Message,), dict(
DESCRIPTOR = _STREAMINGOUTPUTCALLREQUEST,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.StreamingOutputCallRequest)
))
_sym_db.RegisterMessage(StreamingOutputCallRequest)
StreamingOutputCallResponse = _reflection.GeneratedProtocolMessageType('StreamingOutputCallResponse', (_message.Message,), dict(
DESCRIPTOR = _STREAMINGOUTPUTCALLRESPONSE,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.StreamingOutputCallResponse)
))
_sym_db.RegisterMessage(StreamingOutputCallResponse)
ReconnectParams = _reflection.GeneratedProtocolMessageType('ReconnectParams', (_message.Message,), dict(
DESCRIPTOR = _RECONNECTPARAMS,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.ReconnectParams)
))
_sym_db.RegisterMessage(ReconnectParams)
ReconnectInfo = _reflection.GeneratedProtocolMessageType('ReconnectInfo', (_message.Message,), dict(
DESCRIPTOR = _RECONNECTINFO,
__module__ = 'messages_pb2'
# @@protoc_insertion_point(class_scope:grpc.testing.ReconnectInfo)
))
_sym_db.RegisterMessage(ReconnectInfo)
# @@protoc_insertion_point(module_scope)
| 24,548
| 36.083082
| 2,192
|
py
|
grpc
|
grpc-master/test/http2_test/test_rst_after_header.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http2_base_server
class TestcaseRstStreamAfterHeader(object):
"""
In response to an incoming request, this test sends headers, followed by
a reset stream frame. Client asserts that the RPC failed.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers[
"RequestReceived"
] = self.on_request_received
def get_base_server(self):
return self._base_server
def on_request_received(self, event):
# send initial headers
self._base_server.on_request_received_default(event)
# send reset stream
self._base_server.send_reset_stream()
| 1,276
| 32.605263
| 76
|
py
|
grpc
|
grpc-master/test/http2_test/test_max_streams.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import http2_base_server
import hyperframe.frame
class TestcaseSettingsMaxStreams(object):
"""
This test sets MAX_CONCURRENT_STREAMS to 1 and asserts that at any point
only 1 stream is active.
"""
def __init__(self):
self._base_server = http2_base_server.H2ProtocolBaseServer()
self._base_server._handlers["DataReceived"] = self.on_data_received
self._base_server._handlers["ConnectionMade"] = self.on_connection_made
def get_base_server(self):
return self._base_server
def on_connection_made(self):
logging.info("Connection Made")
self._base_server._conn.initiate_connection()
self._base_server._conn.update_settings(
{hyperframe.frame.SettingsFrame.MAX_CONCURRENT_STREAMS: 1}
)
self._base_server.transport.setTcpNoDelay(True)
self._base_server.transport.write(
self._base_server._conn.data_to_send()
)
def on_data_received(self, event):
self._base_server.on_data_received_default(event)
sr = self._base_server.parse_received_data(event.stream_id)
if sr:
logging.info("Creating response of size = %s" % sr.response_size)
response_data = self._base_server.default_response_data(
sr.response_size
)
self._base_server.setup_send(response_data, event.stream_id)
# TODO (makdharma): Add assertion to check number of live streams
| 2,065
| 35.892857
| 79
|
py
|
grpc
|
grpc-master/doc/python/sphinx/conf.py
|
# Copyright 2018 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -- Path setup --------------------------------------------------------------
import os
import sys
PYTHON_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
'..', '..', 'src', 'python')
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_channelz'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_health_checking'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_reflection'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_status'))
sys.path.insert(0, os.path.join(PYTHON_FOLDER, 'grpcio_testing'))
# -- Project information -----------------------------------------------------
project = 'gRPC Python'
copyright = '2020, The gRPC Authors'
author = 'The gRPC Authors'
# Import generated grpc_version after the path has been modified
import grpc_version
version = '.'.join(grpc_version.VERSION.split('.')[:3])
release = grpc_version.VERSION
if 'dev' in grpc_version.VERSION:
branch = 'master'
else:
branch = 'v%s.%s.x' % tuple(grpc_version.VERSION.split('.')[:2])
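# For example: VERSION '1.35.0' yields branch 'v1.35.x', while a dev version
# such as '1.36.0.dev0' contains 'dev' and therefore yields 'master'.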
# -- General configuration ---------------------------------------------------
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = 'en'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = None
# --- Extensions Configuration -----------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
'sphinx.ext.autodoc.typehints',
]
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_special_with_doc = True
autodoc_default_options = {
'members': None,
}
autodoc_mock_imports = ["envoy"]
autodoc_typehints = 'description'
# -- HTML Configuration -------------------------------------------------
html_theme = 'alabaster'
html_theme_options = {
'fixed_sidebar': True,
'page_width': '1140px',
'show_related': True,
'analytics_id': 'UA-60127042-1',
'description': grpc_version.VERSION,
'show_powered_by': False,
}
html_static_path = ["_static"]
# -- Options for manual page output ------------------------------------------
man_pages = [(master_doc, 'grpcio', 'grpcio Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
texinfo_documents = [
(master_doc, 'grpcio', 'grpcio Documentation', author, 'grpcio',
     'gRPC Python API documentation.', 'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
epub_title = project
epub_exclude_files = ['search.html']
# -- Options for todo extension ----------------------------------------------
todo_include_todos = True
# -- Options for substitutions -----------------------------------------------
rst_epilog = '.. |grpc_types_link| replace:: https://github.com/grpc/grpc/blob/%s/include/grpc/impl/codegen/grpc_types.h' % branch
| 3,633
| 31.738739
| 130
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/gaze_prediction_and_evaluation.py
|
"""
The code for computing the saliency metrics is adapted from
https://github.com/tarunsharma1/saliency_metrics/blob/master/salience_metrics.py
"""
import os
import argparse
import time
import shutil
import math
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import functional as F
import torchvision
import numbers
import network
from bdda import BDDA
from sklearn.metrics import roc_auc_score
parser = argparse.ArgumentParser(description='Feature Training and Test')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--best', default='', type=str, metavar='PATH', help='path to best checkpoint (default: none)')
parser.add_argument('--epochs', default=50, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-b', '--batch-size', default=64, type=int,
metavar='N',
                    help='mini-batch size (default: 64), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--no_train', action='store_true', default=False)
parser.add_argument('--gridheight', default=16, type=int, metavar='N',
help='number of rows in grid')
parser.add_argument('--gridwidth', default=16, type=int, metavar='N',
help='number of columns in grid ')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
parser.add_argument('--traingrid', default='', type=str, metavar='PATH', help='path to txt with grid entries for training images')
parser.add_argument('--valgrid', default='', type=str, metavar='PATH', help='path to txt with grid entries for validation images')
parser.add_argument('--testgrid', default='', type=str, metavar='PATH', help='path to txt with grid entries for test images')
parser.add_argument('--yolo5bb', metavar='DIR', help='path to folder of yolo5 bounding box txt files')
parser.add_argument('--visualizations', metavar='DIR', help='path to folder for visualization of predicted gaze maps and targets')
parser.add_argument('--threshold', default=0.5, type=float, metavar='N', help='threshold for object-level evaluation')
parser.add_argument('--lstm', default=False, action='store_true', help='use lstm module')
parser.add_argument('--convlstm', default=False, action='store_true', help='use convlstm module')
parser.add_argument('--sequence', default=6, type=int, metavar='N', help='sequence length for lstm module')
def main():
args = parser.parse_args()
dim = args.gridwidth*args.gridheight
th = 1/dim
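    # th is the uniform probability over all grid cells (1/256 for the default
    # 16x16 grid); it is passed to the BDDA datasets below as a threshold.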
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
model = network.Net(args.gridheight, args.gridwidth)
if args.lstm:
model = network.LstmNet(args.gridheight, args.gridwidth)
if args.convlstm:
model = network.ConvLSTMNet(args.gridheight, args.gridwidth, args.sequence)
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# define loss function (criterion) and optimizer
criterion = nn.BCEWithLogitsLoss().cuda(args.gpu)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
betas=(0.9, 0.999), eps=1e-08,
weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'], False)
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# Data loading code
if not args.no_train:
traindir = os.path.join(args.data, 'training')
valdir = os.path.join(args.data, 'validation')
train_dataset = BDDA("training", args.traingrid, traindir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
val_dataset = BDDA("validation", args.valgrid, valdir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle= True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
testdir = os.path.join(args.data,'test')
test_dataset = BDDA("test", args.testgrid, testdir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
    best_loss = float('inf')
if not args.no_train:
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
loss1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = loss1 < best_loss
best_loss = min(loss1, best_loss)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
}, is_best, args.best)
if args.best:
if os.path.isfile(args.best):
print("=> loading checkpoint '{}'".format(args.best))
checkpoint = torch.load(args.best)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'], False)
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.best, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
test(test_loader, model, criterion, args)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
if is_best:
torch.save(state, filename)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
losses.update(loss.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 10))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
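# Example schedule for the default lr=0.1: epochs 0-9 train at 0.1, epochs 10-19
# at 0.01, epochs 20-29 at 0.001, and so on.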
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter()
losses = AverageMeter()
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
losses.update(loss.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format(
i, len(val_loader), batch_time=batch_time, loss=losses))
    return losses.avg  # return the running average validation loss rather than the last batch's loss tensor
def test(test_loader, model, criterion, args):
batch_time = AverageMeter()
losses = AverageMeter()
kld_losses = AverageMeter()
cc_losses = AverageMeter()
model.eval()
tp = 0
fp = 0
fn = 0
all_count = 0
hm_max_values = []
gt = []
i = 0
heightfactor = 576//args.gridheight
widthfactor = 1024//args.gridwidth
smoothing = GaussianSmoothing(1, 5, 1).cuda(args.gpu)
with torch.no_grad():
end = time.time()
for i, (input, target, gaze_gt, img_names) in enumerate(test_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
gaze_gt = gaze_gt.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
output = torch.sigmoid(output)
heatmap = grid2heatmap(output,[heightfactor,widthfactor],[args.gridheight,args.gridwidth],args)
heatmap = F.interpolate(heatmap, size=[36, 64], mode='bilinear', align_corners=False)
heatmap = smoothing(heatmap)
heatmap = F.pad(heatmap, (2, 2, 2, 2), mode='constant')
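            # The pad above restores the 36x64 shape: the unpadded 5x5 gaussian
            # kernel shrank each map from 36x64 to 32x60, and padding 2 on every
            # side brings it back.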
heatmap = heatmap.view(heatmap.size(0),-1)
heatmap = F.softmax(heatmap,dim=1)
# normalize
heatmap -= heatmap.min(1, keepdim=True)[0]
heatmap /= heatmap.max(1, keepdim=True)[0]
heatmap = heatmap.view(-1,1,36,64)
for j in range(heatmap.size(0)):
img_name = img_names[j]
heatmap_img = heatmap[j] # predicted gaze map
gt_img = gaze_gt[j] # original gaze map
##### compute object-level metrics
filename = os.path.join(args.yolo5bb, img_name+".txt")
if os.path.exists(filename):
with open(filename) as f:
for linestring in f:
all_count += 1
line = linestring.split()
width = float(line[3])
height = float(line[4])
x_center = float(line[1])
y_center = float(line[2])
x_min, x_max, y_min, y_max = bb_mapping(x_center, y_center, width, height)
# find maximum pixel value within object bounding box
gt_obj = gt_img[0, y_min:y_max+1, x_min:x_max+1]
gt_obj_max = torch.max(gt_obj)
heatmap_obj = heatmap_img[0, y_min:y_max+1, x_min:x_max+1]
heatmap_obj_max = torch.max(heatmap_obj)
# object is recognized if maximum pixel value is higher than th
gt_obj_recogn = gt_obj_max > 0.15
                            hm_obj_recogn = heatmap_obj_max > args.threshold
                            hm_max_values.append(heatmap_obj_max.item())  # .item() moves the scalar off the GPU for roc_auc_score
if gt_obj_recogn:
gt.append(1)
else:
gt.append(0)
if (hm_obj_recogn and gt_obj_recogn):
tp +=1
elif (hm_obj_recogn and not gt_obj_recogn):
fp += 1
elif (not hm_obj_recogn and gt_obj_recogn):
fn += 1
visualization(heatmap_img.cpu(), gt_img.cpu(), args.visualizations, img_name)
kld = kl(heatmap, gaze_gt)
c = cc(heatmap,gaze_gt)
losses.update(loss.item(), input.size(0))
kld_losses.update(kld, input.size(0))
cc_losses.update(c, input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'KL {kl.val:.4f} ({kl.avg:.4f})\t'
'CC {cc.val:.4f} ({cc.avg:.4f})\t'
.format(
i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'KL {kl.val:.4f} ({kl.avg:.4f})\t'
'CC {cc.val:.4f} ({cc.avg:.4f})\t'
.format(
i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
precision = tp/(tp+fp)
recall = tp/(tp+fn)
tn = all_count-tp-fp-fn
acc = (tp+tn)/all_count
f1 = 2*precision*recall/(precision+recall)
print('Object-level results:')
print('tp:', tp, 'fp:', fp, 'tn:', tn, 'fn:', fn, 'sum:', all_count)
print('prec:', precision, 'recall:', recall, 'f1', f1, 'acc', acc)
print('AUC:', roc_auc_score(gt, hm_max_values))
def bb_mapping(x_center_rel, y_center_rel, width_rel, height_rel, img_width = 64, img_height = 36):
"""
Compute absolute bounding boxes values for given image size and given relative parameters
:param x_center_rel: relative x value of bb center
:param y_center_rel: relative y value of bb center
:param width_rel: relative width
:param height_rel: relative height
:return: absolute values of bb borders
"""
width_abs = width_rel*img_width
height_abs = height_rel*img_height
x_center_abs = x_center_rel*img_width
y_center_abs = y_center_rel*img_height
x_min = int(math.floor(x_center_abs - 0.5 * width_abs))
x_max = int(math.floor(x_center_abs + 0.5 * width_abs))
y_min = int(math.floor(y_center_abs - 0.5 * height_abs))
y_max = int(math.floor(y_center_abs + 0.5 * height_abs))
bb = [x if x>=0 else 0 for x in [x_min, x_max, y_min, y_max]]
return bb
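# Worked example: a box centered at (0.5, 0.5) covering half of the default
# 64x36 map in each dimension gives
#     bb_mapping(0.5, 0.5, 0.5, 0.5)  ->  [16, 48, 9, 27]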
def grid2heatmap(grid, size, num_grid, args):
"""
    Rearrange and expand a grid vector of size (gridheight*gridwidth) to size (576 x 1024) by duplicating values
    :param grid: batch of grid vectors, shape (B, gridheight*gridwidth)
    :param size: (H,W) of one expanded grid cell
    :param num_grid: (H,W) = grid dimension
    :param args: parser arguments
    :return: heatmap tensor of shape (B, 1, 576, 1024)
"""
new_heatmap = torch.zeros(grid.size(0),size[0]*num_grid[0],size[1]*num_grid[1])
for i, item in enumerate(grid):
idx = torch.nonzero(item)
if idx.nelement() == 0:
print('Empty')
continue
for x in idx:
            new_heatmap[i,x//num_grid[1]*size[0]:(x//num_grid[1]+1)*size[0],x%num_grid[1]*size[1]:(x%num_grid[1]+1)*size[1]] = item[x]
output = new_heatmap.unsqueeze(1).cuda(args.gpu)
return output
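# Shape sketch: with the default 16x16 grid on 576x1024 maps, size=(36, 64), so
# a (B, 256) grid vector becomes a (B, 1, 576, 1024) map in which each nonzero
# grid value fills one 36x64 block.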
def cc(s_map_all,gt_all):
eps = 1e-07
bs = s_map_all.size()[0]
r = 0
for i in range(0, bs):
s_map = s_map_all[i,:,:,:].squeeze()
gt = gt_all[i,:,:,:].squeeze()
s_map_norm = (s_map - torch.mean(s_map))/(eps + torch.std(s_map))
gt_norm = (gt - torch.mean(gt))/(eps + torch.std(gt))
a = s_map_norm.cpu()
b = gt_norm.cpu()
r += torch.sum(a*b) / (torch.sqrt(torch.sum(a*a) * torch.sum(b*b))+eps)
return r/bs
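# cc computes Pearson's correlation coefficient per map, averaged over the
# batch: r = sum(a*b) / sqrt(sum(a*a) * sum(b*b)) for the z-scored maps a and b.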
def kl(s_map_all, gt_all):
dims = len(s_map_all.size())
bs = s_map_all.size()[0]
eps = torch.tensor(1e-07)
kl = 0
if dims > 3:
for i in range(0, bs):
s_map = s_map_all[i,:,:,:].squeeze()
gt = gt_all[i,:,:,:].squeeze()
s_map = s_map/(torch.sum(s_map)*1.0 + eps)
gt = gt/(torch.sum(gt)*1.0 + eps)
gt = gt.to('cpu')
s_map = s_map.to('cpu')
kl += torch.sum(gt * torch.log(eps + gt/(s_map + eps)))
return kl/bs
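# kl computes the (asymmetric) divergence KL(gt || s_map) per map after
# normalizing both maps to probability distributions, averaged over the batch.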
def normalizeData(data):
return (data - torch.min(data)) / (torch.max(data) - torch.min(data))
def visualization(heatmap, gt, path, nr):
heatmap = torchvision.transforms.functional.to_pil_image(heatmap)
gt = torchvision.transforms.functional.to_pil_image(gt)
heatmap.save(os.path.join(path, '%s_pred.png'%nr))
gt.save(os.path.join(path, '%s_gt.png'%nr))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def kullback_leibler_divergence(y_true, y_pred, eps=1e-7):
"""
    Kullback-Leibler divergence (sec 4.2.3 of [1]). Assumes shape (b, 1, h, w) for all tensors.
:param y_true: groundtruth.
:param y_pred: prediction.
:param eps: regularization epsilon.
:return: loss value (one symbolic value per batch element).
"""
P = y_pred
P = P / (eps + torch.sum(P, dim=[1, 2, 3], keepdim=True))
Q = y_true
Q = Q / (eps + torch.sum(Q, dim=[1, 2, 3], keepdim=True))
kld = torch.sum(Q * torch.log(eps + Q/(eps + P)), dim=[1, 2, 3])
return kld
class GaussianSmoothing(nn.Module):
"""
Apply gaussian smoothing on a
1d, 2d or 3d tensor. Filtering is performed seperately for each channel
in the input using a depthwise convolution.
Arguments:
channels (int, sequence): Number of channels of the input tensors. Output will
have this number of channels as well.
kernel_size (int, sequence): Size of the gaussian kernel.
sigma (float, sequence): Standard deviation of the gaussian kernel.
dim (int, optional): The number of dimensions of the data.
Default value is 2 (spatial).
"""
def __init__(self, channels, kernel_size, sigma, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
            # Gaussian pdf: exp(-((x - mean)^2) / (2 * std^2)) / (std * sqrt(2*pi))
            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
                      torch.exp(-(((mgrid - mean) / std) ** 2) / 2)
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
if dim == 1:
self.conv = F.conv1d
elif dim == 2:
self.conv = F.conv2d
elif dim == 3:
self.conv = F.conv3d
else:
raise RuntimeError(
'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
)
def forward(self, input):
"""
Apply gaussian filter to input.
Arguments:
input (torch.Tensor): Input to apply gaussian filter on.
Returns:
filtered (torch.Tensor): Filtered output.
"""
return self.conv(input, weight=self.weight, groups=self.groups)
if __name__ == '__main__':
main()
| 22,026
| 36.717466
| 134
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/extract_features.py
|
"""
The following code is adapted from the file detect.py of https://github.com/ultralytics/yolov5 (Release 5.0)
"""
import os
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
save_output = SaveOutput()
hook_handles = []
activation = {}
def get_activation(name):
def hook(model, input, output):
activation[name] = output.detach()
return hook
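# Usage: layer.register_forward_hook(get_activation('name')) stores that layer's
# detached output in activation['name'] on every forward pass; detect() below
# uses this on model.model[22] to capture intermediate YOLOv5 features.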
def detect(opt):
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
save_img = not opt.nosave and not source.endswith('.txt') # save inference images
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://', 'https://'))
# Directories
save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
stride = int(model.stride.max()) # model stride
imgsz = check_img_size(imgsz, s=stride) # check img_size
names = model.module.names if hasattr(model, 'module') else model.names # get class names
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()  # load_state_dict returns a key-match report, not the module, so chain off modelc itself
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride)
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
    # Register the feature-extraction hook once, before the inference loop, so
    # duplicate hooks do not pile up on every image.
    model.model[22].register_forward_hook(get_activation('after22'))  # 22 is before last BottleneckCSP
    t0 = time.time()
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# save extracted features
imagename = (path.split('/')[-1]).split('.')[0]
tensor = activation['after22'].data.cpu()
torch.save(tensor, os.path.join(opt.features, imagename +'.pt'))
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, opt.classes, opt.agnostic_nms,
max_det=opt.max_det)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if opt.save_crop else im0 # for opt.save_crop
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
print('s', s)
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or opt.save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}')
plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness)
if opt.save_crop:
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
if view_img:
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path += '.mp4'
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
print(f'Done. ({time.time() - t0:.3f}s)')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--max-det', type=int, default=1000, help='maximum number of detections per image')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--features', metavar='DIR', help='path to folder where to save features')
opt = parser.parse_args()
print(opt)
check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
with torch.no_grad():
if opt.update: # update all models (to fix SourceChangeWarning)
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect(opt=opt)
strip_optimizer(opt.weights)
else:
detect(opt=opt)
| 10,476
| 45.358407
| 118
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/network.py
|
"""
The convolutional LSTM is adapted from
https://github.com/yaorong0921/Driver-Intention-Prediction/blob/master/models/convolution_lstm.py
"""
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
class Net(nn.Module):
def __init__(self, gridwidth, gridheight):
super().__init__()
self.conv1 = nn.Conv2d(512, 16, (1, 1), stride=(1, 1))
self.pool = nn.AdaptiveAvgPool2d((6,10))
        self.fc3 = nn.Linear(960, gridheight*gridwidth)  # 960 = 16 channels * 6 * 10 after pooling
def forward(self, x):
x = torch.squeeze(x)
x = x.float()
x = self.conv1(x)
x = self.pool(x)
x = torch.flatten(x, 1)
x = self.fc3(x)
return x
class LstmNet(nn.Module):
def __init__(self, gridwidth, gridheight):
super().__init__()
self.conv1 = nn.Conv2d(512, 16, (1, 1), stride=(1, 1))
self.pool = nn.AdaptiveAvgPool2d((6,10))
self.lstm = nn.LSTM(
input_size=16*6*10,
hidden_size=256,
num_layers=1,
batch_first=True)
self.fc3 = nn.Linear(256, gridheight*gridwidth)
def forward(self, x):
x = torch.squeeze(x)
x = x.float()
batch_size, timesteps, C, H, W = x.size()
c_in = x.view(batch_size * timesteps, C, H, W)
c_out = self.pool(self.conv1(c_in))
r_in = c_out.view(batch_size, timesteps, -1)
r_out, (h_n, h_c) = self.lstm(r_in)
return self.fc3(r_out[:, -1, :])
class ConvLSTMCell(nn.Module):
def __init__(self, input_channels, hidden_channels, kernel_size):
super(ConvLSTMCell, self).__init__()
assert hidden_channels % 2 == 0
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.num_features = 4
self.padding = int((kernel_size - 1) / 2)
self.Wxi = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Whi = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wxf = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Whf = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wxc = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Whc = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wxo = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)
self.Who = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)
self.Wci = None
self.Wcf = None
self.Wco = None
def forward(self, x, h, c):
ci = torch.sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wci)
cf = torch.sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wcf)
cc = cf * c + ci * torch.tanh(self.Wxc(x) + self.Whc(h))
co = torch.sigmoid(self.Wxo(x) + self.Who(h) + cc * self.Wco)
ch = co * torch.tanh(cc)
return ch, cc
def init_hidden(self, batch_size, hidden, shape):
if self.Wci is None:
self.Wci = Variable(torch.zeros(1, hidden, shape[0], shape[1]))
self.Wcf = Variable(torch.zeros(1, hidden, shape[0], shape[1]))
self.Wco = Variable(torch.zeros(1, hidden, shape[0], shape[1]))
else:
assert shape[0] == self.Wci.size()[2], 'Input Height Mismatched!'
assert shape[1] == self.Wci.size()[3], 'Input Width Mismatched!'
return (Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])),
Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])))
class ConvLSTM(nn.Module):
    # input_channels corresponds to the number of channels of the first input feature map;
    # hidden_channels is a list with one hidden-channel count per stacked ConvLSTM layer.
def __init__(self, input_channels, hidden_channels, kernel_size, step=1, effective_step=[1]):
super(ConvLSTM, self).__init__()
self.input_channels = [input_channels] + hidden_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.num_layers = len(hidden_channels)
self.step = step
self.effective_step = effective_step
self._all_layers = []
for i in range(self.num_layers):
name = 'cell{}'.format(i)
cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size)
setattr(self, name, cell)
self._all_layers.append(cell)
def forward(self, input):
internal_state = []
outputs = []
for step in range(self.step):
x = input
for i in range(self.num_layers):
# all cells are initialized in the first step
name = 'cell{}'.format(i)
if step == 0:
bsize, _, height, width = x.size()
(h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i],
shape=(height, width))
internal_state.append((h, c))
# do forward
(h, c) = internal_state[i]
x, new_c = getattr(self, name)(x, h, c)
internal_state[i] = (x, new_c)
# only record effective steps
if step in self.effective_step:
outputs.append(x)
return outputs, (x, new_c)
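# Minimal usage sketch (shapes are illustrative assumptions, matching the
# 128-channel / 6x10 feature maps used by ConvLSTMNet below): the same input
# is fed to the cell stack for `step` iterations, and the hidden states of the
# steps listed in `effective_step` are collected in `outputs`.
#   convlstm = ConvLSTM(input_channels=128, hidden_channels=[16], kernel_size=3,
#                       step=5, effective_step=[4])
#   x = torch.randn(2, 128, 6, 10)   # (batch, channels, height, width)
#   outputs, (h, c) = convlstm(x)    # outputs[0] has shape (2, 16, 6, 10)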
class ConvLSTMNet(nn.Module):
def __init__(self, gridheight, gridwidth, seqlen):
super().__init__()
self.conv1 = nn.Conv2d(512, 128, (1, 1), stride=(1, 1))
self.pool = nn.AdaptiveAvgPool2d((6,10))
self.convlstm = ConvLSTM(input_channels=128, hidden_channels=[16], kernel_size=3, step=seqlen,
effective_step=[seqlen-1])
self.fc3 = nn.Linear(960, gridheight*gridwidth)
def forward(self, x):
x = torch.squeeze(x)
x = x.float()
batch_size, timesteps, C, H, W = x.size()
c_in = x.view(batch_size * timesteps, C, H, W)
c_out = self.conv1(c_in)
c_out = self.pool(c_out)
output_convlstm, _ = self.convlstm(c_out)
x = output_convlstm[0]
x = x.view(batch_size, timesteps, -1)
x = self.fc3(x[:, -1, :])
return x
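        # Shape trace for forward (a sketch assuming the 512x12x20 backbone
        # features used elsewhere in this repo): x (b, t, 512, 12, 20)
        # -> conv1 + pool: (b*t, 128, 6, 10) -> ConvLSTM: (b*t, 16, 6, 10)
        # -> view: (b, t, 960) -> fc3 on last timestep: (b, gridheight*gridwidth).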
| 6,594
| 38.491018
| 119
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/compute_grid.py
|
import os
from PIL import Image
import numpy as np
import math
import argparse
parser = argparse.ArgumentParser(description='Create ground-truth grid for gaze maps')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
parser.add_argument('--grids', metavar='PATH', help='path to output txt file')
parser.add_argument('--gridheight', default=16, type=int, metavar='N',
help='number of rows in grid')
parser.add_argument('--gridwidth', default=16, type=int, metavar='N',
help='number of columns in grid ')
def main():
args = parser.parse_args()
f = open(args.grids, "a")
count = 0
for root, dirs, files in os.walk(args.gazemaps):
for item in files:
entry = []
name = item.split('_') # we expect gaze map image names of form videoNr_frameNr
nn = name[0] + '_' + name[-1]
entry.append(nn)
gt = np.array(Image.open(os.path.join(args.gazemaps,item)).convert('L').crop((0,96,1024,672)))
gt = normalizeData(gt)
gt_grid = grid_calculation(gt,[args.gridheight, args.gridwidth])
entry.extend(gt_grid)
s = ','.join(map(str, entry))
f.write(s+'\n') # python will convert \n to os.linesep
count += 1
if count%500 == 0:
print("Count: %d"%count)
print(count)
def grid_calculation(image_array, num_grid):
# num_grid = [h,w]
# initialize grid
grid = np.zeros(num_grid[0]*num_grid[1], dtype=float)
    # keep only gaze map pixels with value greater than 0.15
Y,X = np.nonzero(image_array>0.15)
image_size = image_array.shape
x_grid = image_size[1]//num_grid[1]
y_grid = image_size[0]//num_grid[0]
    # count pixels (>0.15) in each grid cell and divide by the overall sum
for x,y in zip(X,Y):
idx = y//y_grid * num_grid[1] + x//x_grid
grid[idx] += 1
grid = grid/np.sum(grid)
return grid
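# Worked example: the maps are cropped to 1024x576 pixels, so a 16x16 grid has
# 64x36-pixel cells; a bright pixel at (x=100, y=200) falls into cell
# idx = (200//36)*16 + (100//64) = 5*16 + 1 = 81.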
def normalizeData(s_map):
norm_s_map = (s_map - np.min(s_map))/((np.max(s_map)-np.min(s_map))*1.0)
return norm_s_map
if __name__ == '__main__':
main()
| 1,947
| 28.074627
| 97
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/bdda.py
|
import os
import numpy as np
import math
import torch
from torch.utils.data import Dataset
import cv2
from utils.utils import *
import torchvision
from PIL import Image
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def img_id(self):
return (self._data[0]) # image index starts with 1
    @property
def grids(self):
grid=[]
for item in self._data[1:]:
grid.append(float(item))
return grid
class BDDA(Dataset):
"""
BDDA feature class.
"""
def __init__(self, subset, file, feature_path, threshold, gazemap_path, lstm, seqlen):
"""
Args:
"""
self.subset = subset
self.file = file
self.feature_path = feature_path
self.gazemap_path = gazemap_path
self.threshold = threshold
self.mean = torch.zeros(1024)
self.std = torch.ones(1024)
self.lstm = lstm
self.seqlen = seqlen
self._parse_list()
self.transform = torchvision.transforms.Compose(
[torchvision.transforms.Resize([36,64]),
torchvision.transforms.ToTensor()])
def _parse_list(self):
self.img_list = []
tmp = [x.strip().split(',') for x in open(self.file)]
img_list = [VideoRecord(item) for item in tmp]
if self.lstm:
self.img_dict = {}
clips = list(set([x.split('_')[0] for x in open(self.file)]))
for clip in clips:
self.img_dict[clip] = []
for item in img_list:
img_name = item.img_id.split('.')[0]
feature_name = img_name + ".pt"
clip = item.img_id.split('.')[0].split('_')[0]
img_nr = item.img_id.split('.')[0].split('_')[1]
grid = item.grids
feature_path = os.path.join(self.feature_path,feature_name)
if os.path.exists(feature_path) and not all(math.isnan(y) for y in grid):
self.img_list.append(item)
self.img_dict[clip].append(img_nr)
else:
print('error loading feature:', feature_path)
for key in self.img_dict:
self.img_dict[key].sort()
print('video number in %s: %d'%(self.subset,(len(self.img_list))))
else:
for item in img_list:
img_name = item.img_id.split('.')[0]
feature_name = img_name + ".pt"
grid = item.grids
feature_path = os.path.join(self.feature_path,feature_name)
if os.path.exists(feature_path) and not all(math.isnan(y) for y in grid):
self.img_list.append(item)
else:
print('error loading feature:', feature_path)
print('video number in %s: %d'%(self.subset,(len(self.img_list))))
def _normalizeData(self, data):
return (data - torch.min(data)) / (torch.max(data) - torch.min(data))
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
"""
"""
if self.lstm:
record = self.img_list[index]
img_name = record.img_id.split('.')[0]
feature_name = img_name + ".pt"
clip = record.img_id.split('.')[0].split('_')[0]
img_nr = record.img_id.split('.')[0].split('_')[1]
dict_idx = self.img_dict[clip].index(img_nr)
feature_path = os.path.join(self.feature_path,feature_name)
feature = torch.load(feature_path)
# create list with previous features, last one is original
feature_list = []
first = dict_idx-(self.seqlen-1)
duplicate = 0
if first < 0:
duplicate = abs(first) # if there are not enough previous features, we duplicate original to get seqlen
first = 0
for idx in range(first, dict_idx+1):
feature_name2 = clip+'_'+self.img_dict[clip][idx]+ ".pt"
feature_path2 = os.path.join(self.feature_path,feature_name2)
feature2 = torch.load(feature_path2)
feature_list.append(feature2)
if duplicate:
for i in range(duplicate):
feature_list.append(feature)
feature = torch.stack(feature_list)
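            # e.g. seqlen=3, dict_idx=0: first=-2 -> duplicate=2, first=0, so the
            # loop yields only the original frame and it is appended twice more,
            # giving a stack of 3 feature maps.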
else:
record = self.img_list[index]
img_name = record.img_id.split('.')[0]
feature_name = img_name + ".pt"
feature_path = os.path.join(self.feature_path,feature_name)
feature = torch.load(feature_path)
if self.subset == 'training':
            feature = feature + torch.randn(512,12,20)  # additive Gaussian noise augmentation
# set grid values <= 1/gridsize to 0, others to 1
grid = np.array(record.grids)
grid[grid>self.threshold] = 1.0
grid[grid<=self.threshold] = 0.0
grid = grid.astype(np.float32)
if self.subset == 'test':
name = record.img_id.split('_')
gaze_file = name[0] + '_pure_hm_' + name[1]
gaze_gt = Image.open(os.path.join(self.gazemap_path, gaze_file)).convert('L').crop((0,96,1024,672)) #left,top,right,bottom
gaze_gt = self.transform(gaze_gt)
gaze_gt = self._normalizeData(gaze_gt)
return feature, grid, gaze_gt, img_name
else:
return feature, grid
| 5,490
| 31.684524
| 134
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/More files/evaluation_otherModel.py
|
import os
import argparse
import time
import shutil
import math
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import functional as F
import torchvision
import numbers
import network
from bdda_otherModels import BDDA
import numpy as np
from PIL import Image
from sklearn.metrics import f1_score,precision_score,recall_score, roc_curve, roc_auc_score
parser = argparse.ArgumentParser(description='Evaluation of given Predictions')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
metavar='N',
help='mini-batch size (default: 128), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
parser.add_argument('--yolo5bb', metavar='DIR', help='path to folder of yolo5 bounding box txt files')
parser.add_argument('--predictions', metavar='DIR', help='path to predicted gaze maps folder')
parser.add_argument('--visualizations', metavar='DIR', help='path to folder for visualization of predicted gaze maps and target')
parser.add_argument('--threshhold', default=0.5, type=float, metavar='N', help='threshold for object-level evaluation')
def main():
args = parser.parse_args()
dim = 256
th = 1/dim
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
testdir = os.path.join(args.data,'test')
test_dataset = BDDA("test", testdir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
test(test_loader, args)
def test(test_loader, args):
batch_time = AverageMeter()
losses = AverageMeter()
kld_losses = AverageMeter()
cc_losses = AverageMeter()
tp = 0
fp = 0
fn = 0
all_count = 0
hm_max_values = []
gt = []
i = 0
transform = torchvision.transforms.Compose(
[torchvision.transforms.Resize([36,64]),
torchvision.transforms.ToTensor()])
with torch.no_grad():
end = time.time()
for i, (gaze_gt, img_names) in enumerate(test_loader):
if args.gpu is not None:
gaze_gt = gaze_gt.cuda(args.gpu, non_blocking=True)
first = True
for img in img_names:
heatfile = img+ '.jpg'
heatmap = Image.open(os.path.join(args.predictions ,heatfile))#.convert('L')#.crop((0,96,1024,672)) #left,top,right,bottom
heatmap = transform(heatmap)
heatmap = normalizeData(heatmap)
if first:
heatmap_batch = heatmap[None]
first = False
else:
heatmap_batch = torch.cat((heatmap_batch, heatmap[None]), 0)
heatmap = heatmap_batch
for j in range(heatmap.size(0)):
img_name = img_names[j]
heatmap_img = heatmap[j] # predicted gaze map
gt_img = gaze_gt[j] # original gaze map
##### compute object-level metrics
filename = os.path.join(args.yolo5bb, img_name+".txt")
if os.path.exists(filename):
with open(filename) as f:
for linestring in f:
all_count += 1
line = linestring.split()
width = float(line[3])
height = float(line[4])
x_center = float(line[1])
y_center = float(line[2])
x_min, x_max, y_min, y_max = bb_mapping(x_center, y_center, width, height)
# find maximum pixel value within object bounding box
gt_obj = gt_img[0, y_min:y_max+1, x_min:x_max+1]
gt_obj_max = torch.max(gt_obj)
heatmap_obj = heatmap_img[0, y_min:y_max+1, x_min:x_max+1]
heatmap_obj_max = torch.max(heatmap_obj)
# object is recognized if maximum pixel value is higher than th
gt_obj_recogn = gt_obj_max > 0.15
hm_obj_recogn = heatmap_obj_max > args.threshhold
hm_max_values.append(heatmap_obj_max)
if gt_obj_recogn:
gt.append(1)
else:
gt.append(0)
if (hm_obj_recogn and gt_obj_recogn):
tp +=1
elif (hm_obj_recogn and not gt_obj_recogn):
fp += 1
elif (not hm_obj_recogn and gt_obj_recogn):
fn += 1
visualization(heatmap_img.cpu(), gt_img.cpu(), args.visualizations, img_name)
kld = kl(heatmap, gaze_gt)
c = cc(heatmap,gaze_gt)
            kld_losses.update(kld, heatmap.size(0))
            cc_losses.update(c, heatmap.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'KL {kl.val:.4f} ({kl.avg:.4f})\t'
'CC {cc.val:.4f} ({cc.avg:.4f})\t'
.format(
i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'KL {kl.val:.4f} ({kl.avg:.4f})\t'
'CC {cc.val:.4f} ({cc.avg:.4f})\t'
.format(
i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
precision = tp/(tp+fp)
recall = tp/(tp+fn)
tn = all_count-tp-fp-fn
acc = (tp+tn)/all_count
f1 = 2*precision*recall/(precision+recall)
print('Object-level results:')
print('tp:', tp, 'fp:', fp, 'tn:', tn, 'fn:', fn, 'sum:', all_count)
print('prec:', precision, 'recall:', recall, 'f1', f1, 'acc', acc)
print('AUC:', roc_auc_score(gt, hm_max_values))
def bb_mapping(x_center_rel, y_center_rel, width_rel, height_rel, img_width = 64, img_height = 36):
"""
Compute absolute bounding boxes values for given image size and given relative parameters
:param x_center_rel: relative x value of bb center
:param y_center_rel: relative y value of bb center
:param width_rel: relative width
:param height_rel: relative height
:return: absolute values of bb borders
"""
width_abs = width_rel*img_width
height_abs = height_rel*img_height
x_center_abs = x_center_rel*img_width
y_center_abs = y_center_rel*img_height
x_min = int(math.floor(x_center_abs - 0.5 * width_abs))
x_max = int(math.floor(x_center_abs + 0.5 * width_abs))
y_min = int(math.floor(y_center_abs - 0.5 * height_abs))
y_max = int(math.floor(y_center_abs + 0.5 * height_abs))
bb = [x if x>=0 else 0 for x in [x_min, x_max, y_min, y_max]]
return bb
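# Worked example on the default 64x36 map: x_center=0.5, y_center=0.5,
# width=0.25, height=0.5 maps to x_min=24, x_max=40, y_min=9, y_max=27.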
def cc(s_map_all,gt_all):
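    # Pearson correlation coefficient per sample: both maps are z-normalized,
    # then r = sum(a*b) / sqrt(sum(a*a) * sum(b*b)); the batch mean is returned.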
eps = 1e-07
bs = s_map_all.size()[0]
r = 0
for i in range(0, bs):
s_map = s_map_all[i,:,:,:].squeeze()
gt = gt_all[i,:,:,:].squeeze()
s_map_norm = (s_map - torch.mean(s_map))/(eps + torch.std(s_map))
gt_norm = (gt - torch.mean(gt))/(eps + torch.std(gt))
a = s_map_norm.cpu()
b = gt_norm.cpu()
r += torch.sum(a*b) / (torch.sqrt(torch.sum(a*a) * torch.sum(b*b))+eps)
return r/bs
def kl(s_map_all, gt_all):
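    # KL divergence KL(gt || s_map) per sample: both maps are normalized to sum
    # to 1, then sum(gt * log(gt / s_map)) is accumulated, with eps guarding
    # against division by zero and log(0); the batch mean is returned.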
dims = len(s_map_all.size())
bs = s_map_all.size()[0]
eps = torch.tensor(1e-07)
kl = 0
if dims > 3:
for i in range(0, bs):
s_map = s_map_all[i,:,:,:].squeeze()
gt = gt_all[i,:,:,:].squeeze()
s_map = s_map/(torch.sum(s_map)*1.0 + eps)
gt = gt/(torch.sum(gt)*1.0 + eps)
gt = gt.to('cpu')
s_map = s_map.to('cpu')
kl += torch.sum(gt * torch.log(eps + gt/(s_map + eps)))
return kl/bs
def normalizeData(data):
return (data - torch.min(data)) / (torch.max(data) - torch.min(data))
def visualization(heatmap, gt, path, nr):
heatmap = torchvision.transforms.functional.to_pil_image(heatmap)
gt = torchvision.transforms.functional.to_pil_image(gt)
heatmap.save(os.path.join(path, '%s_pred.png'%nr))
gt.save(os.path.join(path, '%s_gt.png'%nr))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
main()
| 9,879
| 34.285714
| 138
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/More files/compute_BDDA_baseline.py
|
import os
import math
import argparse
import numpy as np
import torch
from torch.utils.data import Dataset
import cv2
from utils.utils import *
import torchvision
from PIL import Image
parser = argparse.ArgumentParser(description='Create average baseline for given gaze map images')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
def main():
transform = torchvision.transforms.Compose(
[torchvision.transforms.Resize([36,64]),
torchvision.transforms.ToTensor()])
args = parser.parse_args()
count = 0
for root, dirs, files in os.walk(args.gazemaps):
for item in files:
gt = Image.open(os.path.join(args.gazemaps,item)).convert('L').crop((0,96,1024,672)) #left,top,right,bottom
gt = np.array(transform(gt))
gt = normalizeData(gt)
if np.isnan(np.sum(gt)):
continue
            if count == 0:
                total = gt
            else:
                total += gt
count += 1
if count%500 == 0:
print("Count: %d"%count)
    total = normalizeData(total)
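    # The min-max normalized sum of all gaze maps serves as the "average
    # attention" baseline that evaluation_BDDA_baseline.py loads back in.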
a_file = open("avgBaseline.txt", "w")
    for row in total:
np.savetxt(a_file, row)
a_file.close()
def normalizeData(s_map):
norm_s_map = (s_map - np.min(s_map))/((np.max(s_map)-np.min(s_map))*1.0)
return norm_s_map
if __name__ == '__main__':
main()
| 1,331
| 22.368421
| 110
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/More files/flops_counter.py
|
'''
Copyright (C) 2019 Sovrasov V. - All Rights Reserved
* You may use, distribute and modify this code under the
* terms of the MIT license.
* You should have received a copy of the MIT license with
* this file. If not visit https://opensource.org/licenses/MIT
'''
# this script can be used to evaluate model complexity with the following lines:
#===========================================================================================
#flops, params = get_model_complexity_info(model, (input-channel,input-height,input-width), as_strings=True, print_per_layer_stat=True)
#print('{:<30} {:<8}'.format('Computational complexity: ', flops))
#print('{:<30} {:<8}'.format('Number of parameters: ', params))
#===========================================================================================
import sys
import torch
import torch.nn as nn
import numpy as np
def get_model_complexity_info(model, input_res,
print_per_layer_stat=True,
as_strings=True,
input_constructor=None, ost=sys.stdout):
assert type(input_res) is tuple
assert len(input_res) >= 2
flops_model = add_flops_counting_methods(model)
flops_model.eval()
flops_model.start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
try:
batch = torch.ones(()).new_empty((1, *input_res),
dtype=next(flops_model.parameters()).dtype,
device=next(flops_model.parameters()).device)
except StopIteration:
batch = torch.ones(()).new_empty((1, *input_res))
_ = flops_model(batch)
flops_count = flops_model.compute_average_flops_cost()
params_count = get_model_parameters_number(flops_model)
if print_per_layer_stat:
print_model_with_flops(flops_model, flops_count, params_count, ost=ost)
flops_model.stop_flops_count()
if as_strings:
return flops_to_string(flops_count), params_to_string(params_count)
return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=2):
if units is None:
if flops // 10**9 > 0:
return str(round(flops / 10.**9, precision)) + ' GMac'
elif flops // 10**6 > 0:
return str(round(flops / 10.**6, precision)) + ' MMac'
elif flops // 10**3 > 0:
return str(round(flops / 10.**3, precision)) + ' KMac'
else:
return str(flops) + ' Mac'
else:
if units == 'GMac':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MMac':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KMac':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' Mac'
def params_to_string(params_num, units=None, precision=2):
if units is None:
if params_num // 10 ** 6 > 0:
return str(round(params_num / 10 ** 6, 2)) + ' M'
elif params_num // 10 ** 3:
return str(round(params_num / 10 ** 3, 2)) + ' k'
else:
return str(params_num)
else:
if units == 'M':
return str(round(params_num / 10.**6, precision)) + ' ' + units
elif units == 'K':
return str(round(params_num / 10.**3, precision)) + ' ' + units
else:
return str(params_num)
def print_model_with_flops(model, total_flops, total_params, units='GMac',
precision=3, ost=sys.stdout):
def accumulate_params(self):
return get_model_parameters_number(self)
def accumulate_flops(self):
if is_supported_instance(self):
return self.__flops__ / model.__batch_counter__
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_params_num = self.accumulate_params()
accumulated_flops_cost = self.accumulate_flops()
if total_params == 0:
return ', '.join([params_to_string(accumulated_params_num, units='M', precision=precision),
'{:.3%} Params'.format(0),
flops_to_string(accumulated_flops_cost, units=units, precision=precision),
'{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
self.original_extra_repr()])
else:
return ', '.join([params_to_string(accumulated_params_num, units='M', precision=precision),
'{:.3%} Params'.format(accumulated_params_num / total_params),
flops_to_string(accumulated_flops_cost, units=units, precision=precision),
'{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
m.accumulate_params = accumulate_params.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if m.extra_repr != flops_extra_repr:
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert m.extra_repr != m.original_extra_repr
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model, file=ost)
model.apply(del_extra_repr)
def get_model_parameters_number(model):
params_num = sum(p.numel() for p in model.parameters())# if p.requires_grad)
return params_num
def add_flops_counting_methods(net_main_module):
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
# Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = self.__batch_counter__
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return flops_sum / batches_count
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
def add_flops_mask_func(module):
if isinstance(module, torch.nn.Conv2d):
module.__mask__ = mask
module.apply(add_flops_mask_func)
def remove_flops_mask(module):
module.apply(add_flops_mask_variable_or_reset)
# ---- Internal functions
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += int(output_elements_count)
def relu_flops_counter_hook(module, input, output):
active_elements_count = output.numel()
module.__flops__ += int(active_elements_count)
def linear_flops_counter_hook(module, input, output):
input = input[0]
output_last_dim = output.shape[-1] # pytorch checks dimensions, so here we don't care much
module.__flops__ += int(np.prod(input.shape) * output_last_dim)
def pool_flops_counter_hook(module, input, output):
input = input[0]
module.__flops__ += int(np.prod(input.shape))
def bn_flops_counter_hook(module, input, output):
input = input[0]
batch_flops = np.prod(input.shape)
if module.affine:
batch_flops *= 2
module.__flops__ += int(batch_flops)
def deconv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
input_height, input_width = input.shape[2:]
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = kernel_height * kernel_width * in_channels * filters_per_channel
active_elements_count = batch_size * input_height * input_width
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
output_height, output_width = output.shape[2:]
        bias_flops = out_channels * batch_size * output_height * output_width
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += int(overall_flops)
def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
output_dims = list(output.shape[2:])
kernel_dims = list(conv_module.kernel_size)
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = out_channels // groups
conv_per_position_flops = np.prod(kernel_dims) * in_channels * filters_per_channel
active_elements_count = batch_size * np.prod(output_dims)
if conv_module.__mask__ is not None:
# (b, 1, h, w)
        flops_mask = conv_module.__mask__.expand(batch_size, 1, output_dims[0], output_dims[1])
active_elements_count = flops_mask.sum()
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
bias_flops = out_channels * active_elements_count
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += int(overall_flops)
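# Worked example: a 3x3 Conv2d with 16 in / 32 out channels (groups=1) on a
# 1x16x8x8 input with an 8x8 output: per-position MACs = 3*3*16*32 = 4608,
# active positions = 1*8*8 = 64, i.e. ~294,912 MACs plus 32*64 bias terms.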
def batch_counter_hook(module, input, output):
batch_size = 1
if len(input) > 0:
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = len(input)
    else:
        print('Warning! No positional inputs found for a module, assuming batch size is 1.')
module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
MODULES_MAPPING = {
# convolutions
torch.nn.Conv1d: conv_flops_counter_hook,
torch.nn.Conv2d: conv_flops_counter_hook,
torch.nn.Conv3d: conv_flops_counter_hook,
# activations
torch.nn.ReLU: relu_flops_counter_hook,
torch.nn.PReLU: relu_flops_counter_hook,
torch.nn.ELU: relu_flops_counter_hook,
torch.nn.LeakyReLU: relu_flops_counter_hook,
torch.nn.ReLU6: relu_flops_counter_hook,
torch.nn.SiLU : relu_flops_counter_hook,
# poolings
torch.nn.MaxPool1d: pool_flops_counter_hook,
torch.nn.AvgPool1d: pool_flops_counter_hook,
torch.nn.AvgPool2d: pool_flops_counter_hook,
torch.nn.MaxPool2d: pool_flops_counter_hook,
torch.nn.MaxPool3d: pool_flops_counter_hook,
torch.nn.AvgPool3d: pool_flops_counter_hook,
nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
# BNs
torch.nn.BatchNorm1d: bn_flops_counter_hook,
torch.nn.BatchNorm2d: bn_flops_counter_hook,
torch.nn.BatchNorm3d: bn_flops_counter_hook,
# FC
torch.nn.Linear: linear_flops_counter_hook,
# Upscale
torch.nn.Upsample: upsample_flops_counter_hook,
# Deconvolution
torch.nn.ConvTranspose2d: deconv_flops_counter_hook,
}
def is_supported_instance(module):
if type(module) in MODULES_MAPPING:
return True
return False
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
module.__flops_handle__ = handle
else:
print('missing module', module)
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
# --- Masked flops counting
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
if is_supported_instance(module):
module.__mask__ = None
| 14,874
| 33.512761
| 139
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/More files/evaluation_BDDA_baseline.py
|
import os
import argparse
import time
import shutil
import math
import torch
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import functional as F
import torchvision
import numbers
import network
from bdda_otherModels import BDDA
import numpy as np
from sklearn.metrics import f1_score,precision_score,recall_score, roc_curve, roc_auc_score
parser = argparse.ArgumentParser(description='Baseline Evaluation')
parser.add_argument('--data', metavar='DIR', help='path to dataset')
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
metavar='N',
help='mini-batch size (default: 128), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--gazemaps', metavar='DIR', help='path to gaze map images folder')
parser.add_argument('--yolo5bb', metavar='DIR', help='path to folder of yolo5 bounding box txt files')
parser.add_argument('--baseline', metavar='DIR', help='path to txt file with baseline')
parser.add_argument('--visualizations', metavar='DIR', help='path to folder for visualization of predicted gaze maps and target')
parser.add_argument('--threshhold', default=0.5, type=float, metavar='N', help='threshold for object-level evaluation')
def main():
dim = 256
th = 1/dim
args = parser.parse_args()
testdir = os.path.join(args.data,'test')
test_dataset = BDDA("test", args.testgrid, testdir, th, args.gazemaps, (args.lstm or args.convlstm), args.sequence)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
test(test_loader, args)
def test(test_loader, args):
batch_time = AverageMeter()
losses = AverageMeter()
kld_losses = AverageMeter()
cc_losses = AverageMeter()
tp = 0
fp = 0
fn = 0
all_count = 0
hm_max_values = []
gt = []
i = 0
#smoothing = GaussianSmoothing(1, 5, 1).cuda(args.gpu)
with torch.no_grad():
end = time.time()
for i, (gaze_gt, img_names) in enumerate(test_loader):
if args.gpu is not None:
gaze_gt = gaze_gt.cuda(args.gpu, non_blocking=True)
heatmap = torch.from_numpy(np.loadtxt(args.baseline)).unsqueeze(0)
heatmap = heatmap.unsqueeze(0).repeat(gaze_gt.size(0), 1, 1, 1)
for j in range(heatmap.size(0)):
img_name = img_names[j]
heatmap_img = heatmap[j] # predicted gaze map
gt_img = gaze_gt[j] # original gaze map
filename = os.path.join(args.yolo5bb, img_name+".txt")
if os.path.exists(filename):
with open(filename) as f:
for linestring in f:
all_count += 1
line = linestring.split()
width = float(line[3])
height = float(line[4])
x_center = float(line[1])
y_center = float(line[2])
x_min, x_max, y_min, y_max = bb_mapping(x_center, y_center, width, height)
# find maximum pixel value within object bounding box
gt_obj = gt_img[0, y_min:y_max+1, x_min:x_max+1]
gt_obj_max = torch.max(gt_obj)
heatmap_obj = heatmap_img[0, y_min:y_max+1, x_min:x_max+1]
heatmap_obj_max = torch.max(heatmap_obj)
# object is recognized if maximum pixel value is higher than th
gt_obj_recogn = gt_obj_max > 0.15
hm_obj_recogn = heatmap_obj_max > args.threshhold
hm_max_values.append(heatmap_obj_max)
if gt_obj_recogn:
gt.append(1)
else:
gt.append(0)
if (hm_obj_recogn and gt_obj_recogn):
tp +=1
elif (hm_obj_recogn and not gt_obj_recogn):
fp += 1
elif (not hm_obj_recogn and gt_obj_recogn):
fn += 1
visualization(heatmap_img.cpu(), gt_img.cpu(), args.visualizations, img_name)
kld = kl(heatmap, gaze_gt)
c = cc(heatmap,gaze_gt)
kld_losses.update(kld, heatmap.size(0))
cc_losses.update(c, heatmap.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'KL {kl.val:.4f} ({kl.avg:.4f})\t'
'CC {cc.val:.4f} ({cc.avg:.4f})\t'
.format(
i, len(test_loader), batch_time=batch_time, loss=losses, kl=kld_losses, cc=cc_losses))
precision = tp/(tp+fp)
recall = tp/(tp+fn)
tn = all_count-tp-fp-fn
acc = (tp+tn)/all_count
f1 = 2*precision*recall/(precision+recall)
print('Object-level results:')
print('tp:', tp, 'fp:', fp, 'tn:', tn, 'fn:', fn, 'sum:', all_count)
print('prec:', precision, 'recall:', recall, 'f1', f1, 'acc', acc)
print('AUC:', roc_auc_score(gt, hm_max_values))
def bb_mapping(x_center_rel, y_center_rel, width_rel, height_rel, img_width = 64, img_height = 36):
"""
Compute absolute bounding boxes values for given image size and given relative parameters
:param x_center_rel: relative x value of bb center
:param y_center_rel: relative y value of bb center
:param width_rel: relative width
:param height_rel: relative height
:return: absolute values of bb borders
"""
width_abs = width_rel*img_width
height_abs = height_rel*img_height
x_center_abs = x_center_rel*img_width
y_center_abs = y_center_rel*img_height
x_min = int(math.floor(x_center_abs - 0.5 * width_abs))
x_max = int(math.floor(x_center_abs + 0.5 * width_abs))
y_min = int(math.floor(y_center_abs - 0.5 * height_abs))
y_max = int(math.floor(y_center_abs + 0.5 * height_abs))
bb = [x if x>=0 else 0 for x in [x_min, x_max, y_min, y_max]]
return bb
def cc(s_map_all,gt_all):
eps = 1e-07
bs = s_map_all.size()[0]
r = 0
for i in range(0, bs):
s_map = s_map_all[i,:,:,:].squeeze()
gt = gt_all[i,:,:,:].squeeze()
s_map_norm = (s_map - torch.mean(s_map))/(eps + torch.std(s_map))
gt_norm = (gt - torch.mean(gt))/(eps + torch.std(gt))
a = s_map_norm.cpu()
b = gt_norm.cpu()
r += torch.sum(a*b) / (torch.sqrt(torch.sum(a*a) * torch.sum(b*b))+eps)
return r/bs
def kl(s_map_all, gt_all):
dims = len(s_map_all.size())
bs = s_map_all.size()[0]
eps = torch.tensor(1e-07)
kl = 0
if dims > 3:
for i in range(0, bs):
s_map = s_map_all[i,:,:,:].squeeze()
gt = gt_all[i,:,:,:].squeeze()
s_map = s_map/(torch.sum(s_map)*1.0 + eps)
gt = gt/(torch.sum(gt)*1.0 + eps)
gt = gt.to('cpu')
s_map = s_map.to('cpu')
kl += torch.sum(gt * torch.log(eps + gt/(s_map + eps)))
return kl/bs
def normalizeData(data):
return (data - torch.min(data)) / (torch.max(data) - torch.min(data))
def visualization(heatmap, gt, path, nr):
heatmap = torchvision.transforms.functional.to_pil_image(heatmap)
gt = torchvision.transforms.functional.to_pil_image(gt)
heatmap.save(os.path.join(path, '%s_pred.png'%nr))
gt.save(os.path.join(path, '%s_gt.png'%nr))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
main()
| 8,598
| 34.097959
| 128
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/More files/roc_th.py
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import f1_score,precision_score,recall_score, roc_curve, roc_auc_score, auc
"""
For each model there has to be a txt file with the gt values gt_modelname.txt and one txt file with the hm_max_values hm_modelname.txt.
(gt values and hm_max_values are computed in the method test() within the gaze_prediction_and_evaluation.py file)
"""
models_filename = ['yolo5', 'yolo3', 'centertrack', 'bdda', 'dreyeve', 'mlnet', 'picanet', 'baseline']
models_plot = ['YOLOv5', 'YOLOv3', 'CenterTrack', 'BDD-A', 'DR(eye)VE', 'ML-Net', 'PiCANet', 'Baseline']
dataset = ["BDDA", "DREYEVE"]
for i in range(len(models_filename)):
mod = models_filename[i]
nm = models_plot[i]
gt = []
hm = []
try:
with open("gt_"+mod+".txt", "r") as f:
for line in f:
gt.append(float(line.strip()))
with open("hm_"+mod+".txt", "r") as f:
for line in f:
hm.append(float(line.strip()))
    except OSError:
        continue  # skip models whose result files are missing
print('AUC:'+nm, roc_auc_score(gt, hm))
fpr, tpr, threshold = roc_curve(gt, hm)
gmeans = np.sqrt(tpr * (1-fpr))
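    # The geometric mean sqrt(TPR * (1 - FPR)) is maximal at the ROC point that
    # best balances sensitivity and specificity, giving the optimal threshold.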
optimal_idx = np.argmax(gmeans)
print('Best Threshold=%f' % threshold[optimal_idx])
plt.plot(fpr, tpr, label = nm + ", Th = %.2f" % threshold[optimal_idx])
plt.plot(fpr[optimal_idx], tpr[optimal_idx], marker='X', markersize=7, color="black")
plt.title('Receiver Operating Characteristic')
plt.legend(loc = 'lower right')
plt.plot([0.0,1.0],[1.0,0.0],color='black',linestyle='dashed', linewidth=0.5)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('./roc.png')
| 1,715
| 32.647059
| 135
|
py
|
driver-gaze-yolov5
|
driver-gaze-yolov5-main/More files/bdda_otherModels.py
|
import os
import numpy as np
import math
import torch
from torch.utils.data import Dataset
import cv2
from utils.utils import *
import torchvision
from PIL import Image
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def img_id(self):
return (self._data[0]) # image index starts with 1
    @property
def grids(self):
grid=[]
for item in self._data[1:]:
grid.append(float(item))
return grid
class BDDA(Dataset):
"""
BDDA feature class.
"""
def __init__(self, file, threshold, gazemap_path):
"""
Args:
"""
self.file = file
self.gazemap_path = gazemap_path
self.threshold = threshold
self.mean = torch.zeros(1024)
self.std = torch.ones(1024)
self._parse_list()
self.transform = torchvision.transforms.Compose(
[torchvision.transforms.Resize([36,64]),
torchvision.transforms.ToTensor()])
def _parse_list(self):
self.img_list = []
tmp = [x.strip().split(',') for x in open(self.file)]
img_list = [VideoRecord(item) for item in tmp]
self.img_list = img_list
def _normalizeData(self, data):
return (data - torch.min(data)) / (torch.max(data) - torch.min(data))
def __len__(self):
return len(self.img_list)
def __getitem__(self, index):
"""
"""
record = self.img_list[index]
img_name = record.img_id.split('.')[0]
name = record.img_id.split('_')
gaze_file = name[0] + '_pure_hm_' + name[1]
gaze_gt = Image.open(os.path.join(self.gazemap_path, gaze_file)).convert('L').crop((0,96,1024,672)) #left,top,right,bottom
gaze_gt = self.transform(gaze_gt)
gaze_gt = self._normalizeData(gaze_gt)
return gaze_gt, img_name
| 1,891
| 24.226667
| 130
|
py
|
NMTGMinor
|
NMTGMinor-master/online.py
|
#!/usr/bin/env python
import os
from onmt.online_translator import TranslatorParameter, OnlineTranslator
import sys
filename="/model/model.conf"
t = OnlineTranslator(filename)
print("NMT initialized")
sys.stdout.flush()
while True:
# sys.stderr.write("Waiting for data\n");
    line = sys.stdin.readline()
# sys.stderr.write("Input: "+line+"\n");
print(t.translate(line))
# sys.stderr.write("Translation done\n");
sys.stdout.flush()
| 454
| 22.947368
| 72
|
py
|
NMTGMinor
|
NMTGMinor-master/preprocess_classify.py
|
#!/usr/bin/env python
import onmt
import onmt.markdown
import argparse
import torch
import subprocess
import time, datetime
from onmt.data.binarizer import Binarizer
from onmt.data.binarizer import SpeechBinarizer
from onmt.data.indexed_dataset import IndexedDatasetBuilder
import h5py as h5
import numpy as np
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-src_type', default="text",
help="Type of the source input. Options are [text|img|audio].")
parser.add_argument('-sort_type', default="ascending",
help="Type of sorting. Options are [ascending|descending].")
parser.add_argument('-src_img_dir', default=".",
help="Location of source images")
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-data_type', default="int64",
help="Input type for storing text (int64|int32|int|int16) to reduce memory load")
parser.add_argument('-format', default="raw",
help="Save data format: binary or raw. Binary should be used to load faster")
parser.add_argument('-train_src', required=True,
help="Path to the training source data")
parser.add_argument('-past_train_src', default="",
help="Path to the training source data")
parser.add_argument('-future_train_src', default="",
help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
help="Path to the training target data")
parser.add_argument('-valid_src', required=True,
help="Path to the validation source data")
parser.add_argument('-past_valid_src', default="",
help="Path to the validation source data")
parser.add_argument('-future_valid_src', default="",
help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
help="Path to the validation target data")
parser.add_argument('-train_src_lang', default="src",
help="Language(s) of the source sequences.")
parser.add_argument('-train_tgt_lang', default="tgt",
help="Language(s) of the target sequences.")
parser.add_argument('-valid_src_lang', default="src",
help="Language(s) of the source sequences.")
parser.add_argument('-valid_tgt_lang', default="tgt",
help="Language(s) of the target sequences.")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-src_vocab_size', type=int, default=9999999,
help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=9999999,
help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
help="Path to an existing target vocabulary")
parser.add_argument('-load_dict',
help="Path to an existing target vocabulary")
parser.add_argument('-src_seq_length', type=int, default=10000,
help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=10000,
help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
# tokens
parser.add_argument('-src_bos_token', type=str, default="<s>",
                    help='SRC BOS Token. Default is <s>.')
parser.add_argument('-src_eos_token', type=str, default="</s>",
                    help='SRC EOS Token. Default is </s>.')
parser.add_argument('-src_unk_token', type=str, default="<unk>",
                    help='SRC Unk Token. Default is <unk>.')
parser.add_argument('-src_pad_token', type=str, default="<blank>",
                    help='SRC PAD Token. Default is <blank>.')
parser.add_argument('-tgt_bos_token', type=str, default="<s>",
                    help='TGT BOS Token. Default is <s>.')
parser.add_argument('-tgt_eos_token', type=str, default="</s>",
                    help='TGT EOS Token. Default is </s>.')
parser.add_argument('-tgt_unk_token', type=str, default="<unk>",
                    help='TGT Unk Token. Default is <unk>.')
parser.add_argument('-tgt_pad_token', type=str, default="<blank>",
                    help='TGT PAD Token. Default is <blank>.')
parser.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
parser.add_argument('-asr', action='store_true',
help="prepare data for asr task")
parser.add_argument('-asr_format', default="h5",
help="Format of asr data h5 or scp")
parser.add_argument('-lm', action='store_true',
help="prepare data for LM task")
parser.add_argument('-fp16', action='store_true',
help="store ASR data in fp16")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
parser.add_argument('-load_bpe_voc', action='store_true', help='load an existing BPE vocabulary file')
parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)')
parser.add_argument('-sort_by_target', action='store_true', help='sort data by target sequence length')
parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target')
parser.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
parser.add_argument('-reshape_speech', type=int, default=1,
help="Reshaping the speech segments here. Mostly for compatibility..")
parser.add_argument('-num_threads', type=int, default=1,
help="Number of threads for multiprocessing")
parser.add_argument('-verbose', action='store_true',
help="Print out information during preprocessing")
opt = parser.parse_args()
torch.manual_seed(opt.seed)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
if name == "source":
vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
opt.src_bos_token, opt.src_eos_token],
lower=opt.lower)
elif name == "target":
vocab = onmt.Dict(lower=opt.lower)
else:
print("Warning: check the name")
exit(-1)
for filename in filenames:
print("Generating vocabulary from file %s ... " % filename)
onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)
original_size = vocab.size()
vocab = vocab.prune(size)
print('Created dictionary of size %d (pruned from %d)' %
(vocab.size(), original_size))
return vocab
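# Minimal usage sketch (the file name is a placeholder):
#   tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
#   src_vocab = make_vocab('source', ['train.src'], opt.src_vocab_size, tokenizer)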
def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, num_workers=1):
vocab = None
if vocab_file is not None:
# If given, load existing word dictionary.
print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...')
if not opt.load_bpe_voc:
vocab = onmt.Dict()
else:
if name == "target":
# note: no need for special tokens for the target (labels)
vocab = onmt.Dict(lower=opt.lower)
elif name == "source":
vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
opt.src_bos_token, opt.src_eos_token],
lower=opt.lower)
else:
print("Warning: name should be source or target")
exit(-1)
vocab.loadFile(vocab_file)
print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
if vocab is None:
print('Building ' + name + ' vocabulary...')
        gen_word_vocab = make_vocab(name, data_files, vocab_size, tokenizer, num_workers=num_workers)
vocab = gen_word_vocab
print()
return vocab
def save_vocabulary(name, vocab, file):
print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
vocab.writeFile(file)
def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=64, max_tgt_length=64,
add_bos=True, data_type='int64', num_workers=1, verbose=False):
src, tgt = [], []
src_sizes = []
tgt_sizes = []
print("[INFO] Binarizing file %s ..." % src_file)
binarized_src = Binarizer.binarize_file(src_file, src_dicts, tokenizer,
bos_word=None, eos_word=None,
data_type=data_type,
num_workers=num_workers, verbose=verbose)
if add_bos:
tgt_bos_word = opt.tgt_bos_token
else:
tgt_bos_word = None
print("[INFO] Binarizing file %s ..." % tgt_file)
binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
data_type=data_type,
num_workers=num_workers, verbose=verbose)
src = binarized_src['data']
src_sizes = binarized_src['sizes']
tgt = binarized_tgt['data']
tgt_sizes = binarized_tgt['sizes']
# currently we don't ignore anything :D
ignored = 0
print(('Prepared %d sentences ' +
'(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
(len(src), ignored, max_src_length, max_tgt_length))
return src, tgt, src_sizes, tgt_sizes
def make_asr_data(src_file, tgt_file, tgt_dicts, tokenizer,
max_src_length=64, max_tgt_length=64, add_bos=True, data_type='int64', num_workers=1, verbose=False,
input_type='word', stride=1, concat=4, prev_context=0, fp16=False, reshape=True,
asr_format="h5", output_format="raw"):
src, tgt = [], []
src_sizes = []
tgt_sizes = []
count, ignored = 0, 0
n_unk_words = 0
print('[INFO] Processing %s ...' % src_file)
binarized_src = SpeechBinarizer.binarize_file(src_file, input_format=asr_format,
output_format=output_format, concat=concat,
stride=stride, fp16=fp16, prev_context=prev_context,
num_workers=num_workers)
src = binarized_src['data']
src_sizes = binarized_src['sizes']
if add_bos:
tgt_bos_word = opt.tgt_bos_token
else:
tgt_bos_word = None
if tgt_file is not None:
print("[INFO] Binarizing file %s ..." % tgt_file)
# don't use bos_word and eos_word here
binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
bos_word=None, eos_word=None,
data_type=data_type,
num_workers=num_workers, verbose=verbose)
tgt = binarized_tgt['data']
tgt_sizes = binarized_tgt['sizes']
ignored = 0
if len(src_sizes) != len(tgt_sizes):
print("Warning: data size mismatched.")
else:
tgt = None
tgt_sizes = None
print(('Prepared %d sentences ' +
'(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
(len(src), ignored, max_src_length, max_tgt_length))
return src, tgt, src_sizes, tgt_sizes
def main():
dicts = {}
# maybe not necessary
tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
# We can load the dictionary from another project to ensure consistency
if opt.load_dict:
dicts = torch.load(opt.load_dict)
# construct set of languages from the training languages
src_langs = opt.train_src_lang.split("|")
# tgt_langs = opt.train_tgt_lang.split("|")
langs = src_langs
langs = sorted(list(set(langs)))
if not opt.load_dict:
dicts['langs'] = dict()
for lang in langs:
idx = len(dicts['langs'])
dicts['langs'][lang] = idx
print(dicts['langs'])
start = time.time()
src_train_files = opt.train_src.split("|")
tgt_train_files = opt.train_tgt.split("|")
# the target "dictionary" contains a list of labels
if opt.asr:
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
else:
dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab,
opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Vocabulary generated after %s" % elapse)
if opt.asr:
print('Preparing for acoustic classification model ...')
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
past_src_files = opt.past_train_src.split("|")
n_input_files = len(src_input_files)
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
train['past_src'] = list()
train['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads)
n_samples = len(src_data)
if n_input_files == 1:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# processing the previous segment
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads)
train['past_src'] += past_src_data
train['past_src_sizes'] += past_src_sizes
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
# train = dict()
# train['src'], train['tgt'] =
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
past_src_files = opt.past_valid_src.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
n_input_files = len(src_input_files)
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
valid['past_src'] = list()
valid['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=max(1024, opt.src_seq_length),
max_tgt_length=max(1024, opt.tgt_seq_length),
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
asr_format=opt.asr_format,
output_format=opt.format)
n_samples = len(src_data)
if n_input_files == 1:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# validation past file
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads)
valid['past_src'] += past_src_data
valid['past_src_sizes'] += past_src_sizes
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
else:
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
n_input_files = len(src_input_files)
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
start = time.time()
print('Binarizing data to train translation models...')
for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs):
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose)
n_samples = len(src_data)
if n_input_files == 1:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
n_input_files = len(src_input_files)
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
for (src_file, tgt_file, src_lang, tgt_lang) in zip(src_input_files, tgt_input_files, src_langs, tgt_langs):
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=max(1024,
opt.src_seq_length),
max_tgt_length=max(1024,
opt.tgt_seq_length),
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose)
n_samples = len(src_data)
if n_input_files == 1:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Binarization finished after %s" % elapse)
    if opt.src_vocab is None and not opt.asr and not opt.lm:
save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.tgt_vocab is None:
save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
# SAVE DATA
if opt.format in ['raw', 'bin']:
print('Saving data to \'' + opt.save_data + '.train.pt\'...')
save_data = {'dicts': dicts,
'type': opt.src_type,
'train': train,
'valid': valid}
torch.save(save_data, opt.save_data + '.train.pt')
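        # the archive can later be restored with torch.load(opt.save_data + '.train.pt'),
        # giving back the dicts together with the binarized train/valid splits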
print("Done")
elif opt.format in ['scp', 'scpmem', 'wav']:
print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
assert opt.asr, "ASR data format is required for this memory indexed format"
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['tgt', 'src_lang', 'tgt_lang']:
if train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
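            # finalize() writes the .idx index describing the tensors that were
            # streamed into the matching .bin payload above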
del train_data
if valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
            # add items from the validation data to the indexed dataset
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
if 'past_src' in train and len(train['past_src']) > 0:
set_ = 'past_src_sizes'
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
# Finally save the audio path
save_data = {'train': train['src'],
'valid': valid['src']}
# remember to take into account the past information
if 'past_src' in train and len(train['past_src']) > 0:
save_data['train_past'] = train['past_src']
save_data['valid_past'] = valid['past_src']
if opt.format in ['wav']:
torch.save(save_data, opt.save_data + '.wav_path.pt')
else:
torch.save(save_data, opt.save_data + '.scp_path.pt')
print("Done")
elif opt.format in ['mmap', 'mmem', 'scp']:
print('Saving data to memory indexed data files')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
if opt.asr:
print("ASR data format isn't compatible with memory indexed format")
raise AssertionError
# save dicts in this format
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang']:
if train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
del train_data
if valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
            # add items from the validation data to the indexed dataset
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
else:
raise NotImplementedError
if __name__ == "__main__":
main()
def safe_readline(f):
    """Read a line from f, backing up one byte at a time whenever the current
    position landed inside a multi-byte UTF-8 character."""
    pos = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            pos -= 1
            f.seek(pos)  # seek back to where this character begins and retry
| 33,644
| 42.024297
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='NMTGMinor',
version='0.1',
author='quanpn90',
author_email='ngoc.pham@kit.edu',
url='https://github.com/quanpn90/NMTGMinor',
license='MIT',
scripts=[
'flask_online.py',
'online.py',
'preprocess.py',
'train.py',
'translate_distributed.py',
'translate.py',
],
packages=find_packages(),
install_requires=['torch', 'torchaudio', 'soundfile'])
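# A typical way to install this package for development would be: pip install -e .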
| 532
| 25.65
| 60
|
py
|
NMTGMinor
|
NMTGMinor-master/classify.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.predictor import Predictor
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-sub_model', required=False, default="",
help='Path to (secondary) model .pt file')
parser.add_argument('-pretrained_classifier', required=False, default="",
help='Path to external classifier model .pt file')
parser.add_argument('-streaming', action="store_true",
help="""Use streaming mode (for model with streaming)""")
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
                    help='A vocabulary list (1 word per line). Only these words are generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-sub_src', required=False, default="",
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-past_src', required=False, default="",
help='Past Source sequence to decode (one line per sequence)')
parser.add_argument('-src_lang', default='src',
help='Source language')
parser.add_argument('-tgt_lang', default='tgt',
help='Target language')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                    be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=256,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-no_bos_gold', action="store_true",
                    help='Do not prepend the BOS token to the gold target sequence.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-no_repeat_ngram_size', type=int, default=0,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-no_buffering', action='store_true',
help='To remove buffering for transformer models (slower but more memory)')
parser.add_argument('-src_align_right', action='store_true',
                    help='Align (pad) source sequences to the right')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-dynamic_quantile', type=int, default=0,
help='To use int8 in decoding (for linear and LSTM layers only).')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fast_translate', action='store_true',
help='Using the fast decoder')
parser.add_argument('-global_search', action='store_true',
help='Using the global beam search for streaming')
parser.add_argument('-dynamic_max_len', action='store_true',
                    help='Set the maximum output length dynamically from the input length')
parser.add_argument('-dynamic_max_len_scale', type=float, default=5.0,
                    help='Scale factor on the input length for the dynamic maximum output length')
parser.add_argument('-dynamic_min_len_scale', type=float, default=0.0,
                    help='Scale factor on the input length for the dynamic minimum output length')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
    # Because adding a new sentence potentially enlarges the area of the padded rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
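# Worked example of the rule above: with batch_size = 12 and three sentences in the
# batch whose longest has 5 frames, a new 5-frame sentence gives
# max(5, 5) * (3 + 1) = 20 > 12, so the caller flushes the batch before appending.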
def report_score(name, score_total, words_total):
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / (words_total + 1e-9),
name, math.exp(-score_total / (words_total + 1e-9))))
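# report_score converts the accumulated log-probability into perplexity via
# PPL = exp(-score_total / words_total); the 1e-9 avoids division by zero on empty output.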
def addone(f):
for line in f:
yield line
yield None
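# addone yields every line of the file followed by a final None sentinel, letting a
# read loop detect end-of-input and still flush its last partially-filled batch.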
def get_sentence_from_tokens(tokens, input_type):
if input_type == 'word':
sent = " ".join(tokens)
elif input_type == 'char':
sent = "".join(tokens)
else:
raise NotImplementedError
return sent
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# Always pick n_best
opt.n_best = opt.beam_size
if opt.output == "stdout":
outF = sys.stdout
else:
outF = open(opt.output, 'w')
pred_score_total, pred_words_total, gold_score_total, gold_words_total = 0, 0, 0, 0
src_batches = []
src_batch, tgt_batch = [], []
count = 0
tgtF = open(opt.tgt) if opt.tgt else None
in_file = None
if opt.src == "stdin":
in_file = sys.stdin
opt.batch_size = 1
elif opt.encoder_type == "audio" and opt.asr_format == "h5":
in_file = h5.File(opt.src, 'r')
elif opt.encoder_type == "audio" and opt.asr_format == "scp":
# import kaldiio
# from kaldiio import ReadHelper
from onmt.data.audio_utils import ArkLoader
audio_data = open(opt.src)
scp_reader = ArkLoader()
else:
in_file = open(opt.src)
# if opt.streaming:
# if opt.batch_size != 1:
# opt.batch_size = 1
# print("Warning: Streaming only works with batch size 1")
#
# if opt.global_search:
# print(" Using global search algorithm ")
# from onmt.inference.global_translator import GlobalStreamTranslator
# translator = GlobalStreamTranslator(opt)
# else:
# translator = StreamTranslator(opt)
# else:
# if opt.fast_translate:
# translator = FastTranslator(opt)
#
# # TODO: load sub model
# else:
# translator = onmt.Translator(opt)
predictor = Predictor(opt)
# Audio processing for the source batch
if opt.encoder_type == "audio":
"""
For Audio we will have to group samples by the total number of frames in the source
"""
past_audio_data = open(opt.past_src) if opt.past_src else None
past_src_batches = list()
s_prev_context = []
t_prev_context = []
i = 0
concats = opt.concat.split("|")
n_models = len(opt.model.split("|"))
if len(concats) == 1:
concats = concats * n_models
assert len(concats) == n_models, "The number of models must match the number of concat configs"
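        # e.g. a single "-concat 4" with a 3-model ensemble expands to ["4", "4", "4"],
        # while "-concat 4|1|4" assigns a separate factor to each model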
for j, _ in enumerate(concats):
src_batches.append(list()) # We assign different inputs for each model in the ensemble
if past_audio_data:
past_src_batches.append(list())
sub_src = open(opt.sub_src) if opt.sub_src else None
sub_src_batch = list()
        while True:
            try:
                scp_path = next(audio_data).strip().split()[1]
                line = scp_reader.load_mat(scp_path)
                # read the matching past-context utterance, if one was provided
                if past_audio_data:
                    scp_path = next(past_audio_data).strip().split()[1]
                    past_line = scp_reader.load_mat(scp_path)
                else:
                    past_line = None
            except StopIteration:
                break
            if opt.stride != 1:
                line = line[0::opt.stride]
                if past_line is not None:
                    past_line = past_line[0::opt.stride]
            line = torch.from_numpy(line)
            past_line = torch.from_numpy(past_line) if past_audio_data else None
original_line = line
src_length = line.size(0)
"""
Handling different concatenation size for different models, to make ensembling possible
"""
if _is_oversized(src_batches[0], src_length, opt.batch_size):
# If adding a new sentence will make the batch oversized
# Then do translation now, and then free the list
print("Batch sizes :", len(src_batches[0]), len(tgt_batch))
pred_score = predictor.predict(src_batches)
count = get_result(pred_score, predictor, count, outF)
# count, pred_score, pred_words, gold_score, goldWords = \
# translate_batch(opt, tgtF, count, outF, translator,
# src_batches[0], tgt_batch, pred_batch,
# pred_score,
# pred_length, gold_score,
# num_gold_words,
# all_gold_scores, opt.input_type)
# pred_score_total += pred_score
# pred_words_total += pred_words
# gold_score_total += gold_score
# gold_words_total += goldWords
src_batch, tgt_batch, sub_src_batch = [], [], []
for j, _ in enumerate(src_batches):
src_batches[j] = []
if past_audio_data: past_src_batches[j] = []
# handling different concatenation settings (for example 4|1|4)
for j, concat_ in enumerate(concats):
concat = int(concat_)
line = original_line
# TODO: move this block to function
if concat != 1:
add = (concat - line.size()[0] % concat) % concat
z = torch.FloatTensor(add, line.size()[1]).zero_()
line = torch.cat((line, z), 0)
line = line.reshape((line.size()[0] // concat, line.size()[1] * concat))
if past_audio_data:
add = (concat - past_line.size()[0] % concat) % concat
z = torch.FloatTensor(add, past_line.size()[1]).zero_()
past_line = torch.cat((past_line, z), 0)
past_line = past_line.reshape((past_line.size()[0] // concat, past_line.size()[1] * concat))
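            # net effect: zero-pad so the frame count is divisible by `concat`, then
            # stack every `concat` consecutive frames: (T, dim) -> (ceil(T / concat), dim * concat)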
src_batches[j].append(line)
if past_audio_data: past_src_batches[j].append(past_line)
# read the "sub" input which is text based
# this is done for ensemble between a speech model and a text based model
if opt.sub_src:
sline = sub_src.readline().strip()
if opt.input_type == 'word':
src_tokens = sline.split()
elif opt.input_type == 'char':
src_tokens = list(sline.strip())
sub_src_batch += [src_tokens]
# catch the last batch
if len(src_batches[0]) != 0:
print("Batch size:", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
pred_score = predictor.predict(src_batches)
count = get_result(pred_score, predictor, count, outF)
src_batch, tgt_batch = [], []
for j, _ in enumerate(src_batches):
src_batches[j] = []
if past_audio_data: past_src_batches[j] = []
# Text processing for MT
else:
raise NotImplementedError
if tgtF:
tgtF.close()
def get_result(pred_score, predictor, count, outF):
tgt_dict = predictor.tgt_dict.idxToLabel
for b in range(len(pred_score)):
count += 1
out_string = "PRED %d " % count
for i in range(len(pred_score[b])):
prob = pred_score[b][i] * 100
label = tgt_dict[i]
out_string += "%s: %.2f ; " % (label, prob)
print(out_string)
outF.write(out_string + '\n')
outF.flush()
return count
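    # each emitted line has the form "PRED <n> <label>: <prob%> ; <label>: <prob%> ; ..."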
#print("PRED SCORE", pred_score[b])
#
# pred_score_total = sum(score[0].item() for score in pred_score)
# pred_words_total = sum(len(x[0]) for x in pred_batch)
# gold_score_total = 0
# gold_words_total = 0
# if tgtF is not None:
# gold_score_total = sum(gold_score).item()
# gold_words_total = num_gold_words
#
# for b in range(len(pred_batch)):
#
# count += 1
#
# if not opt.print_nbest:
# outF.write(get_sentence_from_tokens(pred_batch[b][0], input_type) + '\n')
# outF.flush()
# else:
# for n in range(opt.n_best):
# idx = n
# output_sent = get_sentence_from_tokens(pred_batch[b][idx], input_type)
# out_str = "%s ||| %.4f" % (output_sent, pred_score[b][idx])
# outF.write(out_str + '\n')
# outF.flush()
#
# if opt.verbose:
# if opt.encoder_type == "text":
# src_sent = " ".join(src_batch[b])
# print('SRC %d: %s' % (count, src_sent))
# print('PRED %d: %s' % (count, get_sentence_from_tokens(pred_batch[b][0], input_type)))
# print("PRED SCORE: %.4f" % pred_score[b][0])
#
# if tgtF is not None:
# tgt_sent = get_sentence_from_tokens(tgt_batch[b], input_type)
# if translator.tgt_dict.lower:
# tgt_sent = tgt_sent.lower()
# print('GOLD %d: %s ' % (count, tgt_sent))
# print("GOLD SCORE: %.4f" % gold_score[b])
# print()
# if opt.print_nbest:
# print('\n BEST HYP:')
# for n in range(opt.n_best):
# idx = n
# out_str = "%s ||| %.4f" % (" ".join(pred_batch[b][idx]), pred_score[b][idx])
# print(out_str)
# print('')
#
# return count, pred_score_total, pred_words_total, gold_score_total, gold_words_total
if __name__ == "__main__":
main()
| 17,820
| 39.687215
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/eval_autoencoder.py
|
#!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from ae.Evaluator import Evaluator
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-autoencoder', required=True,
help='Path to model .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-src_img_dir', default="",
help='Source image directory')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                    be the decoded sequence)""")
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=2048,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-representation', type=str, default="EncoderHiddenState",
help="Representation for Autoencoder")
parser.add_argument('-auto_encoder_hidden_size', type=int, default=100,
help="Hidden size of autoencoder")
parser.add_argument('-auto_encoder_drop_out', type=float, default=0,
help="Use drop_out in autoencoder")
def reportScore(name, scoreTotal, wordsTotal):
    # the small epsilon guards against division by zero on empty output
    print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
        name, scoreTotal / (wordsTotal + 1e-9),
        name, math.exp(-scoreTotal / (wordsTotal + 1e-9))))
def addone(f):
for line in f:
yield line
yield None
def lenPenalty(s, l, alpha):
l_term = math.pow(l, alpha)
return s / l_term
def getSentenceFromTokens(tokens, input_type):
if input_type == 'word':
sent = " ".join(tokens)
elif input_type == 'char':
sent = "".join(tokens)
else:
raise NotImplementedError
return sent
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
if opt.output == "stdout":
outF = sys.stdout
else:
outF = open(opt.output, 'w')
srcBatch, tgtBatch = [], []
count = 0
tgtF = open(opt.tgt) if opt.tgt else None
evaluator = Evaluator(opt)
    if opt.src == "stdin":
        inFile = sys.stdin
        opt.batch_size = 1
    elif opt.encoder_type == "audio":
        inFile = h5.File(opt.src, 'r')
    else:
        inFile = open(opt.src)
    if opt.encoder_type == "audio":
        for i in range(len(inFile)):
            if opt.stride == 1:
                line = torch.from_numpy(np.array(inFile[str(i)]))
            else:
                line = torch.from_numpy(np.array(inFile[str(i)])[0::opt.stride])
            if opt.concat != 1:
                add = (opt.concat - line.size()[0] % opt.concat) % opt.concat
                z = torch.FloatTensor(add, line.size()[1]).zero_()
                line = torch.cat((line, z), 0)
                # integer division is required here; '/' would produce a float size
                line = line.reshape((line.size()[0] // opt.concat, line.size()[1] * opt.concat))
if line is not None:
# ~ srcTokens = line.split()
srcBatch += [line]
if tgtF:
# ~ tgtTokens = tgtF.readline().split() if tgtF else None
if opt.input_type == 'word':
tgtTokens = tgtF.readline().split() if tgtF else None
elif opt.input_type == 'char':
tgtTokens = list(tgtF.readline().strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgtBatch += [tgtTokens]
if len(srcBatch) < opt.batch_size:
continue
else:
# at the end of file, check last batch
if len(srcBatch) == 0:
break
            r = evaluator.evalASR(srcBatch, tgtBatch)
            if opt.representation == "EncoderHiddenState":
                outputResults(srcBatch, r, outF)
            elif opt.representation == "DecoderHiddenState" or opt.representation == "Probabilities":
                for i in range(len(tgtBatch)):
                    tgtBatch[i].append("EOS")
                outputResults(tgtBatch, r, outF)
            elif opt.representation == "EncoderDecoderHiddenState":
                for i in range(len(tgtBatch)):
                    tgtBatch[i].append("EOS")
                outputAlignment(srcBatch, tgtBatch, r, outF)
srcBatch, tgtBatch = [], []
        if len(srcBatch) != 0:
            r = evaluator.evalASR(srcBatch, tgtBatch)
            if opt.representation == "EncoderHiddenState":
                outputResults(srcBatch, r, outF)
            elif opt.representation == "DecoderHiddenState" or opt.representation == "Probabilities":
                for i in range(len(tgtBatch)):
                    tgtBatch[i].append("EOS")
                outputResults(tgtBatch, r, outF)
            elif opt.representation == "EncoderDecoderHiddenState":
                for i in range(len(tgtBatch)):
                    tgtBatch[i].append("EOS")
                outputAlignment(srcBatch, tgtBatch, r, outF)
else:
for line in addone(inFile):
if line is not None:
if opt.input_type == 'word':
srcTokens = line.split()
elif opt.input_type == 'char':
srcTokens = list(line.strip())
else:
raise NotImplementedError("Input type unknown")
srcBatch += [srcTokens]
if tgtF:
# ~ tgtTokens = tgtF.readline().split() if tgtF else None
if opt.input_type == 'word':
tgtTokens = tgtF.readline().split() if tgtF else None
elif opt.input_type == 'char':
tgtTokens = list(tgtF.readline().strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgtBatch += [tgtTokens]
if len(srcBatch) < opt.batch_size:
continue
else:
# at the end of file, check last batch
if len(srcBatch) == 0:
break
            r = evaluator.eval(srcBatch, tgtBatch)
            if opt.representation == "EncoderHiddenState":
                outputResults(srcBatch, r, outF)
            elif opt.representation == "DecoderHiddenState" or opt.representation == "Probabilities":
                for i in range(len(tgtBatch)):
                    tgtBatch[i].append("EOS")
                outputResults(tgtBatch, r, outF)
            elif opt.representation == "EncoderDecoderHiddenState":
                for i in range(len(tgtBatch)):
                    tgtBatch[i].append("EOS")
                outputAlignment(srcBatch, tgtBatch, r, outF)
            srcBatch, tgtBatch = [], []
if tgtF:
tgtF.close()
def outputResults(srcBatch, r, outF):
    # r is a flat sequence of per-token values; redistribute them back to the
    # sentences of the batch in position-major order
    x = 0
    j = 0
    out = []
    for i in range(len(srcBatch)):
        out.append([])
    while x < r.size(0):
        for i in range(len(srcBatch)):
            if j < len(srcBatch[i]):
                out[i].append(str(r[x].item()))
                x += 1
        j += 1
    for i in range(len(out)):
        for j in range(len(out[i])):
            outF.write(out[i][j])
            outF.write(' ')
        outF.write("\n")
    outF.flush()
def outputAlignment(srcBatch, tgtBatch, r, outF):
    for b in range(len(srcBatch)):
        for i in range(len(srcBatch[b])):
            for j in range(len(tgtBatch[b])):
                outF.write("%i-%i#%f " % (i, j, r[i, j, b]))
        outF.write("\n")
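# outputAlignment emits one line per batch element made of src_pos-tgt_pos#weight
# triples, e.g. "0-0#0.823100 0-1#0.011200 ...".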
if __name__ == "__main__":
main()
| 9,790
| 35.808271
| 102
|
py
|
NMTGMinor
|
NMTGMinor-master/translate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import json
import math
import numpy
import sys
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-sub_model', required=False, default="",
help='Path to (secondary) model .pt file')
parser.add_argument('-pretrained_classifier', required=False, default="",
help='Path to external classifier model .pt file')
parser.add_argument('-streaming', action="store_true",
help="""Use streaming mode (for model with streaming)""")
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
                    help='A vocabulary list (1 word per line). Only these words are generated during translation.')
parser.add_argument('-vocab_id_list', default="",
                    help='A vocabulary id list (one id per line). Only these ids are generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-sub_src', required=False, default="",
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-past_src', required=False, default="",
help='Past Source sequence to decode (one line per sequence)')
parser.add_argument('-src_lang', default='src',
help='Source language')
parser.add_argument('-src_atb', default='nothingness',
                    help='Source attribute')
parser.add_argument('-tgt_lang', default='tgt',
help='Target language')
parser.add_argument('-tgt_atb', default='nothingness',
                    help='Target attribute')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="scp", required=False,
help="Format of asr data (only scp supported for now)")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                    be the decoded sequence)""")
parser.add_argument('-prefix_string', default='',
help="""Prefix string for all of the translation""")
parser.add_argument('-anti_prefix_string', default='',
help="""Prefix string for all of the translation""")
parser.add_argument('-prefix_tgt', default='',
help="""Prefix file that contains prefix string for each of the translation
(must use either this or prefix_string, not both""")
parser.add_argument('-force_bos', action="store_true",
help="""Force the first token in the prefix to be bos""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=256,
help='Maximum sentence length.')
parser.add_argument('-min_sent_length', type=int, default=0,
                    help='Minimum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-no_bos_gold', action="store_true",
                    help='Do not prepend the BOS token to the gold target sequence.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-no_repeat_ngram_size', type=int, default=0,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-no_buffering', action='store_true',
help='To remove buffering for transformer models (slower but more memory)')
parser.add_argument('-src_align_right', action='store_true',
                    help='Align (pad) source sequences to the right')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-dynamic_quantile', type=int, default=0,
help='To use int8 in decoding (for linear and LSTM layers only).')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fast_translate', action='store_true',
help='Using the fast decoder')
parser.add_argument('-global_search', action='store_true',
help='Using the global beam search for streaming')
parser.add_argument('-dynamic_max_len', action='store_true',
                    help='Set the maximum output length dynamically from the input length')
parser.add_argument('-dynamic_max_len_scale', type=float, default=5.0,
                    help='Scale factor on the input length for the dynamic maximum output length')
parser.add_argument('-dynamic_min_len_scale', type=float, default=0.0,
                    help='Scale factor on the input length for the dynamic minimum output length')
parser.add_argument('-external_tokenizer', default="",
help="External tokenizer from Huggingface. Currently supports barts.")
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
    # Because adding a new sentence potentially enlarges the area of the padded rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
def report_score(name, score_total, words_total):
try:
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / (words_total + 1e-9),
name, math.exp(-score_total / (words_total + 1e-9))))
except OverflowError:
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, -100 / (words_total + 1e-9),
name, math.exp(-100 / (words_total + 1e-9))))
def addone(f):
for line in f:
yield line
yield None
def len_penalty(s, l, alpha):
l_term = math.pow(l, alpha)
return s / l_term
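# Worked example: with the default alpha = 0.6, a 16-token hypothesis with
# log-probability -8.0 is rescored to -8.0 / 16 ** 0.6 ≈ -1.52 before ranking.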
def get_sentence_from_tokens(tokens, ids, input_type, external_tokenizer=None):
if external_tokenizer is None:
if input_type == 'word':
sent = " ".join(tokens)
elif input_type == 'char':
sent = "".join(tokens)
else:
raise NotImplementedError
else:
sent = external_tokenizer.decode(ids, True, True).strip()
return sent
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# Always pick n_best
opt.n_best = opt.beam_size
if opt.output == "stdout":
outF = sys.stdout
else:
outF = open(opt.output, 'w')
pred_score_total, pred_words_total, gold_score_total, gold_words_total = 0, 0, 0, 0
src_batches = []
src_batch, tgt_batch, past_src_batch = [], [], []
count = 0
tgtF = open(opt.tgt) if opt.tgt else None
in_file = None
if opt.src == "stdin":
in_file = sys.stdin
opt.batch_size = 1
elif opt.encoder_type == "audio" and opt.asr_format == "scp":
# import kaldiio
# from kaldiio import ReadHelper
from onmt.data.audio_utils import ArkLoader
audio_data = open(opt.src)
scp_reader = ArkLoader()
elif opt.asr_format == 'wav':
audio_data = open(opt.src)
else:
in_file = open(opt.src)
sub_src = None
if opt.streaming:
if opt.batch_size != 1:
opt.batch_size = 1
print("Warning: Streaming only works with batch size 1")
if opt.global_search:
print(" Using global search algorithm ")
from onmt.inference.global_translator import GlobalStreamTranslator
translator = GlobalStreamTranslator(opt)
else:
translator = StreamTranslator(opt)
else:
translator = FastTranslator(opt)
if hasattr(translator, 'tgt_external_tokenizer'):
external_tokenizer = translator.tgt_external_tokenizer
else:
external_tokenizer = None
# if "mbart-large-50" in opt.external_tokenizer.lower():
# print("[INFO] Using the external MBART50 tokenizer...")
#
# from transformers import MBart50TokenizerFast
# external_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_lang=opt.src_lang)
#
# elif "bart" in opt.external_tokenizer.lower():
# print("[INFO] Using the external BART tokenizer...")
#
# from transformers import BartTokenizer
# external_tokenizer = BartTokenizer.from_pretrained(opt.external_tokenizer)
#
# elif "m2m100" in opt.external_tokenizer.lower():
# print("[INFO] Using the external %s tokenizer..." % opt.external_tokenizer)
# from transformers import M2M100Tokenizer
# external_tokenizer = M2M100Tokenizer.from_pretrained(opt.external_tokenizer, src_lang=opt.src_lang)
#
# elif opt.external_tokenizer is None or len(opt.external_tokenizer) == 0:
# external_tokenizer = None
# else:
# raise NotImplementedError
prefix = None
prefix_reader = None
if len(opt.prefix_string) > 0:
assert len(opt.prefix_tgt) <= 0
prefix = [opt.prefix_string]
elif len(opt.prefix_tgt) > 0:
prefix = list()
prefix_reader = open(opt.prefix_tgt)
anti_prefix = None
if len(opt.anti_prefix_string) > 0:
anti_prefix = opt.anti_prefix_string
# Audio processing for the source batch
if opt.encoder_type == "audio" and opt.asr_format in ['scp', 'kaldi']:
"""
For Audio we will have to group samples by the total number of frames in the source
"""
past_audio_data = open(opt.past_src) if opt.past_src else None
past_src_batches = list()
s_prev_context = []
t_prev_context = []
i = 0
concats = opt.concat.split("|")
n_models = len(opt.model.split("|"))
if len(concats) == 1:
concats = concats * n_models
assert len(concats) == n_models, "The number of models must match the number of concat configs"
for j, _ in enumerate(concats):
src_batches.append(list()) # We assign different inputs for each model in the ensemble
if past_audio_data:
past_src_batches.append(list())
sub_src = open(opt.sub_src) if opt.sub_src else None
sub_src_batch = list()
while True:
try:
scp_path = next(audio_data).strip().split()[1]
line = scp_reader.load_mat(scp_path)
if past_audio_data:
scp_path = next(past_audio_data).strip().split()[1]
past_line = scp_reader.load_mat(scp_path)
else:
past_line = None
except StopIteration:
break
            if opt.stride != 1:
                line = line[0::opt.stride]
                if past_line is not None:
                    past_line = past_line[0::opt.stride]
line = torch.from_numpy(line)
past_line = torch.from_numpy(past_line) if past_audio_data else None
original_line = line
src_length = line.size(0)
"""
Handling different concatenation size for different models, to make ensembling possible
"""
if _is_oversized(src_batches[0], src_length, opt.batch_size):
# If adding a new sentence will make the batch oversized
# Then do translation now, and then free the list
if past_audio_data:
print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch),
len(past_src_batches[0]))
else:
print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
pred_batch, pred_ids, \
pred_score, pred_length, gold_score, num_gold_words, all_gold_scores = translator.translate(
src_batches, tgt_batch,
sub_src_data=sub_src_batch, past_src_data=past_src_batches,
type='asr',
prefix=prefix, anti_prefix=anti_prefix)
print("Result:", len(pred_batch))
count, pred_score, pred_words, gold_score, goldWords = \
translate_batch(opt, tgtF, count, outF, translator,
src_batches[0], tgt_batch, pred_batch, pred_ids,
pred_score,
pred_length, gold_score,
num_gold_words,
all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
pred_score_total += pred_score
pred_words_total += pred_words
gold_score_total += gold_score
gold_words_total += goldWords
src_batch, tgt_batch, sub_src_batch = [], [], []
for j, _ in enumerate(src_batches):
src_batches[j] = []
if past_audio_data: past_src_batches[j] = []
# only refresh when prefix reader is not None
if prefix is not None and prefix_reader is not None:
prefix = []
# handling different concatenation settings (for example 4|1|4)
for j, concat_ in enumerate(concats):
concat = int(concat_)
line = original_line
# TODO: move this block to function
if concat != 1:
add = (concat - line.size()[0] % concat) % concat
z = torch.FloatTensor(add, line.size()[1]).zero_()
line = torch.cat((line, z), 0)
line = line.reshape((line.size()[0] // concat, line.size()[1] * concat))
if past_audio_data:
add = (concat - past_line.size()[0] % concat) % concat
z = torch.FloatTensor(add, past_line.size()[1]).zero_()
past_line = torch.cat((past_line, z), 0)
past_line = past_line.reshape((past_line.size()[0] // concat, past_line.size()[1] * concat))
src_batches[j].append(line)
if past_audio_data: past_src_batches[j].append(past_line)
if tgtF:
# ~ tgt_tokens = tgtF.readline().split() if tgtF else None
tline = tgtF.readline().strip()
if opt.previous_context > 0:
t_prev_context.append(tline)
for i in range(1, opt.previous_context + 1):
if i < len(s_prev_context):
tline = t_prev_context[-i - 1] + " # " + tline
if len(t_prev_context) > opt.previous_context:
t_prev_context = t_prev_context[-1 * opt.previous_context:]
if opt.input_type == 'word':
tgt_tokens = tline.split() if tgtF else None
elif opt.input_type == 'char':
tgt_tokens = list(tline.strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgt_batch += [tgt_tokens]
# read the "sub" input which is text based
# this is done for ensemble between a speech model and a text based model
if opt.sub_src:
sline = sub_src.readline().strip()
if opt.input_type == 'word':
src_tokens = sline.split()
elif opt.input_type == 'char':
src_tokens = list(sline.strip())
sub_src_batch += [src_tokens]
if prefix is not None and prefix_reader is not None:
prefix.append(prefix_reader.readline().strip())
# catch the last batch
if len(src_batches[0]) != 0:
print("Batch size:", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = translator.translate(
src_batches,
tgt_batch,
past_src_data=past_src_batches,
sub_src_data=sub_src_batch,
type='asr', prefix=prefix, anti_prefix=anti_prefix)
print("Result:", len(pred_batch))
count, pred_score, pred_words, gold_score, goldWords \
= translate_batch(opt, tgtF, count, outF, translator,
src_batches[0], tgt_batch, pred_batch, pred_ids,
pred_score,
pred_length, gold_score,
num_gold_words,
all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
pred_score_total += pred_score
pred_words_total += pred_words
gold_score_total += gold_score
gold_words_total += goldWords
src_batch, tgt_batch, sub_src_batch = [], [], []
for j, _ in enumerate(src_batches):
src_batches[j] = []
if past_audio_data: past_src_batches[j] = []
if prefix is not None and prefix_reader is not None:
prefix = []
    # Audio processing for wav input
elif opt.asr_format == 'wav':
from onmt.utils import safe_readaudio
past_audio_data = open(opt.past_src) if opt.past_src else None
past_src_batches = list()
s_prev_context = []
t_prev_context = []
i = 0
n_models = len(opt.model.split("|"))
for j in range(n_models):
src_batches.append(list()) # We assign different inputs for each model in the ensemble
if past_audio_data:
past_src_batches.append(list())
sub_src = open(opt.sub_src) if opt.sub_src else None
sub_src_batch = list()
while True:
try:
line = next(audio_data).strip().split()
if len(line) == 2:
wav_path = line[1]
start = 0
end = 0
else:
wav_path, start, end = line[1], float(line[2]), float(line[3])
line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
if past_audio_data:
past_line = next(past_audio_data).strip().split()
if len(past_line) == 2:
wav_path = past_line[1]
start = 0
end = 0
else:
wav_path, start, end = past_line[1], float(past_line[2]), float(past_line[3])
past_line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
else:
past_line = None
except StopIteration:
break
original_line = line
src_length = line.size(0)
"""
Handling different concatenation size for different models, to make ensembling possible
"""
if _is_oversized(src_batches[0], src_length, opt.batch_size):
# If adding a new sentence will make the batch oversized
# Then do translation now, and then free the list
if past_audio_data:
print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch),
len(past_src_batches[0]))
else:
print("Batch sizes :", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = translator.translate(
src_batches, tgt_batch, sub_src_data=sub_src_batch, past_src_data=past_src_batches, type='asr',
prefix=prefix, anti_prefix=anti_prefix)
print("Result:", len(pred_batch))
count, pred_score, pred_words, gold_score, goldWords = \
translate_batch(opt, tgtF, count, outF, translator,
src_batches[0], tgt_batch, pred_batch, pred_ids,
pred_score,
pred_length, gold_score,
num_gold_words,
all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
pred_score_total += pred_score
pred_words_total += pred_words
gold_score_total += gold_score
gold_words_total += goldWords
src_batch, tgt_batch, sub_src_batch = [], [], []
for j, _ in enumerate(src_batches):
src_batches[j] = []
if past_audio_data: past_src_batches[j] = []
if prefix is not None and prefix_reader is not None:
prefix = []
# handling different concatenation settings (for example 4|1|4)
for j in range(n_models):
src_batches[j].append(line)
if past_audio_data: past_src_batches[j].append(past_line)
if tgtF:
# ~ tgt_tokens = tgtF.readline().split() if tgtF else None
tline = tgtF.readline().strip()
if opt.previous_context > 0:
t_prev_context.append(tline)
for i in range(1, opt.previous_context + 1):
if i < len(s_prev_context):
tline = t_prev_context[-i - 1] + " # " + tline
if len(t_prev_context) > opt.previous_context:
t_prev_context = t_prev_context[-1 * opt.previous_context:]
if opt.input_type == 'word':
tgt_tokens = tline.split() if tgtF else None
elif opt.input_type == 'char':
tgt_tokens = list(tline.strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgt_batch += [tgt_tokens]
# read the "sub" input which is text based
# this is done for ensemble between a speech model and a text based model
if opt.sub_src:
sline = sub_src.readline().strip()
if opt.input_type == 'word':
src_tokens = sline.split()
elif opt.input_type == 'char':
src_tokens = list(sline.strip())
sub_src_batch += [src_tokens]
if prefix is not None and prefix_reader is not None:
prefix.append(prefix_reader.readline().strip())
# catch the last batch
if len(src_batches[0]) != 0:
print("Batch size:", len(src_batches[0]), len(tgt_batch), len(sub_src_batch))
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = translator.translate(
src_batches,
tgt_batch,
past_src_data=past_src_batches,
sub_src_data=sub_src_batch, type='asr', prefix=prefix, anti_prefix=anti_prefix)
print("Result:", len(pred_batch))
count, pred_score, pred_words, gold_score, goldWords \
= translate_batch(opt, tgtF, count, outF, translator,
src_batches[0], tgt_batch, pred_batch, pred_ids,
pred_score,
pred_length, gold_score,
num_gold_words,
all_gold_scores, opt.input_type, external_tokenizer=external_tokenizer)
pred_score_total += pred_score
pred_words_total += pred_words
gold_score_total += gold_score
gold_words_total += goldWords
        src_batch, tgt_batch, sub_src_batch = [], [], []
for j, _ in enumerate(src_batches):
src_batches[j] = []
if past_audio_data: past_src_batches[j] = []
if prefix is not None and prefix_reader is not None:
prefix = []
else:
past_text_data = open(opt.past_src) if opt.past_src else None
for line in addone(in_file):
if line is not None:
if opt.input_type == 'word':
src_tokens = line.split()
elif opt.input_type == 'char':
src_tokens = list(line.strip())
else:
raise NotImplementedError("Input type unknown")
if line.strip() == "":
if opt.streaming:
print("Found a document break")
translator.reset_stream()
continue
src_batch += [src_tokens]
if tgtF:
# ~ tgt_tokens = tgtF.readline().split() if tgtF else None
if opt.input_type == 'word':
tgt_tokens = tgtF.readline().split() if tgtF else None
elif opt.input_type == 'char':
tgt_tokens = list(tgtF.readline().strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgt_batch += [tgt_tokens]
if past_text_data:
if opt.input_type == 'word':
past_src_tokens = past_text_data.readline().split()
elif opt.input_type == 'char':
past_src_tokens = list(past_text_data.readline().strip())
else:
raise NotImplementedError("Input type unknown")
past_src_batch += [past_src_tokens]
if prefix is not None and prefix_reader is not None:
prefix.append(prefix_reader.readline().strip())
if len(src_batch) < opt.batch_size:
continue
else:
# at the end of file, check last batch
if len(src_batch) == 0:
break
# actually done beam search from the model
pred_batch, pred_ids, pred_score, pred_length, \
gold_score, num_gold_words, all_gold_scores = translator.translate(
src_batch,
tgt_batch,
past_src_batch,
prefix=prefix, anti_prefix=anti_prefix)
# convert output tensor to words
count, pred_score, pred_words, gold_score, goldWords = translate_batch(opt, tgtF, count, outF, translator,
src_batch, tgt_batch,
pred_batch, pred_ids,
pred_score, pred_length,
gold_score, num_gold_words,
all_gold_scores, opt.input_type,
external_tokenizer=external_tokenizer)
pred_score_total += pred_score
pred_words_total += pred_words
gold_score_total += gold_score
gold_words_total += goldWords
src_batch, tgt_batch, past_src_batch = [], [], []
if prefix is not None and prefix_reader is not None:
prefix = []
if opt.verbose:
report_score('PRED', pred_score_total, pred_words_total)
if tgtF: report_score('GOLD', gold_score_total, gold_words_total)
if tgtF:
tgtF.close()
if opt.dump_beam:
json.dump(translator.beam_accum, open(opt.dump_beam, 'w'))
if prefix_reader is not None:
prefix_reader.close()
if sub_src is not None:
sub_src.close()
def translate_batch(opt, tgtF, count, outF, translator, src_batch, tgt_batch,
pred_batch, pred_ids, pred_score, pred_length,
gold_score,
num_gold_words, all_gold_scores, input_type, external_tokenizer=None):
original_pred_batch = pred_batch
original_pred_score = pred_score
# if print n best list then do not print the scores
if opt.print_nbest:
opt.normalize = False
if opt.normalize and not opt.fast_translate:
pred_batch_ = []
pred_score_ = []
for bb, ss, ll in zip(pred_batch, pred_score, pred_length):
# ~ ss_ = [s_/numpy.maximum(1.,len(b_)) for b_,s_,l_ in zip(bb,ss,ll)]
length = [len(i) for i in [''.join(b_) for b_ in bb]]
ss_ = [len_penalty(s_, max(l_, 1), opt.alpha) for b_, s_, l_ in zip(bb, ss, length)]
ss_origin = [(s_, len(b_)) for b_, s_, l_ in zip(bb, ss, ll)]
sidx = numpy.argsort(ss_)[::-1]
# ~ print(ss_, sidx, ss_origin)
pred_batch_.append([bb[s] for s in sidx])
pred_score_.append([ss_[s] for s in sidx])
pred_batch = pred_batch_
pred_score = pred_score_
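        # Illustrative re-ranking example (hypothetical scores): with alpha=0.6,
        # hypotheses with (raw score, char length) of (-6.0, 20) and (-5.0, 10)
        # get length-penalized scores -6.0 / 20**0.6 ~= -0.99 and
        # -5.0 / 10**0.6 ~= -1.26, so the longer hypothesis is re-ranked first
        # despite its lower raw log-probability.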
pred_score_total = sum(score[0].item() for score in pred_score)
pred_words_total = sum(len(x[0]) for x in pred_batch)
gold_score_total = 0
gold_words_total = 0
if tgtF is not None:
gold_score_total = sum(gold_score).item()
gold_words_total = num_gold_words
for b in range(len(pred_batch)):
count += 1
if not opt.print_nbest:
outF.write(
get_sentence_from_tokens(pred_batch[b][0], pred_ids[b][0], input_type, external_tokenizer) + '\n')
outF.flush()
else:
for n in range(opt.n_best):
idx = n
output_sent = get_sentence_from_tokens(pred_batch[b][idx], pred_ids[b][idx], input_type,
external_tokenizer)
out_str = "%s ||| %.4f" % (output_sent, pred_score[b][idx])
outF.write(out_str + '\n')
outF.flush()
if opt.verbose:
if opt.encoder_type == "text":
src_sent = " ".join(src_batch[b])
print('SRC %d: %s' % (count, src_sent))
print('PRED %d: %s' % (
count, get_sentence_from_tokens(pred_batch[b][0], pred_ids[b][0], input_type, external_tokenizer)))
print("PRED SCORE: %.4f" % pred_score[b][0])
if tgtF is not None:
                tgt_sent = get_sentence_from_tokens(tgt_batch[b], None, input_type)
if translator.tgt_dict.lower:
tgt_sent = tgt_sent.lower()
print('GOLD %d: %s ' % (count, tgt_sent))
print("GOLD SCORE: %.4f" % gold_score[b])
print()
if opt.print_nbest:
print('\n BEST HYP:')
for n in range(opt.n_best):
idx = n
out_str = "%s ||| %.4f" % (" ".join(pred_batch[b][idx]), pred_score[b][idx])
print(out_str)
print('')
return count, pred_score_total, pred_words_total, gold_score_total, gold_words_total
if __name__ == "__main__":
main()
| 35,674
| 43.04321
| 121
|
py
|
NMTGMinor
|
NMTGMinor-master/verify_wav2vec2_feat.py
|
#!/usr/bin/env python
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
from torch.cuda.amp import autocast
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
                    help='A vocabulary list (one word per line). Only these words are generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-scp', default='output.scp',
help="""Path to output the feature paths""")
# parser.add_argument('-ark_output', default='output.ark',
# help="""Path to output the features""")
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size (in audio samples)')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
    # Because adding a new sentence will potentially enlarge the area of the padded rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
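# Worked example for the padded-area rule above (hypothetical sizes): with
# batch_size=512, a batch holding tensors of lengths [80, 100] still admits a
# 150-frame sentence, since max(100, 150) * 3 = 450 <= 512, but not a
# 200-frame one, since 200 * 3 = 600 > 512.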
def verify_ark(utts, features, padding_mask, scp_data):
# cache_wav = ''
features = features.cpu()
bsz, seq_len, feat_size = features.size()
lengths = (1 - padding_mask).sum(dim=1)
# print(features.size(), lengths)
    assert torch.max(lengths).item() == seq_len
assert len(utts) == bsz
for i in range(bsz):
feature_ = features[i, 0:lengths[i]]
feature_ = feature_.numpy()
precomputed_feature_ = scp_data[i]
np.testing.assert_allclose(
feature_,
precomputed_feature_,
atol=1e-5, rtol=1e-5)
# if opt.fp16:
# feature_ = feature_.astype(np.float16)
# seg_name = utts[i]
# dic = {seg_name: feature_}
#
# from onmt.data.kaldiio.io import write_ark_file
# write_ark_file(out_ark, out_scp, dic)
def build_data(src_sents):
from onmt.data.wav_dataset import WavDataset
src_data = src_sents
data_type = 'wav'
tgt_data = None
src_lang_data = [torch.Tensor([0])]
tgt_lang_data = None
return onmt.Dataset(src_data, tgt_data,
src_langs=src_lang_data, tgt_langs=tgt_lang_data,
batch_size_words=sys.maxsize,
max_src_len=sys.maxsize,
data_type=data_type,
batch_size_sents=sys.maxsize,
src_align_right=False,
past_src_data=None)
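# Note: the sys.maxsize limits above effectively disable internal re-batching,
# so dataset.get_batch(0) below returns all of src_sents as a single batch.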
if __name__ == '__main__':
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2VecExtractor
model = FairseqWav2VecExtractor(opt.model)
# if opt.fp16:
# model = model.half()
if opt.cuda:
model = model.cuda()
model.eval()
print(model.wav2vec_encoder.feature_extractor)
audio_data = open(opt.src)
scp_data = open(opt.scp)
from onmt.data.audio_utils import ArkLoader
scp_reader = ArkLoader()
from onmt.utils import safe_readaudio
i = 0
n_models = len(opt.model.split("|"))
src_batch = list()
src_utts = list()
src_scp = list()
while True:
try:
line = next(audio_data).strip().split()
utt = line[0]
if len(line) == 2:
wav_path = line[1]
start = 0
end = 0
else:
wav_path, start, end = line[1], float(line[2]), float(line[3])
# read the wav samples
line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
# read the scp data
scp_path = next(scp_data).strip().split()[1]
scp_line = scp_reader.load_mat(scp_path)
except StopIteration:
break
src_length = line.size(0)
"""
        Read the features output by the wav2vec model and verify them against the precomputed scp/ark features
"""
if _is_oversized(src_batch, src_length, opt.batch_size):
# If adding a new sentence will make the batch oversized
# Then do translation now, and then free the list
print("Batch sizes :", len(src_batch))
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
            with torch.no_grad():
                with autocast(enabled=opt.fp16):
                    features, padding_mask = model(batch)
# write_ark(src_utts, features, padding_mask, ark_out, scp_out, opt)
verify_ark(src_utts, features, padding_mask, src_scp)
src_batch = []
src_utts = []
src_scp = []
src_batch.append(line)
src_utts.append(utt)
src_scp.append(scp_line)
# catch the last batch
if len(src_batch) != 0:
print("Batch sizes :", len(src_batch), )
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
        with torch.no_grad():
            with autocast(enabled=opt.fp16):
                features, padding_mask = model(batch)
verify_ark(src_utts, features, padding_mask, src_scp)
src_batch = []
src_utts = []
src_scp = []
    audio_data.close()
    scp_data.close()
| 8,036
| 32.348548
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/rematch_language_embedding.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import copy
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.constants import add_tokenidx
from options import backward_compatible
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model_src', required=True,
help='Path to model .pt file')
parser.add_argument('-model_tgt', required=True,
help='Path to model .pt file')
parser.add_argument('-model_out', required=True,
help='Path to model .pt file')
opt = parser.parse_args()
# first, we load the model src
print(opt.model_src)
checkpoint = torch.load(opt.model_src, map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
model_opt = backward_compatible(model_opt)
src_dicts = checkpoint['dicts']
# update special tokens
onmt.constants = add_tokenidx(model_opt, onmt.constants, src_dicts)
model = build_model(model_opt, checkpoint['dicts'])
model.load_state_dict(checkpoint['model'])
# now load the 2nd model
print(opt.model_tgt)
checkpoint = torch.load(opt.model_tgt, map_location=lambda storage, loc: storage)
# model_opt = checkpoint['opt']
# model_opt = backward_compatible(model_opt)
tgt_dicts = checkpoint['dicts']
# tgt_model = build_model(model_opt, checkpoint['dicts'])
# check the embedding
lang_emb = copy.deepcopy(model.encoder.language_embedding.weight.data)
new_emb = copy.deepcopy(lang_emb)
for key in src_dicts['langs']:
old_idx = src_dicts['langs'][key]
new_idx = tgt_dicts['langs'][key]
print(key, old_idx, "->", new_idx)
new_emb[new_idx].copy_(lang_emb[old_idx])
model.encoder.language_embedding.weight.data.copy_(new_emb)
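# Worked example (hypothetical indices): if language 'de' has old index 2 and
# new index 5, the loop above copies embedding row 2 of the source model into
# row 5 of the rebuilt table, so every language keeps its vector under the
# target model's numbering.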
model_state_dict = model.state_dict()
save_checkpoint = {
'model': model_state_dict,
'dicts': tgt_dicts,
'opt': model_opt,
'epoch': -1,
'iteration': -1,
'batchOrder': None,
'optim': None
}
print("Saving converted model to %s" % opt.model_out)
torch.save(save_checkpoint, opt.model_out)
| 2,213
| 26
| 81
|
py
|
NMTGMinor
|
NMTGMinor-master/extract_wav2vec2_codebook.py
|
#!/usr/bin/env python
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
from torch.cuda.amp import autocast
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
                    help='A vocabulary list (one word per line). Only these words are generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weight for ensembles. Default as uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-scp_output', default='output.scp',
help="""Path to output the feature paths""")
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size (in audio samples)')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Function to see if adding new sentence will make the current batch
:param batch:
:param new_sent_size:
:param batch_size_words:
:return:
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
    # Because adding a new sentence will potentially enlarge the area of the padded rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
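# Note: the empty-batch guard in _is_oversized means the first sentence is
# always admitted, even when it alone would exceed batch_size.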
def write_codes(utts, codes, padding_mask, out_scp, opt):
# cache_wav = ''
codes = codes.cpu()
bsz, seq_len, groups = codes.size()
if padding_mask is not None:
lengths = (1 - padding_mask.long()).sum(dim=1).long()
else:
lengths = torch.LongTensor(bsz).fill_(seq_len)
assert len(utts) == bsz
for i in range(bsz):
code_ = codes[i, 0:lengths[i], :]
code_ = code_.prod(dim=-1, keepdim=False)
code_ = code_.tolist()
code_ = " ".join([str(c) for c in code_])
seg_name = utts[i]
# print(seg_name)
# print(code_)
out_scp.write(code_ + "\n")
# dic = {seg_name: feature_}
# from onmt.data.kaldiio.io import write_ark_file
# write_ark_file(out_ark, out_scp, dic)
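# Worked example for write_codes (hypothetical values): with groups=2 and
# per-frame codebook indices (3, 5), the line written out contains their
# product 3 * 5 = 15, i.e. the G group indices of each frame are collapsed
# into a single integer id.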
def build_data(src_sents):
from onmt.data.wav_dataset import WavDataset
src_data = src_sents
data_type = 'wav'
tgt_data = None
src_lang_data = [torch.Tensor([0])]
tgt_lang_data = None
return onmt.Dataset(src_data, tgt_data,
src_langs=src_lang_data, tgt_langs=tgt_lang_data,
batch_size_words=sys.maxsize,
max_src_len=sys.maxsize,
data_type=data_type,
batch_size_sents=sys.maxsize,
src_align_right=False,
past_src_data=None)
if __name__ == '__main__':
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
print("Loading Wav2vec 2.0 model ...")
from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2VecQuantizer
model = FairseqWav2VecQuantizer(opt.model)
print("Done")
if opt.cuda:
model = model.cuda()
model.eval()
scp_out = open(opt.scp_output, 'w')
audio_data = open(opt.src)
from onmt.utils import safe_readaudio
i = 0
n_models = len(opt.model.split("|"))
src_batch = list()
src_utts = list()
while True:
try:
line = next(audio_data).strip().split()
utt = line[0]
if len(line) == 2:
wav_path = line[1]
start = 0
end = 0
else:
wav_path, start, end = line[1], float(line[2]), float(line[3])
line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
except StopIteration:
break
src_length = line.size(0)
"""
        Read the quantized codes output by the wav2vec model and write them into a text file (one utterance per line)
"""
if _is_oversized(src_batch, src_length, opt.batch_size):
# If adding a new sentence will make the batch oversized
# Then do translation now, and then free the list
print("Batch sizes :", len(src_batch))
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
with torch.no_grad():
with autocast(enabled=opt.fp16):
codes, padding_mask = model(batch)
write_codes(src_utts, codes, padding_mask, scp_out, opt)
src_batch = []
src_utts = []
src_batch.append(line)
src_utts.append(utt)
# catch the last batch
if len(src_batch) != 0:
print("Batch sizes :", len(src_batch), )
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
        with torch.no_grad():
            with autocast(enabled=opt.fp16):
                codes, padding_mask = model(batch)
write_codes(src_utts, codes, padding_mask, scp_out, opt)
src_batch = []
src_utts = []
    audio_data.close()
scp_out.close()
| 7,425
| 32.151786
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/average_checkpoints_auto.py
|
#!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import os, sys
from onmt.model_factory import build_model, build_language_model, build_classifier, optimize_model
from copy import deepcopy
from onmt.utils import checkpoint_paths, normalize_gradients
import glob
from onmt.constants import add_tokenidx
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-models', required=True,
help='Path to model .pt file')
parser.add_argument('-type', default='seq2seq', help="""Type of models""")
parser.add_argument('-lm', action='store_true',
                    help='Language model (default is seq2seq model)')
parser.add_argument('-sort_by_date', action='store_true',
help='Sort the model files by date')
parser.add_argument('-output', default='model.averaged',
help="""Path to output averaged model""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-top', type=int, default=10,
help="Device to run on")
parser.add_argument('-method', default='mean',
help="method to average: mean|gmean")
def custom_build_model(opt, dict, lm=False, type='seq2seq', constants=None):
if type == 'seq2seq':
if not lm:
model = build_model(opt, dict, False, constants)
else:
model = build_language_model(opt, dict)
elif type == 'classifier':
model = build_classifier(opt, dict)
optimize_model(model)
return model
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
path = opt.models
if not opt.sort_by_date:
existed_save_files = checkpoint_paths(path)
else:
existed_save_files = glob.glob(path + "/" + "*.pt")
existed_save_files.sort(key=os.path.getmtime)
print("\n".join(existed_save_files))
# print(existed_save_files)
models = existed_save_files
# take the top
models = models[:opt.top]
# print(models)
#
n_models = len(models)
#
# checkpoint for main model
checkpoint = torch.load(models[0], map_location=lambda storage, loc: storage)
if 'optim' in checkpoint:
del checkpoint['optim']
main_checkpoint = checkpoint
# best_checkpoint = {
# 'model': deepcpy(main_checkpoint['model']),
# 'dicts': main_checkpoint['dicts'],
# 'opt': main_checkpoint['opt'],
# 'epoch': -1,
# 'iteration': -1,
# 'batchOrder': None,
# 'optim': None
# }
best_checkpoint = main_checkpoint
# print("Saving best model to %s" % opt.output + ".top")
# torch.save(best_checkpoint, opt.output + ".top")
model_opt = checkpoint['opt']
dicts = checkpoint['dicts']
onmt.constants = add_tokenidx(model_opt, onmt.constants, dicts)
constants = onmt.constants
# only create the object
model_opt.enc_state_dict = None
model_opt.dec_state_dict = None
print(model_opt.layers)
main_model = custom_build_model(model_opt, checkpoint['dicts'], lm=opt.lm, type=opt.type, constants=constants)
print("Loading main model from %s ..." % models[0])
try:
main_model.load_state_dict(checkpoint['model'])
    except RuntimeError:
        main_model.load_state_dict(checkpoint['model'], strict=False)
if opt.cuda:
main_model = main_model.cuda()
for i in range(1, len(models)):
model = models[i]
# checkpoint for models[i])
checkpoint = torch.load(model, map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# model_opt.enc_not_load_state = True
# model_opt.dec_not_load_state = True
model_opt.enc_state_dict = None
model_opt.dec_state_dict = None
# delete optim information to save GPU memory
if 'optim' in checkpoint:
del checkpoint['optim']
current_model = custom_build_model(model_opt, checkpoint['dicts'], lm=opt.lm, type=opt.type)
current_model.eval()
print("Loading model from %s ..." % models[i])
try:
current_model.load_state_dict(checkpoint['model'])
        except RuntimeError:
            current_model.load_state_dict(checkpoint['model'], strict=False)
if opt.cuda:
current_model = current_model.cuda()
if opt.method == 'mean':
# Sum the parameter values
for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
main_param.data.add_(param.data)
elif opt.method == 'gmean':
# Take the geometric mean of parameter values
for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
main_param.data.mul_(param.data)
else:
raise NotImplementedError
# Normalizing
if opt.method == 'mean':
for main_param in main_model.parameters():
main_param.data.div_(n_models)
elif opt.method == 'gmean':
for main_param in main_model.parameters():
main_param.data.pow_(1. / n_models)
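    # Worked numeric example (hypothetical values): with n_models = 3 and one
    # parameter taking values 2, 4 and 8 across checkpoints, 'mean' yields
    # (2 + 4 + 8) / 3 ~= 4.67 while 'gmean' yields (2 * 4 * 8) ** (1. / 3) = 4.0.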
# Saving
model_state_dict = main_model.state_dict()
save_checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': model_opt,
'epoch': -1,
'iteration': -1,
'batchOrder': None,
'optim': None
}
print("Saving averaged model to %s" % opt.output)
torch.save(save_checkpoint, opt.output)
if __name__ == "__main__":
main()
| 5,784
| 27.925
| 114
|
py
|
NMTGMinor
|
NMTGMinor-master/autoencoder.py
|
#!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
import time, datetime
from onmt.modules.loss import NMTLossFunc
from onmt.model_factory import build_model, init_model_parameters
from ae.Autoencoder import Autoencoder
from ae.Trainer import AETrainer
parser = argparse.ArgumentParser(description='train.py')
onmt.markdown.add_md_help_argument(parser)
from options import make_parser
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
parser.add_argument('-representation', type=str, default="EncoderHiddenState",
help="Representation for Autoencoder")
parser.add_argument('-auto_encoder_hidden_size', type=int, default=100,
help="Hidden size of autoencoder")
parser.add_argument('-auto_encoder_drop_out', type=float, default=0,
                    help="Dropout probability used in the autoencoder")
parser.add_argument('-auto_encoder_type', type=str, default="Baseline",
                    help="Type of autoencoder")
opt = parser.parse_args()
print(opt)
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
print("WARNING: You have a CUDA device, should run with -gpus 0")
torch.manual_seed(opt.seed)
def main():
if opt.data_format == 'raw':
start = time.time()
print("Loading data from '%s'" % opt.data)
if opt.data.endswith(".train.pt"):
print("Loading data from '%s'" % opt.data)
dataset = torch.load(opt.data)
else:
print("Loading data from %s" % opt.data + ".train.pt")
dataset = torch.load(opt.data + ".train.pt")
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Done after %s" % elapse)
trainData = onmt.Dataset(dataset['train']['src'],
dataset['train']['tgt'], opt.batch_size_words,
data_type=dataset.get("type", "text"),
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier)
validData = onmt.Dataset(dataset['valid']['src'],
dataset['valid']['tgt'], opt.batch_size_words,
data_type=dataset.get("type", "text"),
batch_size_sents=opt.batch_size_sents)
dicts = dataset['dicts']
if ("src" in dicts):
print(' * vocabulary size. source = %d; target = %d' %
(dicts['src'].size(), dicts['tgt'].size()))
else:
print(' * vocabulary size. target = %d' %
(dicts['tgt'].size()))
print(' * number of training sentences. %d' %
len(dataset['train']['src']))
print(' * maximum batch size (words per batch). %d' % opt.batch_size_words)
elif opt.data_format == 'bin':
from onmt.data.indexed_dataset import IndexedInMemoryDataset
dicts = torch.load(opt.data + ".dict.pt")
# ~ train = {}
train_path = opt.data + '.train'
train_src = IndexedInMemoryDataset(train_path + '.src')
train_tgt = IndexedInMemoryDataset(train_path + '.tgt')
trainData = onmt.Dataset(train_src,
train_tgt, opt.batch_size_words,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier)
valid_path = opt.data + '.valid'
valid_src = IndexedInMemoryDataset(valid_path + '.src')
valid_tgt = IndexedInMemoryDataset(valid_path + '.tgt')
validData = onmt.Dataset(valid_src,
valid_tgt, opt.batch_size_words,
batch_size_sents=opt.batch_size_sents)
else:
raise NotImplementedError
print('Building model...')
model = build_model(opt, dicts)
    autoencoder = Autoencoder(model, opt)
    """ Building the loss function """
    loss_function = nn.MSELoss(reduction='sum')  # equivalent to the deprecated size_average=False
nParams = sum([p.nelement() for p in autoencoder.parameters()])
print('* number of parameters: %d' % nParams)
# load nmt model
checkpoint = None
if opt.load_from:
checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
else:
raise NotImplementedError
if checkpoint is not None:
print('Loading model from checkpoint at %s' % opt.load_from)
model.load_state_dict(checkpoint['model'])
del checkpoint['model']
del checkpoint['optim']
del checkpoint
if len(opt.gpus) > 1 or opt.virtual_gpu > 1:
# ~ trainer = MultiGPUXETrainer(model, loss_function, trainData, validData, dataset, opt)
        raise NotImplementedError("Warning! Multi-GPU training is not fully tested and potential bugs may occur.")
else:
        trainer = AETrainer(autoencoder, model, loss_function, trainData, validData, dicts, opt)
trainer.run(save_file=False)
if __name__ == "__main__":
main()
| 5,533
| 34.703226
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/sample_lm.py
|
#!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
from onmt.model_factory import build_model
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-models', required=True,
help='Path to model .pt file')
parser.add_argument('-output', default='model.averaged',
help="""Path to output averaged model""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-method', default='mean',
help="method to average: mean|gmean")
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# opt.model should be a string of models, split by |
models = opt.models.split("|")
# print(models)
n_models = len(models)
print("Loading main model from %s ..." % models[0])
checkpoint = torch.load(models[0], map_location=lambda storage, loc: storage)
if 'optim' in checkpoint:
del checkpoint['optim']
main_checkpoint = checkpoint
model_opt = checkpoint['opt']
dicts = checkpoint['dicts']
main_model = build_model(model_opt, checkpoint['dicts'])
main_model.load_state_dict(checkpoint['model'])
if opt.cuda:
main_model = main_model.cuda()
for i in range(1, len(models)):
model = models[i]
print("Loading model from %s ..." % models[i])
checkpoint = torch.load(model, map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# delete optim information to save GPU memory
if 'optim' in checkpoint:
del checkpoint['optim']
current_model = build_model(model_opt, checkpoint['dicts'])
current_model.load_state_dict(checkpoint['model'])
if opt.cuda:
current_model = current_model.cuda()
if opt.method == 'mean':
# Sum the parameter values
for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
main_param.data.add_(param.data)
elif opt.method == 'gmean':
# Take the geometric mean of parameter values
for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
main_param.data.mul_(param.data)
else:
raise NotImplementedError
# Normalizing
if opt.method == 'mean':
for main_param in main_model.parameters():
main_param.data.div_(n_models)
elif opt.method == 'gmean':
for main_param in main_model.parameters():
main_param.data.pow_(1./n_models)
# Saving
model_state_dict = main_model.state_dict()
save_checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': model_opt,
'epoch': -1,
        'iteration': -1,
        'batchOrder': None,
'optim': None
}
print("Saving averaged model to %s" % opt.output)
torch.save(save_checkpoint, opt.output)
if __name__ == "__main__":
main()
| 3,478
| 26.393701
| 96
|
py
|
NMTGMinor
|
NMTGMinor-master/rescore.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
import apex
parser = argparse.ArgumentParser(description='rescore.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=2048,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
def reportScore(name, scoreTotal, wordsTotal):
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, scoreTotal / (wordsTotal + 1e-9),
name, math.exp(-scoreTotal / (wordsTotal + 1e-9))))
def addone(f):
for line in f:
yield line
yield None
def lenPenalty(s, l, alpha):
l_term = math.pow(l, alpha)
return s / l_term
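# Worked example (hypothetical numbers): lenPenalty(-10.0, 25, 0.6)
# = -10.0 / 25 ** 0.6 ~= -1.45; longer hypotheses are divided by a larger
# term, offsetting their naturally lower raw log-probabilities.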
def getSentenceFromTokens(tokens, input_type):
if input_type == 'word':
sent = " ".join(tokens)
elif input_type == 'char':
sent = "".join(tokens)
else:
raise NotImplementedError
return sent
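# e.g. getSentenceFromTokens(['a', 'b'], 'word') -> 'a b'
#      getSentenceFromTokens(['a', 'b'], 'char') -> 'ab'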
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# Always pick n_best
opt.n_best = opt.beam_size
if opt.output == "stdout":
outF = sys.stdout
else:
outF = open(opt.output, 'w')
predScoreTotal, predWordsTotal, goldScoreTotal, goldWordsTotal = 0, 0, 0, 0
srcBatch, tgtBatch, tgtScores = [], [], []
count = 0
tgtF = open(opt.tgt) if opt.tgt else None
    # choose the input reader depending on the source type
inFile = None
if opt.src == "stdin":
inFile = sys.stdin
opt.batch_size = 1
elif opt.encoder_type == "audio" and opt.asr_format == "h5":
inFile = h5.File(opt.src, 'r')
elif opt.encoder_type == "audio" and opt.asr_format == "scp":
import kaldiio
from kaldiio import ReadHelper
audio_data = iter(ReadHelper('scp:' + opt.src))
else:
inFile = open(opt.src)
    # initialize the rescorer (with models) and stuff
    rescorer = onmt.Rescorer(opt)
    if opt.dump_beam != "":
        import json
        # assumes onmt.Rescorer exposes initBeamAccum() like the translator
        rescorer.initBeamAccum()
if opt.encoder_type == "audio":
s_prev_context = []
t_prev_context = []
i = 0
while True:
if opt.asr_format == "h5":
if i == len(inFile):
break
line = np.array(inFile[str(i)])
i += 1
elif opt.asr_format == "scp":
try:
_, line = next(audio_data)
except StopIteration:
break
if opt.stride != 1:
line = line[0::opt.stride]
line = torch.from_numpy(line)
if opt.concat != 1:
add = (opt.concat - line.size()[0] % opt.concat) % opt.concat
z = torch.FloatTensor(add, line.size()[1]).zero_()
line = torch.cat((line, z), 0)
line = line.reshape((line.size()[0] // opt.concat, line.size()[1] * opt.concat))
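                # e.g. (hypothetical shape) with concat=4, a 10x40 feature matrix is
                # padded with 2 zero rows to 12x40, then reshaped to 3x160: sequence
                # length is traded for feature width.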
if opt.previous_context > 0:
s_prev_context.append(line)
                for k in range(1, opt.previous_context + 1):
                    if k < len(s_prev_context):
                        line = torch.cat((torch.cat((s_prev_context[-k - 1], torch.zeros(1, line.size()[1]))), line))
if len(s_prev_context) > opt.previous_context:
s_prev_context = s_prev_context[-1 * opt.previous_context:]
srcBatch += [line]
if tgtF:
# ~ tgt_tokens = tgtF.readline().split() if tgtF else None
tline = tgtF.readline().strip()
                twords = tline.split("|||")[0].strip()
                tscore = tline.split("|||")[1].strip() if "|||" in tline else ""
                if opt.input_type == 'word':
                    tgt_tokens = twords.split()
                elif opt.input_type == 'char':
                    tgt_tokens = list(twords)
                else:
                    raise NotImplementedError("Input type unknown")
                tgtBatch += [tgt_tokens]
                tgtScores += [tscore]
if len(srcBatch) < opt.batch_size:
continue
print("Batch size:", len(srcBatch), len(tgtBatch))
            goldScore, numGoldWords, allGoldScores = rescorer.rescore_asr(
                srcBatch, tgtBatch)
            count = translateBatch(opt, tgtF, count, outF,
                                   srcBatch, tgtBatch, tgtScores,
                                   goldScore, numGoldWords,
                                   allGoldScores, opt.input_type)
            srcBatch, tgtBatch, tgtScores = [], [], []
        if len(srcBatch) != 0:
            print("Batch size:", len(srcBatch), len(tgtBatch))
            goldScore, numGoldWords, allGoldScores = rescorer.rescore_asr(srcBatch, tgtBatch)
            count = translateBatch(opt, tgtF, count, outF, srcBatch, tgtBatch, tgtScores,
                                   goldScore, numGoldWords,
                                   allGoldScores, opt.input_type)
            srcBatch, tgtBatch, tgtScores = [], [], []
else:
for line in addone(inFile):
if line is not None:
if opt.input_type == 'word':
srcTokens = line.split()
elif opt.input_type == 'char':
srcTokens = list(line.strip())
else:
raise NotImplementedError("Input type unknown")
# for each source sentence, we read in n target
for n in range(opt.n_best):
# duplicate the srcTokens
srcBatch += [srcTokens]
tgtline = tgtF.readline()
tgt_text = tgtline.strip().split(' ||| ')[0]
tgt_score = tgtline.strip().split(' ||| ')[1]
if opt.input_type == 'word':
tgt_tokens = tgt_text.split() if tgtF else None
elif opt.input_type == 'char':
tgt_tokens = list(tgt_text.strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgtBatch += [tgt_tokens]
tgtScores += [tgt_score]
if len(srcBatch) < opt.batch_size * opt.n_best:
continue
else:
# at the end of file, check last batch
if len(srcBatch) == 0:
break
goldScore, numGoldWords, allGoldScores = rescorer.rescore(srcBatch, tgtBatch)
# convert output tensor to words
count = translateBatch(opt, tgtF, count, outF, srcBatch, tgtBatch, tgtScores,
goldScore, numGoldWords,
allGoldScores, opt.input_type)
            srcBatch, tgtBatch, tgtScores = [], [], []
if tgtF:
tgtF.close()
def translateBatch(opt, tgtF, count, outF, srcBatch, tgtBatch, tgtScores, goldScore,
numGoldWords, allGoldScores, input_type):
for b in range(len(tgtBatch)):
# if not opt.print_nbest:
# outF.write(getSentenceFromTokens(predBatch[b][0], input_type) + '\n')
# outF.flush()
# else:
# for n in range(opt.n_best):
# idx = n
# output_sent = getSentenceFromTokens(predBatch[b][idx], input_type)
# out_str = "%s ||| %.4f" % (output_sent, predScore[b][idx])
#
# print(out_str)
# outF.write(out_str + 'n')
# outF.flush()
tgtSent = getSentenceFromTokens(tgtBatch[b], input_type)
gold_score = goldScore[b]
prev_score = tgtScores[b] # string
outstr = "%s ||| %s %.4f" % (tgtSent, prev_score, gold_score)
outF.write(outstr + '\n')
outF.flush()
if opt.verbose:
if count % opt.beam_size == 0:
srcSent = getSentenceFromTokens(srcBatch[b], input_type)
print('SRC SENT %d: %s ' % (count // opt.beam_size + 1, srcSent))
print('')
print(outstr)
# if tgtF is not None:
# tgtSent = getSentenceFromTokens(tgtBatch[b], input_type)
# print('GOLD %d: %s ' % (count, tgtSent))
# print("GOLD SCORE: %.4f" % goldScore[b])
# # print("Single GOLD Scores:",end=" ")
# # for j in range(len(tgtBatch[b])):
# # print(allGoldScores[j][b].item(),end =" ")
# print ()
# if opt.print_nbest:
# print('\n BEST HYP:')
# for n in range(opt.n_best):
# idx = n
# out_str = "%s ||| %.4f" % (" ".join(predBatch[b][idx]), predScore[b][idx])
# print(out_str)
print('')
count += 1
return count
if __name__ == "__main__":
main()
| 13,352
| 38.979042
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/options.py
|
import argparse
def make_parser(parser):
# Data options
parser.add_argument('-data', required=True,
help='Path to the *-train.pt file from preprocess.py')
parser.add_argument('-data_format', required=False, default='raw',
help='Default data format: raw')
parser.add_argument('-data_cache_size', type=int, default=32,
help="""Caching for dataset (if implemented)""")
parser.add_argument('-multi_dataset', action='store_true',
help='Reading multiple datasets (sharing the same dictionary)')
parser.add_argument('-override_dict_from_checkpoint', action='store_true',
                        help='The dictionary will be overridden from the checkpoint instead of read from the data.')
parser.add_argument('-gem_training', action='store_true',
help='Gradient Episodic Memory training')
parser.add_argument('-train_sets', default=[], nargs='+', type=int,
help="Sets of training data. For example 0 1 2")
parser.add_argument('-valid_sets', default=[], nargs='+', type=int,
help="Sets of validation data.")
parser.add_argument('-train_set_orders', default=[], nargs='+', type=int,
help="The order of the training data for gradient episodic memory. For example 0 0 1 1 (must match the number of datasets).")
parser.add_argument('-run_validation_before_training', action='store_true',
help='Run validation before training')
parser.add_argument('-estimate_fisher_information', action='store_true',
help='Only estimate Fisher Information')
parser.add_argument('-load_fisher', default='', type=str,
help="""Load the fisher information from a checkpoint.""")
parser.add_argument('-ewc_importance', type=float, default=0.0,
help='Importance of EWC penalty')
parser.add_argument('-ewc_delay', type=int, default=0,
help='EWC penalty only applies after this delay (steps)')
parser.add_argument('-ewc_normalize', action='store_true',
help='EWC penalty being normalized')
parser.add_argument('-ewc_decay_every', type=int, default=10000,
help='EWC scale reduced after these steps')
parser.add_argument('-ewc_decay_scale', type=int, default=10,
help='EWC scale reduced after these steps')
parser.add_argument('-patch_vocab_multiplier', type=int, default=1,
help='Pad vocab so that the size divides by this multiplier')
parser.add_argument('-buffer_size', type=int, default=16,
help='The iterator fills the data buffer with this size')
parser.add_argument('-num_workers', type=int, default=0,
help='Number of extra workers for data fetching. 0=uses the main process. ')
parser.add_argument('-pin_memory', action="store_true",
help='The data loader pins memory into the GPU to reduce the bottleneck between GPU-CPU')
parser.add_argument('-bayes_by_backprop', action='store_true',
help="""Using Bayes-By-Backprop models in training""")
parser.add_argument('-neg_log_sigma1', type=float, default=0,
help='Coefficient for the KL divergence term')
parser.add_argument('-neg_log_sigma2', type=float, default=6,
help='Coefficient for the KL divergence term')
parser.add_argument('-prior_pi', type=float, default=0.5,
help='Coefficient for the KL divergence term')
# MODEL UTIL
parser.add_argument('-save_model', default='model',
help="""Model filename (the model will be saved as
<save_model>_epochN_PPL.pt where PPL is the
validation perplexity""")
parser.add_argument('-load_from', default='', type=str,
help="""If training from a checkpoint then this is the
path to the pretrained model.""")
parser.add_argument('-load_encoder_from', default='', type=str,
help="""Load encoder weight from a pretrained model.""")
parser.add_argument('-load_decoder_from', default='', type=str,
help="""Load encoder weight from a pretrained model.""")
parser.add_argument('-streaming', action='store_true',
help="""Using streaming in training""")
parser.add_argument('-stream_context', default='global', type=str,
help="""Using streaming in training""")
# MODEL CONFIG
parser.add_argument('-model', default='transformer',
help="Translation model. [transformer|relative_transformer ]")
parser.add_argument('-layers', type=int, default=2,
help='Number of layers in the Transformer encoder/decoder')
parser.add_argument('-encoder_layers', type=int, default=-1,
help='Number of layers in the LSTM encoder if different')
parser.add_argument('-max_pos_length', type=int, default=2048,
help='Maximum distance length for relative self-attention')
parser.add_argument('-max_src_length', type=int, default=320000,
help='Maximum source length for training')
parser.add_argument('-max_tgt_length', type=int, default=320000,
help='Maximum target length for training')
parser.add_argument('-learnable_position_encoding', action='store_true',
help="""Use embeddings as learnable position encoding.""")
parser.add_argument('-rotary_position_encoding', action='store_true',
help="""Use rotary position encoding.""")
parser.add_argument('-pos_emb_type', default='absolute',
help="Position embedding type. [absolute| relative_k| relative_kv]")
parser.add_argument('-fix_norm_output_embedding', action='store_true',
help="""Normalize the output embedding""")
# parser.add_argument('-asynchronous', action='store_true',
# help="""Different attention values for past and future""")
# parser.add_argument('-nce_noise', type=int, default=0,
# help="""Use noise contrastive estimation for the output layer.
# Default=0 (full softmax), increase to 100 to use 100 noise samples.""")
# parser.add_argument('-unidirectional', action='store_true',
# help="""Unidirectional encoder""")
parser.add_argument('-reconstruct', action='store_true',
help='Apply reconstruction with an additional decoder')
parser.add_argument('-mirror_loss', action='store_true',
help='Using mirror loss')
# parser.add_argument('-universal', action='store_true',
# help='Using one layer universally (recurrent)')
# parser.add_argument('-act', action='store_true',
# help='Using ACT for Universal models (TODO)')
# Transforer Model options
parser.add_argument('-use_language_embedding', action='store_true',
help="""Language embedding to add into the word embeddings""")
parser.add_argument('-language_embedding_type', default='sum', type=str,
help="""Language embedding combination type: sum|concat. (Concat uses more parameters)""")
parser.add_argument('-model_size', type=int, default=512,
help='Size of embedding / transformer hidden')
parser.add_argument('-inner_size', type=int, default=2048,
help='Size of inner feed forward layer')
parser.add_argument('-attribute_size', type=int, default=1,
help='Number of attributes')
parser.add_argument('-n_heads', type=int, default=8,
help='Number of heads for multi-head attention')
parser.add_argument('-checkpointing', type=int, default=0,
help='Number of checkpointed layers in the Transformer')
parser.add_argument('-attn_dropout', type=float, default=0.1,
help='Dropout probability; applied on multi-head attention.')
parser.add_argument('-emb_dropout', type=float, default=0.1,
help='Dropout probability; applied on top of embedding.')
parser.add_argument('-variational_dropout', action='store_true',
help='Apply variational dropout (same network per timestep)')
parser.add_argument('-weight_norm', action='store_true',
help='Apply weight normalization on linear modules')
parser.add_argument('-death_rate', type=float, default=0.0,
help='Stochastic layer death rate')
parser.add_argument('-death_rate_decoder', type=float, default=0.0,
help='Stochastic layer death rate')
parser.add_argument('-stochastic_sublayer', action='store_true',
help='Apply stochastic death rate for each sub-layer')
parser.add_argument('-activation_layer', default='linear_relu_linear', type=str,
help='The activation layer in each transformer block '
'linear_relu_linear|linear_swish_linear|maxout')
parser.add_argument('-time', default='positional_encoding', type=str,
help='Type of time representation positional_encoding|gru|lstm')
parser.add_argument('-residual_type', default='regular',
help='Type of residual type. regular|gated')
# parser.add_argument('-adaptive', type=str, default='shared',
# help='Universal adaptive layer. universal=UniversalTF|shared=factorized|unshared')
# Optimization options
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img].")
parser.add_argument('-input_size', type=int, default=2048,
help='Size of input features')
parser.add_argument('-init', default='normal',
help="How to init the weight. normal or uniform/xavier.")
parser.add_argument('-init_embedding', default='normal',
help="How to init the embedding matrices. Xavier or Normal.")
parser.add_argument('-batch_size_frames', type=int, default=204800,
help='Maximum batch size in frame dimension')
parser.add_argument('-batch_size_words', type=int, default=2048,
help='Maximum batch size in word dimension')
parser.add_argument('-batch_size_sents', type=int, default=99999999,
help='Maximum number of sentences in a batch')
parser.add_argument('-batch_size_update', type=int, default=-1,
help='Maximum number of words per update')
parser.add_argument('-update_frequency', type=int, default=1,
                        help='Maximum number of batches per update (will override batch_size_update)')
parser.add_argument('-batch_size_multiplier', type=int, default=1,
help='Maximum number of words per update')
parser.add_argument('-max_position_length', type=int, default=1024,
help='Maximum length for positional embedding')
parser.add_argument('-max_memory_size', type=int, default=1024,
help='Maximum memory size for buffering in transformer XL')
parser.add_argument('-extra_context_size', type=int, default=32,
help='Extra context size in transformer Xl')
parser.add_argument('-epochs', type=int, default=13,
help='Number of training epochs')
parser.add_argument('-param_init', type=float, default=0.1,
help="""Parameters are initialized over uniform distribution
with support (-param_init, param_init)""")
parser.add_argument('-optim', default='adam',
help="Optimization method. [sgd|adagrad|adadelta|adam]")
parser.add_argument('-zeror_optim', action="store_true",
help="""Use Zero redundancy optimizer""")
parser.add_argument('-max_grad_norm', type=float, default=5,
help="""If the norm of the gradient vector exceeds this,
renormalize it to have the norm equal to max_grad_norm""")
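    # e.g. a gradient with norm 12.5 and max_grad_norm=5 is rescaled by
    # 5 / 12.5 = 0.4 before the parameter update.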
# Dropout
parser.add_argument('-dropout', type=float, default=0.3,
                        help='Dropout probability; also used as the general value for FFN and residual dropout when those are set negative')
parser.add_argument('-ffn_dropout', type=float, default=-1,
help='Dropout probability; applied at the FFN.')
parser.add_argument('-residual_dropout', type=float, default=-1,
help='Dropout probability; applied at the residual connection.')
parser.add_argument('-word_dropout', type=float, default=0.0,
help='Dropout probability; applied on embedding indices.')
parser.add_argument('-switchout', type=float, default=0.0,
help='Switchout algorithm')
# Loss function
parser.add_argument('-label_smoothing', type=float, default=0.0,
help='Label smoothing value for loss functions.')
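    # With smoothing value eps, the one-hot target is typically replaced by
    # (1 - eps) on the gold token plus eps spread over the vocabulary, e.g.
    # eps=0.1 with |V|=10000 puts roughly 0.9 on the gold token and 1e-5 on
    # every other token.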
parser.add_argument('-true_zero_grad', action="store_true",
help='truly set grad to zero instead of None.')
# parser.add_argument('-curriculum', type=int, default=-1,
# help="""For this many epochs, order the minibatches based
# on source sequence length. Sometimes setting this to 1 will
# increase convergence speed.""")
parser.add_argument('-normalize_gradient', action="store_true",
help="""Normalize the gradients by number of tokens before updates""")
# parser.add_argument('-gradient_scaler', type=int, default=1,
# help='avoid gradient overflow with fp16')
# learning rate
parser.add_argument('-learning_rate', type=float, default=1.0,
help="""Starting learning rate. If adagrad/adadelta/adam is
used, then this is the global learning rate. Recommended
settings: sgd = 1, adagrad = 0.1,
adadelta = 1, adam = 0.001""")
parser.add_argument('-learning_rate_decay', type=float, default=1,
help="""If update_learning_rate, decay learning rate by
this much if (i) perplexity does not decrease on the
validation set or (ii) epoch has gone past
start_decay_at""")
parser.add_argument('-start_decay_at', type=int, default=99999,
help="""Start decaying every epoch after and including this
epoch""")
parser.add_argument('-warmup_steps', type=int, default=4096,
help="""Number of steps to increase the lr in noam""")
parser.add_argument('-max_steps', type=int, default=100000,
help="""Number of steps to train the model""")
parser.add_argument('-noam_step_interval', type=int, default=1,
help="""How many steps before updating the parameters""")
    parser.add_argument('-max_step', type=int, default=4000000,
                        help="""Hard upper limit on the number of training steps""")
    parser.add_argument('-starting_step', type=int, default=-1,
                        help="""Step count to start from (e.g. when resuming training)""")
parser.add_argument('-factorizing_step', type=int, default=0,
help="""How many steps before using the factorized parameters""")
parser.add_argument('-reset_optim', action='store_true',
help='Reset the optimizer running variables')
parser.add_argument('-beta1', type=float, default=0.9,
help="""beta_1 value for adam""")
parser.add_argument('-beta2', type=float, default=0.997,
help="""beta_2 value for adam""")
parser.add_argument('-weight_decay', type=float, default=0.0,
help="""weight decay (L2 penalty)""")
parser.add_argument('-amsgrad', action='store_true',
help='Using AMSGRad for adam')
parser.add_argument('-update_method', default='regular',
help="Type of update rule to use. Options are [regular|noam].")
# pretrained word vectors
parser.add_argument('-tie_weights', action='store_true',
help='Tie the weights of the encoder and decoder layer')
# parser.add_argument('-experimental', action='store_true',
# help='Set the model into the experimental mode (trying unverified features)')
parser.add_argument('-join_embedding', action='store_true',
help='Jointly train the embedding of encoder and decoder in one weight')
# parser.add_argument('-add_position_encoding', action='store_true',
# help='Adding pos encodings to embedding (like Transformer)')
parser.add_argument('-batch_ensemble', type=int, default=0,
help='To use batch ensemble algorithm')
parser.add_argument('-save_metrics', default='ppl',
help="Type of update rule to use. Options are [perplexity|ppl|accuracy|acc].")
# GPU
parser.add_argument('-gpus', default=[], nargs='+', type=int,
help="Use CUDA on the listed devices.")
parser.add_argument('-fp16', action='store_true',
help='Use half precision training')
parser.add_argument('-seed', default=-1, type=int,
help="Seed for deterministic runs.")
parser.add_argument('-log_interval', type=int, default=100,
help="Print stats at this interval.")
    parser.add_argument('-save_every', type=int, default=-1,
                        help="Save a checkpoint every this many steps.")
    parser.add_argument('-keep_save_files', type=int, default=5,
                        help="Number of saved checkpoint files to keep.")
parser.add_argument('-copy_generator', action='store_true',
help='Use the copy_generator')
parser.add_argument('-verbose', action='store_true',
help='Show more information about training (for Nerds)')
# FAST IMPLEMENTATION
parser.add_argument('-fast_xentropy', action="store_true",
help="""Fast cross entropy loss""")
parser.add_argument('-fast_xattention', action="store_true",
help="""Fast cross attention between encoder decoder""")
    parser.add_argument('-fast_self_attention', action="store_true",
                        help="""Fast self-attention implementation""")
    parser.add_argument('-fast_feed_forward', action="store_true",
                        help="""Fast feed-forward implementation""")
parser.add_argument('-macaron', action='store_true',
help='Macaron style network with 2 FFN per block.')
    parser.add_argument('-fused_ffn', action="store_true",
                        help="""Fused feed-forward implementation""")
parser.add_argument('-favor_attention', action="store_true",
help="""Use Favor+ Attention for faster self-attention""")
# for FUSION
parser.add_argument('-lm_checkpoint', default='', type=str,
help="""If training from a checkpoint then this is the
path to the pretrained model.""")
parser.add_argument('-fusion', action='store_true',
help='Use fusion training with language model')
parser.add_argument('-lm_seq_length', type=int, default=128,
help='Sequence length for the language model')
# for Speech
parser.add_argument('-reshape_speech', type=int, default=0,
help="Reshaping the speech data (0 is ignored, done at preprocessing).")
parser.add_argument('-concat', type=int, default=4,
help="Concatenate frames to downsample.")
parser.add_argument('-input_feature_size', type=int, default=40,
help="Input feature size.")
parser.add_argument('-augment_speech', action='store_true',
help='Use f/t augmentation for speech')
parser.add_argument('-wav2vec_spec_augment', action='store_true',
help='Use f/t augmentation for wav2vec')
parser.add_argument('-upsampling', action='store_true',
help='In case the data is downsampled during preprocess. This option will upsample the '
'samples again')
parser.add_argument('-cnn_downsampling', action='store_true',
help='Use CNN for downsampling instead of reshaping')
parser.add_argument('-zero_encoder', action='store_true',
help='Zero-out encoders during training')
parser.add_argument('-ctc_loss', type=float, default=0.0,
help='CTC Loss as additional loss function with this weight')
# parser.add_argument('-lfv_multilingual', action='store_true',
# help='Use multilingual language identifier to get LFV for each language')
    parser.add_argument('-bottleneck_size', type=int, default=64,
                        help="Bottleneck size for the LFV vector.")
    parser.add_argument('-conv_kernel', type=int, default=31,
                        help="Kernel size for the convolution in the Conformer block.")
    parser.add_argument('-no_batch_norm', action='store_true',
                        help="Remove batch norm to avoid NaN errors that can happen with spec augmentation.")
parser.add_argument('-depthwise_conv', action='store_true',
help='Use depthwise convolution in the encoder block')
parser.add_argument('-no_ffn', action='store_true',
help='No feedforward network in the speech encoder')
parser.add_argument('-multilingual_factorized_weights', action='store_true',
help='Factorize the weights in the model for multilingual')
parser.add_argument('-multilingual_factorized_weights_decoder', action='store_true',
help='Factorize the weights in the model decoder for multilingual')
parser.add_argument('-fast_factorize', action='store_true',
help='Fast Factorize the weights in the model for multilingual (Batch Ensemble style)')
parser.add_argument('-mfw_rank', type=int, default=1,
help="Rank of the mfw vectors.")
    parser.add_argument('-mfw_multiplicative', action='store_true',
                        help='Use an additional multiplicative factor: W = W^ * M + A')
    parser.add_argument('-mfw_no_bias', action='store_true',
                        help='Drop the additive bias term from the factorized weights')
parser.add_argument('-mfw_activation', type=str, default="none",
help="Using activation function for the MFW so W = f(W^ * M + A'). "
"Currently accepting gelu/silu")
    parser.add_argument('-mfw_atb_rank_scale', type=float, default=1.0,
                        help="Scale applied to the rank of the mfw attribute vectors.")
parser.add_argument('-freezing_steps', type=int, default=0,
help="Number of steps for freezing the mfw vectors.")
parser.add_argument('-multilingual_partitioned_weights', action='store_true',
help='Partition the weights in the multilingual models')
parser.add_argument('-mpw_factor_size', type=int, default=8,
help="Size of the language factor vector")
parser.add_argument('-multilingual_layer_norm', action='store_true',
help='New norm for each language')
parser.add_argument('-multilingual_linear_projection', action='store_true',
help='New linear projection for each language')
    parser.add_argument('-sub_encoder', type=int, default=4,
                        help='Number of layers in the sub-encoder')
parser.add_argument('-weight_drop', type=float, default=0.0,
help='dropout rate for the main weights of the MFW model')
    parser.add_argument('-multilingual_adapter', action='store_true',
                        help='Add a new adapter for each language')
    parser.add_argument('-adapter_bottleneck_size', type=int, default=1024,
                        help='Bottleneck size of the language adapters')
parser.add_argument('-ffn_activation', default='silu', type=str,
help='The activation layer in each transformer block '
'relu|gelu|silu|swish')
parser.add_argument('-ffn_glu', action='store_true',
help='Gated Linear Unit application at the FFN')
# for Reversible Transformer
parser.add_argument('-src_reversible', action='store_true',
help='Using reversible models for encoder')
parser.add_argument('-tgt_reversible', action='store_true',
help='Using reversible models for decoder')
    parser.add_argument('-debugging', action='store_true',
                        help='Enable debugging mode')
    parser.add_argument('-master_addr', default='localhost', type=str,
                        help="""Master address for distributed training""")
    parser.add_argument('-master_port', default='8888', type=str,
                        help="""Master port for distributed training""")
# for DISCOURSE-AWARE models
# parser.add_argument('-n_past', type=int, default=0,
# help='number of segments / utterances in the past')
# parser.add_argument('-n_future', type=int, default=0,
# help='number of segments / utterances in the future')
# For pretraining
# pretrained encoder
    parser.add_argument('-enc_pretrained_model', default="", type=str,
                        help="""Name of the pretrained encoder model""")
    parser.add_argument('-enc_stacked_pretrained_model', default="", type=str,
                        help="""Name of the stacked pretrained encoder model""")
    parser.add_argument('-enc_pretrain_hidden_size', type=int, default=768,
                        help='Hidden size of the pretrained encoder (e.g. BERT)')
    parser.add_argument('-s4_config_file', default="", type=str,
                        help="""Path to the S4 model configuration""")
    parser.add_argument('-enc_config_file', default="", type=str,
                        help="""Path to the source pretrained model configuration""")
    parser.add_argument('-enc_state_dict', default="", type=str,
                        help="""state_dict of the pretrained model for the source language""")
# parser.add_argument('-enc_not_load_state', action='store_true',
# help='only create a Bert Object, not load the state from pytorch modle or fituned model for src')
    parser.add_argument('-enc_pretrain_word_dropout', type=float, default=0.0,
                        help="""word dropout applied on bert""")
parser.add_argument('-enc_pretrain_emb_dropout', type=float, default=0.0,
help="""dropout applied on bert embedding""")
parser.add_argument('-enc_pretrain_attn_dropout', type=float, default=0.1,
help="""dropout on bert attention, corresponds to attention_probs_dropout_prob""")
parser.add_argument('-enc_pretrain_hidden_dropout', type=float, default=0.0,
help="""dropout applied on bert hidden, corresponds to hidden_dropout_prob""")
parser.add_argument('-checkpointing_ffn', action='store_true',
help='use gradient checkpointing on FFN layers')
parser.add_argument('-checkpointing_cross_attn', action='store_true',
help='use gradient checkpointing on Cross Attn layers')
parser.add_argument('-checkpointing_self_attn', action='store_true',
help='use gradient checkpointing on (wav2vec) self attn layers')
# pretrained decoder
    parser.add_argument('-dec_pretrained_model', default="", type=str,
                        help="""Name of the pretrained decoder model""")
    parser.add_argument('-dec_pretrain_hidden_size', type=int, default=768,
                        help='Hidden size of the pretrained decoder (e.g. BERT)')
    parser.add_argument('-dec_config_file', default="", type=str,
                        help="""Path to the target pretrained model configuration""")
    parser.add_argument('-dec_state_dict', default="", type=str,
                        help="""state_dict of the pretrained decoder model""")
    parser.add_argument('-dec_pretrain_word_dropout', type=float, default=0.0,
                        help="""word dropout applied on bert""")
parser.add_argument('-dec_pretrain_emb_dropout', type=float, default=0.1,
help="""dropout applied on bert embedding""")
parser.add_argument('-dec_pretrain_attn_dropout', type=float, default=0.1,
help="""dropout on bert attention, corresponds to attention_probs_dropout_prob""")
parser.add_argument('-dec_pretrain_hidden_dropout', type=float, default=0.1,
help="""dropout applied on bert hidden, corresponds to hidden_dropout_prob""")
parser.add_argument('-dec_gradient_checkpointing', action='store_true',
help='use gradient checkpointing on decoder')
parser.add_argument('-enc_gradient_checkpointing', action='store_true',
help='use gradient checkpointing on encoder')
parser.add_argument('-find_unused_parameters', action='store_true',
help='find unused parameters for torch DistributedDataParallel')
# special tokens
    parser.add_argument('-src_pad_word', type=str, default="<blank>",
                        help='SRC PAD token. Default is <blank>.')
    parser.add_argument('-src_unk_word', type=str, default="<unk>",
                        help='SRC UNK token. Default is <unk>.')
    parser.add_argument('-src_bos_word', type=str, default="<s>",
                        help='SRC BOS token. Default is <s>.')
    parser.add_argument('-src_eos_word', type=str, default="</s>",
                        help='SRC EOS token. Default is </s>.')
    parser.add_argument('-tgt_pad_word', type=str, default="<blank>",
                        help='TGT PAD token. Default is <blank>.')
    parser.add_argument('-tgt_unk_word', type=str, default="<unk>",
                        help='TGT UNK token. Default is <unk>.')
    parser.add_argument('-tgt_bos_word', type=str, default="<s>",
                        help='TGT BOS token. Default is <s>.')
    parser.add_argument('-tgt_eos_word', type=str, default="</s>",
                        help='TGT EOS token. Default is </s>.')
parser.add_argument('-rezero', action='store_true',
help='use ReZero residual mechanism')
parser.add_argument('-post_norm', action='store_true',
help='use post-layer norm')
parser.add_argument('-absolute_position_encoding', action='store_true',
help='use absolute position encoding for the Translator')
    parser.add_argument('-decoder_late_emb_scale', action='store_true',
                        help='Only scale the embedding very late at the decoder. This option is here '
                             'to fix the problem of the multilingual model w/ relative position.')
    parser.add_argument('-encoder_early_emb_scale', action='store_true',
                        help='Only scale the embedding early in the encoder. This option is here '
                             'to fix the problem of the multilingual model w/ relative position.')
    parser.add_argument('-sa_f', type=int, default=8,
                        help="""Frequency mask parameter (F) for SpecAugment""")
    parser.add_argument('-sa_t', type=int, default=64,
                        help="""Time mask parameter (T) for SpecAugment""")
parser.add_argument('-no_input_scale', action='store_true',
help='Do not scale the embeddings of the speech (the features) before transformer.')
parser.add_argument('-mpc', action='store_true',
help='Using masked predictive coding for speech models')
parser.add_argument('-load_pretrained_classifier', default='', type=str,
help="""If training from a checkpoint then this is the
path to the pretrained model.""")
parser.add_argument('-wav2vec2_pretrained_model', default='wav2vec2-large-lv60', type=str,
help="""Wav2vec2 model from HuggingFace. """)
parser.add_argument('-wav2vec2_quantize', action='store_true',
help='Keep the quantization part of Wav2vec 2.0')
parser.add_argument('-wav2vec2_dual_output', action='store_true',
help='Use both wav2vec quantized and continuous outputs for decoder')
parser.add_argument('-wav2vec2_relative_attention', action='store_true',
help='Add relative attention to Wav2vec 2.0 ')
parser.add_argument('-freeze_encoder', action='store_true',
help='Freeze the whole wav2vec weights.')
parser.add_argument('-freeze_encoder_self_attn', action='store_true',
help='Freeze the wav2vec self-attention weight.')
    parser.add_argument('-freeze_encoder_ffn', action='store_true',
                        help='Freeze the wav2vec feed-forward weights.')
parser.add_argument('-freeze_decoder', action='store_true',
help='Freeze the whole mbart decoder weights.')
    parser.add_argument('-freeze_decoder_self_attn', action='store_true',
                        help='Freeze the decoder self-attention weights.')
    parser.add_argument('-freeze_decoder_ffn', action='store_true',
                        help='Freeze the decoder feed-forward weights.')
parser.add_argument('-freeze_cross_attention', action='store_true',
help='Freeze the cross attention.')
parser.add_argument('-freeze_embedding', action='store_true',
help='Freeze the embedding.')
parser.add_argument('-virtual_adversarial_training_mode', type=int, default=0,
help='Virtual Adversarial Training. 0=disabled. 1=kl_loss. 2=ce. 3=kl_loss + ce.')
parser.add_argument('-wav2vec_adapter', type=int, default=0,
help='Adapter for wav2vec model')
    parser.add_argument('-decoder_adapter', type=int, default=0,
                        help='Adapter for the decoder')
parser.add_argument('-mutual_modality_training', type=float, default=0,
help='Coefficient for the Mutual Modality Training term')
    parser.add_argument('-contrastive_loss_coeff', type=float, default=0.0,
                        help='Coefficient for the contrastive loss term')
    parser.add_argument('-predict_language', action='store_true',
                        help='Add an auxiliary language prediction objective')
return parser
def backward_compatible(opt):
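    """Fill in default values for options that did not exist when an older
    checkpoint was saved, so that old checkpoints keep loading with newer
    code. Mutates and returns opt."""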
# FOR BACKWARD COMPATIBILITY
if not hasattr(opt, 'predict_language'):
opt.predict_language = False
if not hasattr(opt, 'model'):
opt.model = 'recurrent'
if not hasattr(opt, 'layer_norm'):
opt.layer_norm = 'slow'
if not hasattr(opt, 'attention_out'):
opt.attention_out = 'default'
if not hasattr(opt, 'residual_type'):
opt.residual_type = 'regular'
if not hasattr(opt, 'input_size'):
opt.input_size = 40
if not hasattr(opt, 'init_embedding'):
opt.init_embedding = 'normal'
if not hasattr(opt, 'ctc_loss'):
opt.ctc_loss = 0
if not hasattr(opt, 'encoder_layers'):
opt.encoder_layers = -1
if not hasattr(opt, 'fusion'):
opt.fusion = False
if not hasattr(opt, 'cnn_downsampling'):
opt.cnn_downsampling = False
if not hasattr(opt, 'switchout'):
opt.switchout = 0.0
if not hasattr(opt, 'variational_dropout'):
opt.variational_dropout = False
if not hasattr(opt, 'copy_generator'):
opt.copy_generator = False
if not hasattr(opt, 'upsampling'):
opt.upsampling = False
if not hasattr(opt, 'double_position'):
opt.double_position = False
if not hasattr(opt, 'max_pos_length'):
opt.max_pos_length = 0
if not hasattr(opt, 'learnable_position_encoding'):
opt.learnable_position_encoding = False
if not hasattr(opt, 'use_language_embedding'):
opt.use_language_embedding = False
if not hasattr(opt, 'language_embedding_type'):
opt.language_embedding_type = "sum"
if not hasattr(opt, 'asynchronous'):
opt.asynchronous = False
if not hasattr(opt, 'bidirectional'):
opt.bidirectional = False
if not hasattr(opt, 'fix_norm_output_embedding'):
opt.fix_norm_output_embedding = False
if not hasattr(opt, 'mirror_loss'):
opt.mirror_loss = False
if not hasattr(opt, 'max_memory_size'):
opt.max_memory_size = 0
if not hasattr(opt, 'stream_context'):
opt.stream_context = 'local'
if not hasattr(opt, 'extra_context_size'):
opt.extra_context_size = 0
if not hasattr(opt, 'experimental'):
opt.experimental = False
if not hasattr(opt, 'reconstruct'):
opt.reconstruct = False
if not hasattr(opt, 'unidirectional'):
opt.unidirectional = False
if not hasattr(opt, 'lsh_src_attention'):
opt.lsh_src_attention = False
if not hasattr(opt, 'src_reversible'):
opt.src_reversible = False
if not hasattr(opt, 'tgt_reversible'):
opt.tgt_reversible = False
if not hasattr(opt, 'fast_xentropy'):
opt.fast_xentropy = False
if not hasattr(opt, 'fast_xattention'):
opt.fast_xattention = False
if not hasattr(opt, 'fast_self_attention'):
opt.fast_self_attention = False
if not hasattr(opt, 'fast_feed_forward'):
opt.fast_feed_forward = False
if not hasattr(opt, 'fused_ffn'):
opt.fused_ffn = False
if not hasattr(opt, 'concat'):
opt.concat = 4
if not hasattr(opt, 'input_feature_size'):
opt.input_feature_size = 40
if not hasattr(opt, 'bayes_by_backprop'):
opt.bayes_by_backprop = False
if not hasattr(opt, 'add_position_encoding'):
opt.add_position_encoding = False
if not hasattr(opt, 'batch_ensemble'):
opt.batch_ensemble = 0
if not hasattr(opt, 'multilingual_factorized_weights'):
opt.multilingual_factorized_weights = False
if not hasattr(opt, 'multilingual_factorized_weights_decoder'):
opt.multilingual_factorized_weights_decoder = False
if not hasattr(opt, 'mfw_rank'):
opt.mfw_rank = 1
if not hasattr(opt, 'mfw_no_bias'):
opt.mfw_no_bias = False
if not hasattr(opt, 'lfv_multilingual'):
opt.lfv_multilingual = False
if not hasattr(opt, 'nce_noise'):
opt.nce_noise = 0
if not hasattr(opt, 'mfw_multiplicative'):
opt.mfw_multiplicative = False
if not hasattr(opt, 'fast_factorize'):
opt.fast_factorize = False
if not hasattr(opt, 'macaron'):
opt.macaron = False
if not hasattr(opt, 'depthwise_conv'):
opt.depthwise_conv = False
if not hasattr(opt, 'fused_ffn'):
opt.fused_ffn = False
if not hasattr(opt, 'no_batch_norm'):
opt.no_batch_norm = False
if not hasattr(opt, 'no_ffn'):
opt.no_ffn = False
if not hasattr(opt, 'multilingual_partitioned_weights'):
opt.multilingual_partitioned_weights = False
if not hasattr(opt, 'mpw_factor_size'):
opt.mpw_factor_size = 1
if not hasattr(opt, 'multilingual_layer_norm'):
opt.multilingual_layer_norm = False
if not hasattr(opt, 'multilingual_linear_projection'):
opt.multilingual_linear_projection = False
if not hasattr(opt, 'weight_drop'):
opt.weight_drop = 0.0
if not hasattr(opt, 'multilingual_adapter'):
opt.multilingual_adapter = False
if not hasattr(opt, 'adapter_bottleneck_size'):
opt.adapter_bottleneck_size = 0.0
if not hasattr(opt, 'mfw_activation'):
opt.mfw_activation = "none"
if not hasattr(opt, 'src_pad_word'):
opt.src_pad_word = '<blank>'
if not hasattr(opt, 'src_unk_word'):
opt.src_unk_word = '<unk>'
if not hasattr(opt, 'src_bos_word'):
opt.src_bos_word = '<s>'
if not hasattr(opt, 'src_eos_word'):
opt.src_eos_word = '</s>'
if not hasattr(opt, 'tgt_pad_word'):
opt.tgt_pad_word = '<blank>'
if not hasattr(opt, 'tgt_unk_word'):
opt.tgt_unk_word = '<unk>'
if not hasattr(opt, 'tgt_bos_word'):
opt.tgt_bos_word = '<s>'
if not hasattr(opt, 'tgt_eos_word'):
opt.tgt_eos_word = '</s>'
if not hasattr(opt, 'enc_pretrained_model'):
opt.enc_pretrained_model = ''
if not hasattr(opt, 'dec_pretrained_model'):
opt.dec_pretrained_model = ''
if not hasattr(opt, 'rezero'):
opt.rezero = False
if not hasattr(opt, 'sa_f'):
opt.sa_f = 8
if not hasattr(opt, 'sa_t'):
opt.sa_t = 64
if not hasattr(opt, 'ffn_activation'):
opt.ffn_activation = 'relu'
if not hasattr(opt, 'ffn_glu'):
opt.ffn_glu = False
if not hasattr(opt, 'absolute_position_encoding'):
opt.absolute_position_encoding = False
if not hasattr(opt, 'rotary_position_encoding'):
opt.rotary_position_encoding = False
if not hasattr(opt, 'decoder_late_emb_scale'):
opt.decoder_late_emb_scale = False
if not hasattr(opt, 'encoder_early_emb_scale'):
opt.encoder_early_emb_scale = False
if not hasattr(opt, 'no_input_scale'):
opt.no_input_scale = False
if not hasattr(opt, 'stochastic_sublayer'):
opt.stochastic_sublayer = False
if not hasattr(opt, 'ffn_dropout'):
opt.ffn_dropout = opt.dropout
if not hasattr(opt, 'residual_dropout'):
opt.residual_dropout = opt.dropout
if not hasattr(opt, 'post_norm'):
opt.post_norm = False
if not hasattr(opt, 'favor_attention'):
opt.favor_attention = False
if not hasattr(opt, 'wav2vec_spec_augment'):
opt.wav2vec_spec_augment = False
if not hasattr(opt, 'wav2vec_adapter'):
opt.wav2vec_adapter = 0
if not hasattr(opt, 'wav2vec2_quantize'):
opt.wav2vec2_quantize = False
if not hasattr(opt, 'wav2vec2_dual_output'):
opt.wav2vec2_dual_output = False
if not hasattr(opt, 'decoder_adapter'):
opt.decoder_adapter = 0
if not hasattr(opt, 'freeze_encoder'):
opt.freeze_encoder = False
if not hasattr(opt, 'freeze_embedding'):
opt.freeze_embedding = False
if not hasattr(opt, 'freeze_decoder'):
opt.freeze_decoder = False
if not hasattr(opt, 'freeze_cross_attention'):
opt.freeze_cross_attention = False
if not hasattr(opt, 'enc_stacked_pretrained_model'):
opt.enc_stacked_pretrained_model = ""
if not hasattr(opt, 'mfw_atb_rank_scale'):
opt.mfw_atb_rank_scale = 0.125
if not hasattr(opt, 'wav2vec2_relative_attention'):
opt.wav2vec2_relative_attention = False
return opt
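# Typical (hypothetical) usage when loading an older checkpoint:
#   checkpoint = torch.load('model.pt', map_location='cpu')
#   checkpoint['opt'] = backward_compatible(checkpoint['opt'])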
| 43,910
| 49.822917
| 149
|
py
|
NMTGMinor
|
NMTGMinor-master/extend_weight.py
|
#!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import os, sys
from onmt.model_factory import build_model, build_language_model, build_classifier, optimize_model
from copy import deepcopy
from onmt.utils import checkpoint_paths, normalize_gradients
import glob
import torch.nn as nn
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-output', default='model.averaged',
                    help="""Path to the output extended model""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-n_languages', type=int, default=10,
                    help="Number of languages to extend the factorized weights to")
def custom_build_model(opt, dict, lm=False, type='seq2seq'):
if type == 'seq2seq':
if not lm:
model = build_model(opt, dict)
else:
model = build_language_model(opt, dict)
elif type == 'classifier':
model = build_classifier(opt, dict)
optimize_model(model)
return model
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# checkpoint for main model
checkpoint = torch.load(opt.model, map_location=lambda storage, loc: storage)
if 'optim' in checkpoint:
del checkpoint['optim']
model_opt = checkpoint['opt']
dicts = checkpoint['dicts']
# extending the weights
def is_factorize_params(p_name):
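        """Return True if p_name is one of the per-language factorized weight
        factors (r_*/s_* and rm_*/sm_* vectors) whose first dimension indexes
        languages and therefore needs to be extended."""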
# feed forward neural net
if p_name.endswith(".r_i") or p_name.endswith(".s_i") \
or p_name.endswith(".r_o") or p_name.endswith(".s_o") \
or p_name.endswith(".r_p") or p_name.endswith(".s_p"):
return True
# if p_name.endswith(".sub_r_i") or p_name.endswith(".sub_s_i") \
# or p_name.endswith(".sub_r_o") or p_name.endswith(".sub_s_o") \
# or p_name.endswith(".sub_r_p") or p_name.endswith(".sub_s_p"):
# return True
if p_name.endswith(".rm_i") or p_name.endswith(".sm_i") or \
p_name.endswith(".rm_o") or p_name.endswith(".sm_o") or \
p_name.endswith(".rm_p") or p_name.endswith(".sm_p"):
return True
if p_name.endswith(".r_q") or p_name.endswith(".s_q") \
or p_name.endswith(".r_o") or p_name.endswith(".s_o") \
or p_name.endswith(".r_kv") or p_name.endswith(".s_kv"):
return True
if p_name.endswith(".rm_q") or p_name.endswith(".sm_q") \
or p_name.endswith(".rm_o") or p_name.endswith(".sm_o") \
or p_name.endswith(".rm_kv") or p_name.endswith(".sm_kv"):
return True
return False
# Saving
model_state_dict = checkpoint['model']
for name in model_state_dict:
if is_factorize_params(name):
param = model_state_dict[name]
sizes = list(param.size())
print(name)
# initialize it
if name.endswith("r_i") or name.endswith("r_o") or name.endswith("r_kv") or name.endswith("r_q") or name.endswith("r_p") or \
name.endswith("s_i") or name.endswith("s_o") or name.endswith("s_kv") or name.endswith("s_q") or name.endswith(
"s_p"):
std = 0.02
prev_n_languages = sizes[0]
sizes[0] = max(opt.n_languages, sizes[0])
# new parameter
p = param.new_zeros(sizes)
nn.init.normal_(p, 0.0, std)
p[0:prev_n_languages].copy_(param)
elif name.endswith("rm_i") or name.endswith("rm_o") or name.endswith("rm_kv") or name.endswith("rm_q") or name.endswith("rm_p") or \
name.endswith("sm_i") or name.endswith("sm_o") or name.endswith("sm_kv") or name.endswith("sm_q") or name.endswith(
"sm_p"):
rank = sizes[1]
fast = (sizes[0] > 1)
prev_n_languages = sizes[0]
if fast:
# new parameter
sizes[0] = max(opt.n_languages, sizes[0])
p = param.new_zeros(sizes)
else:
sizes[0] = 1
p = param.new_zeros(sizes)
constant = math.sqrt(1.0 / rank) if fast else 1
nn.init.constant_(p, constant)
if fast:
p[0:prev_n_languages].copy_(param)
else:
p.copy_(param)
model_state_dict[name] = p
save_checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': model_opt,
'epoch': -1,
'iteration': -1,
'batchOrder': None,
'optim': None
}
output = opt.model + ".extend" + str(opt.n_languages)
print("Saving averaged model to %s" % output)
torch.save(save_checkpoint, output)
if __name__ == "__main__":
main()
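# Example invocation (hypothetical paths): extend a checkpoint's factorized
# weights to cover 20 languages; the result is saved next to the input as
# <model>.extend20:
#   python extend_weight.py -model checkpoint.pt -n_languages 20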
| 5,225
| 31.259259
| 144
|
py
|
NMTGMinor
|
NMTGMinor-master/preprocess_triangle.py
|
#!/usr/bin/env python
import onmt
import onmt.markdown
import argparse
import torch
import subprocess
import time, datetime
from onmt.data.binarizer import Binarizer
from onmt.data.binarizer import SpeechBinarizer
from onmt.data.indexed_dataset import IndexedDatasetBuilder
import numpy as np
import warnings
import os
from os.path import dirname, abspath
import gc
warnings.filterwarnings("ignore", category=UserWarning)
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-multi_dataset', action='store_true',
help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-multi_mirror', action='store_true',
                    help="Additionally save a mirrored (target-to-source) copy of each dataset")
parser.add_argument('-resume', action='store_true',
                    help="If a dataset has already been created, skip it and create the next one")
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-src_type', default="text",
help="Type of the source input. Options are [text|img|audio].")
parser.add_argument('-sort_type', default="ascending",
help="Type of sorting. Options are [ascending|descending].")
parser.add_argument('-src_img_dir', default=".",
help="Location of source images")
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
                    help="Concatenate sequential audio features to decrease sequence length")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-data_type', default="int64",
help="Input type for storing text (int64|int32|int|int16) to reduce memory load")
parser.add_argument('-format', default="raw",
help="Save data format: binary or raw. Binary should be used to load faster")
parser.add_argument('-external_tokenizer', default="",
                    help="External tokenizer from Huggingface. Currently supports BART variants.")
parser.add_argument('-train_src', required=True,
help="Path to the training source data")
parser.add_argument('-past_train_src', default="",
                    help="Path to the past-context training source data")
parser.add_argument('-future_train_src', default="",
                    help="Path to the future-context training source data")
parser.add_argument('-train_tgt', required=True,
                    help="Path to the training target data")
parser.add_argument('-aux_train_tgt', default="",
                    help="Path to the auxiliary training target data")
parser.add_argument('-valid_src', required=True,
help="Path to the validation source data")
parser.add_argument('-past_valid_src', default="",
                    help="Path to the past-context validation source data")
parser.add_argument('-future_valid_src', default="",
                    help="Path to the future-context validation source data")
parser.add_argument('-valid_tgt', required=True,
                    help="Path to the validation target data")
parser.add_argument('-aux_valid_tgt', default="",
                    help="Path to the auxiliary validation target data")
parser.add_argument('-train_src_lang', default="src",
help="Language(s) of the source sequences.")
parser.add_argument('-train_src_atbs', default="",
                    help="Attribute(s) of the source sequences.")
parser.add_argument('-train_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-train_tgt_atbs', default="",
                    help="Attribute(s) of the target sequences.")
parser.add_argument('-valid_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-valid_src_atbs', default="",
                    help="Attribute(s) of the source sequences.")
parser.add_argument('-valid_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-valid_tgt_atbs', default="",
                    help="Attribute(s) of the target sequences.")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-src_vocab_size', type=int, default=9999999,
help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=9999999,
help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
help="Path to an existing target vocabulary")
parser.add_argument('-load_dict',
                    help="Path to an existing dictionary to load (e.g. from another project)")
parser.add_argument('-src_seq_length', type=int, default=10000,
help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=10000,
help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
# tokens
parser.add_argument('-src_bos_token', type=str, default="<s>",
                    help='SRC BOS token. Default is <s>.')
parser.add_argument('-src_eos_token', type=str, default="</s>",
                    help='SRC EOS token. Default is </s>.')
parser.add_argument('-src_unk_token', type=str, default="<unk>",
                    help='SRC UNK token. Default is <unk>.')
parser.add_argument('-src_pad_token', type=str, default="<blank>",
                    help='SRC PAD token. Default is <blank>.')
parser.add_argument('-tgt_bos_token', type=str, default="<s>",
                    help='TGT BOS token. Default is <s>.')
parser.add_argument('-tgt_eos_token', type=str, default="</s>",
                    help='TGT EOS token. Default is </s>.')
parser.add_argument('-tgt_unk_token', type=str, default="<unk>",
                    help='TGT UNK token. Default is <unk>.')
parser.add_argument('-tgt_pad_token', type=str, default="<blank>",
                    help='TGT PAD token. Default is <blank>.')
parser.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
parser.add_argument('-asr', action='store_true',
help="prepare data for asr task")
parser.add_argument('-asr_format', default="h5",
help="Format of asr data h5 or scp")
parser.add_argument('-lm', action='store_true',
help="prepare data for LM task")
parser.add_argument('-fp16', action='store_true',
help="store ASR data in fp16")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
parser.add_argument('-load_bpe_voc', action='store_true', help='load an existing BPE vocabulary (special tokens are prepended before loading)')
parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)')
parser.add_argument('-sort_by_target', action='store_true', help='sort data by target sequence length')
parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target')
parser.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
parser.add_argument('-reshape_speech', type=int, default=1,
                    help="Reshaping the speech segments here. Mostly for compatibility.")
parser.add_argument('-num_threads', type=int, default=1,
help="Number of threads for multiprocessing")
parser.add_argument('-verbose', action='store_true',
help="Print out information during preprocessing")
opt = parser.parse_args()
torch.manual_seed(opt.seed)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
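    """Build a vocabulary from `filenames`, seeded with the source or target
    special tokens depending on `name`, and prune it to at most `size` entries."""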
if name == "source":
vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
opt.src_bos_token, opt.src_eos_token],
lower=opt.lower)
elif name == "target":
vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
opt.tgt_bos_token, opt.tgt_eos_token],
lower=opt.lower)
else:
print("Warning: check the name")
exit(-1)
for filename in filenames:
print("Generating vocabulary from file %s ... " % filename)
onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)
original_size = vocab.size()
vocab = vocab.prune(size)
print('Created dictionary of size %d (pruned from %d)' %
(vocab.size(), original_size))
return vocab
def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, num_workers=1):
vocab = None
if vocab_file is not None:
# If given, load existing word dictionary.
print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...')
if not opt.load_bpe_voc:
vocab = onmt.Dict()
else:
if name == "target":
vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
opt.tgt_bos_token, opt.tgt_eos_token],
lower=opt.lower)
elif name == "source":
vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
opt.src_bos_token, opt.src_eos_token],
lower=opt.lower)
else:
print("Warning: name should be source or target")
exit(-1)
vocab.loadFile(vocab_file)
print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
if vocab is None:
print('Building ' + name + ' vocabulary...')
        gen_word_vocab = make_vocab(name, data_files, vocab_size, tokenizer, num_workers=num_workers)
vocab = gen_word_vocab
print()
return vocab
def save_vocabulary(name, vocab, file):
print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
vocab.writeFile(file)
def save_dataset(path, data, format, dicts, src_type):
# Each dataset is comprised of the following components:
# src: tensors for the source vectors, or the scp_path (in ASR case)
# tgt: tensors for the target vectors
# src_lang: tensors for the source language ids (simplified)
# tgt_lang: tensors for the target language ids (simplified)
# convert all datasets to pytorch tensors and save to .pt
if format in ['raw', 'bin']:
print('Saving data to ' + os.path.join(path, 'data.pt') + '...')
        save_data = {'type': opt.src_type,
                     'data': data}
torch.save(save_data, os.path.join(path, 'data.pt'))
print("Done")
# for ASR only
elif format in ['scp', 'scpmem', 'wav']:
print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
assert opt.asr, "ASR data format is required for this memory indexed format"
# TODO: changing this to before saving everything
# torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['tgt', 'aux_tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
if set_ not in data or data[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
# add item from training data to the indexed data
for tensor in data[set_]:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
del indexed_data
for set_ in ['src_sizes', 'tgt_sizes']:
if data[set_] is not None:
np_array = np.asarray(data[set_])
np.save(os.path.join(path, "data.%s.npy") % set_, np_array)
else:
print("Training %s not found " % set_)
# Finally save the audio path
torch.save(data['src'], os.path.join(path, 'data.scp_path.pt'))
if 'prev_src' in data and data['prev_src'] is not None:
torch.save(data['prev_src'], os.path.join(path, 'data.prev_scp_path.pt'))
print("Done")
elif opt.format in ['mmap', 'mmem']:
print('Saving data to memory indexed data files')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
if opt.asr:
print("ASR data format isn't compatible with memory indexed format")
raise AssertionError
# save dicts in this format
# torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
if set_ not in data or data[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
# add item from training data to the indexed data
for tensor in data[set_]:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
del indexed_data
for set_ in ['src_sizes', 'tgt_sizes']:
if data[set_] is not None:
np_array = np.asarray(data[set_])
np.save(os.path.join(path, "data.%s.npy" % set_), np_array)
else:
print("Set %s not found " % set_)
def make_lm_data(tgt_file, tgt_dicts, max_tgt_length=1000, input_type='word', data_type='int32'):
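    """Binarize a target-side file for language modelling: each line is
    converted to indices (terminated by EOS) and all lines are concatenated,
    after an initial EOS, into a single 1-D tensor."""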
tgt = []
sizes = []
count, ignored = 0, 0
print('Processing %s ...' % (tgt_file))
tgtf = open(tgt_file)
    # look up the index of the EOS token (filling with the raw token string would fail)
    eos = tgt_dicts.convertToIdx([], opt.tgt_unk_token, None, opt.tgt_eos_token)
# print(eos.size())
tensors = [eos]
# find the number of words in the sentence
while True:
tline = tgtf.readline()
# normal end of file
if tline == "":
break
tline = tline.strip()
# source and/or target are empty
if tline == "":
print('WARNING: ignoring an empty line (' + str(count + 1) + ')')
continue
if input_type == 'word':
tgt_words = tline.split()
elif input_type == 'char':
tgt_words = split_line_by_char(tline)
tensor = tgt_dicts.convertToIdx(tgt_words,
opt.tgt_unk_token,
None,
opt.tgt_eos_token,
type=data_type)
# print(tensor.size())
tensors.append(tensor)
count = count + 1
if count % opt.report_every == 0:
print('... %d sentences prepared' % count)
tgtf.close()
# concatenate all tensors into one
tensor = torch.cat(tensors, dim=-1)
return tensor
def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=64, max_tgt_length=64,
add_bos=True, data_type='int64', num_workers=1, verbose=False,
external_tokenizer=None, src_lang=None, tgt_lang=None, lang_list=[],
early_save=False, savedir="", mirror=False, mirror_savedir=""):
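    """Binarize a parallel source/target file pair. With `early_save` the
    binarized tensors are streamed straight into memory-mapped .bin/.idx files
    under `savedir` (optionally symlinked as a mirrored tgt-to-src dataset
    under `mirror_savedir`) and (None, None, None, None) is returned;
    otherwise the tensors and their sizes are returned in memory."""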
src, tgt = [], []
src_sizes = []
tgt_sizes = []
if type(lang_list) is dict:
lang_list = sorted(list(lang_list.keys()))
print("[INFO] Binarizing file %s ..." % src_file)
binarized_src = Binarizer.binarize_file(src_file, src_dicts, tokenizer,
bos_word=None, eos_word=None,
data_type=data_type,
num_workers=num_workers, verbose=verbose,
external_tokenizer=external_tokenizer,
lang=src_lang, lang_list=lang_list, target=False
)
if early_save:
os.makedirs(savedir, exist_ok=True)
if mirror:
os.makedirs(mirror_savedir, exist_ok=True)
src_len = len(binarized_src['data'])
print("Saving source data to %s .... with %d entries" % (savedir, src_len))
if data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "src"), dtype=dtype)
# add item from training data to the indexed data
for tensor in binarized_src['data']:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "src"))
del binarized_src['data']
gc.collect()
np_array = np.asarray(binarized_src['sizes'])
np.save(os.path.join(savedir, "data.%s.npy" % "src_sizes"), np_array)
del binarized_src
del indexed_data
del np_array
gc.collect()
if mirror:
print("Saving mirrrored target data to %s .... with %d entries" % (mirror_savedir, src_len))
source = os.path.join(savedir, "data.%s.bin" % "src")
target = os.path.join(mirror_savedir, "data.%s.bin" % "tgt")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.idx" % "src")
target = os.path.join(mirror_savedir, "data.%s.idx" % "tgt")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.npy" % "src_sizes")
target = os.path.join(mirror_savedir, "data.%s.npy" % "tgt_sizes")
os.symlink(os.path.abspath(source), target)
if add_bos:
tgt_bos_word = opt.tgt_bos_token
else:
tgt_bos_word = None
print("[INFO] Binarizing file %s ..." % tgt_file)
binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
data_type=data_type,
num_workers=num_workers, verbose=verbose,
external_tokenizer=external_tokenizer,
lang=tgt_lang, lang_list=lang_list, target=True
)
if early_save:
tgt_len = len(binarized_tgt['data'])
assert tgt_len == src_len, "Number of samples doesn't match between source and target!!!"
print("Saving target data to %s .... with %d samples" % (savedir, tgt_len))
if data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "tgt"), dtype=dtype)
# add item from training data to the indexed data
for tensor in binarized_tgt['data']:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "tgt"))
del binarized_tgt['data']
del indexed_data
gc.collect()
np_array = np.asarray(binarized_tgt['sizes'])
np.save(os.path.join(savedir, "data.%s.npy" % "tgt_sizes"), np_array)
del binarized_tgt
del np_array
gc.collect()
if mirror:
print("Saving mirrrored source data to %s .... with %d entries" % (mirror_savedir, src_len))
source = os.path.join(savedir, "data.%s.bin" % "tgt")
target = os.path.join(mirror_savedir, "data.%s.bin" % "src")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.idx" % "tgt")
target = os.path.join(mirror_savedir, "data.%s.idx" % "src")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.npy" % "tgt_sizes")
target = os.path.join(mirror_savedir, "data.%s.npy" % "src_sizes")
os.symlink(os.path.abspath(source), target)
src, tgt, src_sizes, tgt_sizes = None, None, None, None
else:
src = binarized_src['data']
src_sizes = binarized_src['sizes']
tgt = binarized_tgt['data']
tgt_sizes = binarized_tgt['sizes']
# currently we don't ignore anything :D
ignored = 0
print(('Prepared %d sentences ' +
'(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
(len(src), ignored, max_src_length, max_tgt_length))
return src, tgt, src_sizes, tgt_sizes
def make_asr_data(src_file, tgt_file, tgt_dicts, tokenizer,
max_src_length=64, max_tgt_length=64, add_bos=True, data_type='int64', num_workers=1, verbose=False,
input_type='word', stride=1, concat=4, prev_context=0, fp16=False, reshape=True,
asr_format="scp", output_format="raw",
external_tokenizer=None, src_lang=None, tgt_lang=None,aux_tgt_file=None, lang_list=[]):
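    """Binarize an ASR dataset: the target side (and the optional auxiliary
    target) with the text Binarizer and the source side with SpeechBinarizer.
    Returns (src, tgt, src_sizes, tgt_sizes, aux_tgt, aux_tgt_sizes)."""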
src, tgt = [], []
src_sizes = []
tgt_sizes = []
count, ignored = 0, 0
n_unk_words = 0
if add_bos:
tgt_bos_word = opt.tgt_bos_token
else:
tgt_bos_word = None
if tgt_file is not None:
print("[INFO] Binarizing file %s ..." % tgt_file)
binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
data_type=data_type,
num_workers=num_workers, verbose=verbose,
external_tokenizer=external_tokenizer,
lang=tgt_lang, lang_list=lang_list, target=True)
tgt = binarized_tgt['data']
tgt_sizes = binarized_tgt['sizes']
ignored = 0
else:
tgt = None
tgt_sizes = None
if aux_tgt_file is not None:
aux_tgt = []
print("[INFO] Binarizing auxiliary target file %s ..." % aux_tgt_file)
aux_binarized_tgt = Binarizer.binarize_file(aux_tgt_file, tgt_dicts, tokenizer,
bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
data_type=data_type,
num_workers=num_workers, verbose=verbose,
external_tokenizer=external_tokenizer,
lang=tgt_lang, lang_list=lang_list)
aux_tgt = aux_binarized_tgt['data']
aux_tgt_sizes = aux_binarized_tgt['sizes']
ignored = 0
else:
aux_tgt = None
aux_tgt_sizes = None
print('[INFO] Processing %s ...' % src_file)
# num_workers = num_workers if asr_format in ['scp', 'kaldi'] else 1
# speech binarizer has to be 1 thread at the moment
binarized_src = SpeechBinarizer.binarize_file(src_file, input_format=asr_format,
output_format=output_format, concat=concat,
stride=stride, fp16=fp16, prev_context=prev_context,
num_workers=num_workers, verbose=verbose)
src = binarized_src['data']
src_sizes = binarized_src['sizes']
    if tgt_file is not None and len(src_sizes) != len(tgt_sizes):
        print("Warning: data size mismatched. Src: %d . Tgt: %d" % (len(src_sizes), len(tgt_sizes)))
print(('Prepared %d sentences ' +
'(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
(len(src), ignored, max_src_length, max_tgt_length))
return src, tgt, src_sizes, tgt_sizes, aux_tgt, aux_tgt_sizes
def main():
dicts = {}
tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
# We can load the dictionary from another project to ensure consistency
if opt.load_dict is not None and len(opt.load_dict) > 0:
print("[INFO] Loading dictionary from ... %s" % opt.load_dict)
dicts = torch.load(opt.load_dict)
# construct set of languages from the training languages
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
langs = (src_langs + tgt_langs)
langs = sorted(list(set(langs)))
    if len(opt.train_src_atbs) > 0:
src_atbs = opt.train_src_atbs.split("|")
tgt_atbs = opt.train_tgt_atbs.split("|")
atbs = (src_atbs + tgt_atbs)
atbs = sorted(list(set(atbs)))
else:
atbs = []
if not opt.load_dict:
dicts['langs'] = dict()
for lang in langs:
idx = len(dicts['langs'])
dicts['langs'][lang] = idx
dicts['atbs'] = dict()
for atb in atbs:
idx = len(dicts['atbs'])
dicts['atbs'][atb] = idx
else:
if 'langs' not in dicts:
dicts['langs'] = dict()
else:
print(dicts['langs'])
print("Adding languages to existing dictionary ...")
for lang in langs:
idx = len(dicts['langs'])
if lang not in dicts['langs']:
dicts['langs'][lang] = idx
if 'atbs' not in dicts:
dicts['atbs'] = dict()
else:
print("Adding attributes to existing dictionary ...")
for atb in atbs:
idx = len(dicts['atbs'])
if atb not in dicts['atbs']:
dicts['atbs'][atb] = idx
print("Languages: ", dicts['langs'])
print("Attributes: ", dicts['atbs'])
start = time.time()
src_train_files = opt.train_src.split("|")
tgt_train_files = opt.train_tgt.split("|")
# for ASR and LM we only need to build vocab for the 'target' language
if opt.asr or opt.lm:
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
elif opt.join_vocab:
dicts['src'] = init_vocab('source', set(src_train_files + tgt_train_files), opt.src_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
dicts['tgt'] = dicts['src']
else:
dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab,
opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Vocabulary generated after %s" % elapse)
if opt.lm:
print('Preparing training language model ...')
train = dict()
train['tgt'] = make_lm_data(opt.train_tgt,
dicts['tgt'])
train['src'] = None
valid = dict()
valid['tgt'] = make_lm_data(opt.valid_tgt,
dicts['tgt'])
valid['src'] = None
train['src_sizes'] = None
train['tgt_sizes'] = None
valid['src_sizes'] = None
valid['tgt_sizes'] = None
elif opt.asr:
print('Preparing training acoustic model ...')
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
src_atbs = opt.train_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
tgt_atbs = opt.train_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(src_atbs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
assert len(tgt_input_files) == len(tgt_atbs)
past_src_files = opt.past_train_src.split("|")
idx = 0
n_input_files = len(src_input_files)
# Training data ###################################################################
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_atb'], train['tgt_atb'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
data = dict()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
train['past_src'] = list()
train['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
data_name = "train.%i.%s-%s" % (idx, src_lang, tgt_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
if opt.multi_dataset and opt.resume:
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
            src_data, tgt_data, src_sizes, tgt_sizes, _, _ = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
n_samples = len(src_data)
src_atb_data, tgt_atb_data = None, None
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
# by default its 0
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
# processing the previous segment
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _, _, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
add_bos=not opt.no_bos,
fp16=opt.fp16,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
if opt.multi_dataset:
                    data['prev_src'] = past_src_data
else:
train['past_src'] += past_src_data
train['past_src_sizes'] += past_src_sizes
# Finalizing Training data ###################################################################
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
if len(atbs) > 0:
data['src_atb'] = src_atb_data
data['tgt_atb'] = tgt_atb_data
print("Saving training set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "train.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
if len(atbs) > 0:
train['src_atb'] += src_atb_data
train['tgt_atb'] += tgt_atb_data
# Validation data ###################################################################
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
past_src_files = opt.past_valid_src.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
src_atbs = opt.valid_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
tgt_atbs = opt.valid_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
idx = 0
n_input_files = len(src_input_files)
data = dict()
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
valid['src_atb'], valid['tgt_atb'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
valid['past_src'] = list()
valid['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
data_name = "valid.%i.%s-%s" % (idx, src_lang, tgt_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
if opt.multi_dataset and opt.resume:
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
            src_data, tgt_data, src_sizes, tgt_sizes, _, _ = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=max(1024, opt.src_seq_length),
max_tgt_length=max(1024, opt.tgt_seq_length),
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
n_samples = len(src_data)
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
# by default its 0
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
# validation past file
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _, _, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
valid['past_src'] += past_src_data
valid['past_src_sizes'] += past_src_sizes
# Finalizing Validation data ... #########################
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
if len(atbs) > 0:
data['src_atb'] = src_atb_data
data['tgt_atb'] = tgt_atb_data
print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
if len(atbs) > 0:
valid['src_atb'] += src_atb_data
valid['tgt_atb'] += tgt_atb_data
else:
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
past_src_files = opt.past_train_src.split("|")
n_input_files = len(src_input_files)
idx = 0
data = dict()
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
train['past_src'] = list()
train['past_src_sizes'] = list()
start = time.time()
print('Binarizing data to train translation models...')
for i, (src_file, tgt_file, src_lang, tgt_lang) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
dataset_idx = idx if not opt.multi_mirror else 2 * idx
data_name = "train.%i.%s-%s" % (dataset_idx , src_lang, tgt_lang)
mirrored_data_name = "train.%i.%s-%s" % (dataset_idx + 1 , tgt_lang, src_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
mirrored_dataset_path = os.path.join(dirname(opt.save_data), mirrored_data_name)
if opt.multi_dataset:
if opt.resume and os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
else:
os.makedirs(dataset_path, exist_ok=True)
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'],
early_save=opt.multi_dataset,
savedir=dataset_path,
mirror=opt.multi_mirror,
mirror_savedir=mirrored_dataset_path)
#TODO: check
# if n_input_files == 1:
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
assert src_data is not None
n_samples = len(src_data)
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# processing the previous segment
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
dicts['src'], dicts['src'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'])
if opt.multi_dataset:
data['prev_src'] = past_src_data
else:
train['past_src'] += past_src_data
train['past_src_sizes'] += past_src_sizes
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
print("Saving training set %i %s-%s to disk ..." % (dataset_idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = dataset_path
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, data, opt.format, dicts, opt.src_type)
if opt.multi_mirror:
mdata = dict()
mdata['src'] = tgt_data
mdata['tgt'] = src_data
mdata['tgt_sizes'] = src_sizes
mdata['src_sizes'] = tgt_sizes
mdata['tgt_lang'] = src_lang_data
mdata['src_lang'] = tgt_lang_data
print("Saving training set %i %s-%s to disk ..." % (dataset_idx + 1, tgt_lang, src_lang))
# take basedir from opt.save_data
path = mirrored_dataset_path
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, mdata, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
past_src_files = opt.past_valid_src.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
n_input_files = len(src_input_files)
idx = 0
data = dict()
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
if opt.past_valid_src and len(past_src_files) == len(src_input_files):
valid['past_src'] = list()
valid['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang) in enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=max(1024,
opt.src_seq_length),
max_tgt_length=max(1024,
opt.tgt_seq_length),
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs']
)
n_samples = len(src_data)
#TODO: this has to be changed
# if n_input_files == 1:
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# validation past file
if opt.past_valid_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
dicts['src'], dicts['src'], tokenizer,
max_src_length=max(1024,
opt.src_seq_length),
max_tgt_length=max(1024,
opt.tgt_seq_length),
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'])
valid['past_src'] += past_src_data
valid['past_src_sizes'] += past_src_sizes
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
else:
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Binarization finished after %s" % elapse)
if opt.src_vocab is None and not opt.asr and not opt.lm:
save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.tgt_vocab is None:
save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
if opt.multi_dataset:
# SAVE DATA
print("Saving dictionary to %s" % (opt.save_data + '.dict.pt'))
torch.save(dicts, opt.save_data + '.dict.pt')
if opt.src_vocab is None and not opt.asr and not opt.lm:
save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.tgt_vocab is None:
save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
print("Finished.")
else:
if opt.format in ['raw', 'bin']:
print('Saving data to \'' + opt.save_data + '.train.pt\'...')
save_data = {'dicts': dicts,
'type': opt.src_type,
'train': train,
'valid': valid}
torch.save(save_data, opt.save_data + '.train.pt')
print("Done")
elif opt.format in ['scp', 'scpmem', 'wav']:
print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
assert opt.asr, "ASR data format is required for this memory indexed format"
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['tgt', 'src_lang', 'tgt_lang']:
if train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
del train_data
if valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
if 'past_src' in train and len(train['past_src']) > 0:
set_ = 'past_src_sizes'
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
# Finally save the audio path
save_data = {'train': train['src'],
'valid': valid['src']}
# remember to take into account the past information
if 'past_src' in train and len(train['past_src']) > 0:
save_data['train_past'] = train['past_src']
save_data['valid_past'] = valid['past_src']
if opt.format in ['wav']:
torch.save(save_data, opt.save_data + '.wav_path.pt')
else:
torch.save(save_data, opt.save_data + '.scp_path.pt')
print("Done")
elif opt.format in ['mmap', 'mmem']:
print('Saving data to memory indexed data files')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
# save dicts in this format
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'past_src']:
if set_ not in train or train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
del train_data
if set_ not in valid or valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
    if set_ in train and train[set_] is not None:
        np_array = np.asarray(train[set_])
        np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
    else:
        print("Training %s not found" % set_)
    # also save the validation sizes, mirroring the scp/scpmem branch above
    if set_ in valid and valid[set_] is not None:
        np_array = np.asarray(valid[set_])
        np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
    else:
        print("Validation %s not found" % set_)
if 'past_src' in train and len(train['past_src']) > 0:
set_ = 'past_src_sizes'
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
else:
raise NotImplementedError
if __name__ == "__main__":
main()
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
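# Illustrative sketch (not part of the original pipeline): seeking into the
# middle of a multi-byte UTF-8 character makes the next read raise
# UnicodeDecodeError, and safe_readline recovers by backing up one byte at a
# time until it lands on a character boundary. The temp file is demo data.
def _demo_safe_readline():
    import tempfile
    with tempfile.NamedTemporaryFile('w', encoding='utf-8', delete=False) as tmp:
        tmp.write("über\nnaïve\n")
        name = tmp.name
    with open(name, encoding='utf-8') as f:
        f.seek(1)  # lands inside the 2-byte encoding of 'ü'
        print(repr(safe_readline(f)))  # backs up to offset 0: 'über\n'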
| 64,908
| 43.397401
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/extract_wav2vec2_tdnn.py
|
#!/usr/bin/env python
# from fairseq.checkpoint_utils import load_model_ensemble_and_task, load_checkpoint_to_cpu
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
from onmt.inference.fast_translator import FastTranslator
from onmt.inference.stream_translator import StreamTranslator
from torch.cuda.amp import autocast
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-vocab_list', default="",
help='A vocabulary list (1 word per line). Only these words are generated during translation.')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-ensemble_weight', default="",
help='Weights for the ensemble. Defaults to uniform. Split them by | and they will be normalized later')
parser.add_argument('-sub_ensemble_weight', default="",
help='Weights for the ensemble. Defaults to uniform. Split them by | and they will be normalized later')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=str, default="1",
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-max_memory_size', type=int, default=512,
help="Number of memory states stored in the buffer for XL models")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-scp_output', default='output.scp',
help="""Path to output the feature paths""")
parser.add_argument('-ark_output', default='output.ark',
help="""Path to output the features""")
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size (in audio samples)')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
def _is_oversized(batch, new_sent_size, batch_size):
"""
Check whether adding a new sentence would make the current batch oversized.
:param batch: list of source tensors currently in the batch
:param new_sent_size: length of the candidate sentence
:param batch_size: maximum padded batch area (max length * number of sentences)
:return: True if the padded batch would exceed batch_size
"""
# Always return False if empty
if len(batch) == 0:
return False
current_max_length = max([sent.size(0) for sent in batch])
# Because adding a new sentence can potentially enlarge the area of the rectangle, we need to check
if max(current_max_length, new_sent_size) * (len(batch) + 1) > batch_size:
return True
return False
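# Illustrative sketch with hypothetical numbers: the padded batch occupies
# max_length * n_sentences slots, so even a short new sample can overflow a
# batch that is dominated by one long sample.
def _demo_is_oversized():
    batch = [torch.zeros(50), torch.zeros(10)]  # current max length: 50
    # max(50, 10) * (2 + 1) = 150 slots after adding a 10-frame sample
    print(_is_oversized(batch, 10, batch_size=120))  # True: 150 > 120
    print(_is_oversized(batch, 10, batch_size=200))  # False: 150 <= 200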
def write_ark(utts, features, padding_mask, out_ark, out_scp, opt):
# cache_wav = ''
features = features.cpu()
bsz, seq_len, feat_size = features.size()
lengths = (1 - padding_mask).sum(dim=1)
assert len(utts) == bsz
for i in range(bsz):
feature_ = features[i, 0:lengths[i]]
feature_ = feature_.numpy()
# if opt.fp16:
# feature_ = feature_.astype(np.float16)
seg_name = utts[i]
dic = {seg_name: feature_}
from onmt.data.kaldiio.io import write_ark_file
write_ark_file(out_ark, out_scp, dic)
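# Illustrative sketch with hypothetical shapes: write_ark recovers the
# per-utterance lengths from the padding mask, where padded positions are
# marked with 1, so (1 - mask).sum(dim=1) counts the real frames.
def _demo_padding_mask_lengths():
    padding_mask = torch.tensor([[0, 0, 0, 1], [0, 0, 1, 1]])
    print((1 - padding_mask).sum(dim=1))  # tensor([3, 2])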
def build_data(src_sents):
from onmt.data.wav_dataset import WavDataset
src_data = src_sents
data_type = 'wav'
tgt_data = None
src_lang_data = [torch.Tensor([0])]
tgt_lang_data = None
return onmt.Dataset(src_data, tgt_data,
src_langs=src_lang_data, tgt_langs=tgt_lang_data,
batch_size_words=sys.maxsize,
max_src_len=sys.maxsize,
data_type=data_type,
batch_size_sents=sys.maxsize,
src_align_right=False,
past_src_data=None)
if __name__ == '__main__':
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
from onmt.models.speech_recognizer.wav2vec2 import FairseqWav2VecExtractor
model = FairseqWav2VecExtractor(opt.model)
# if opt.fp16:
# model = model.half()
if opt.cuda:
model = model.cuda()
model.eval()
ark_out = open(opt.ark_output, 'wb')
scp_out = open(opt.scp_output, 'w')
audio_data = open(opt.src)
from onmt.utils import safe_readaudio
i = 0
n_models = len(opt.model.split("|"))
src_batch = list()
src_utts = list()
while True:
try:
line = next(audio_data).strip().split()
utt = line[0]
if len(line) == 2:
wav_path = line[1]
start = 0
end = 0
else:
wav_path, start, end = line[1], float(line[2]), float(line[3])
line = safe_readaudio(wav_path, start=start, end=end, sample_rate=16000)
except StopIteration:
break
src_length = line.size(0)
"""
Read features output from wav2vec model and write into scp/ark file just like Kaldi w/ logmel features
"""
if _is_oversized(src_batch, src_length, opt.batch_size):
# If adding a new sentence will make the batch oversized
# Then do translation now, and then free the list
print("Batch sizes :", len(src_batch))
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
with autocast(enabled=opt.fp16):
features, padding_mask = model(batch)
write_ark(src_utts, features, padding_mask, ark_out, scp_out, opt)
src_batch = []
src_utts = []
src_batch.append(line)
src_utts.append(utt)
# catch the last batch
if len(src_batch) != 0:
print("Batch sizes :", len(src_batch), )
dataset = build_data(src_batch)
batch = dataset.get_batch(0)
batch.cuda()
with autocast(enabled=opt.fp16):
features, padding_mask = model(batch)
write_ark(src_utts, features, padding_mask, ark_out, scp_out, opt)
src_batch = []
src_utts = []
ark_out.close()
scp_out.close()
| 7,353
| 32.733945
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/translate_distributed.py
|
#!/usr/bin/env python
import sys
import os
import tempfile
from itertools import islice
from time import time
from multiprocessing import Pool
from translate import main as translate_main
from onmt.utils import safe_readline
def find_offsets(filename, num_chunks):
"""
:param filename: string
:param num_chunks: int
:return: a list of offsets (positions to start and stop reading)
"""
with open(filename, 'r', encoding='utf-8') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
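# Illustrative sketch on a hypothetical file: the returned byte offsets split
# the file into num_chunks ranges that each start on a line boundary; the
# final offset stays 0, which the readers below treat as "until EOF".
def _demo_find_offsets():
    with tempfile.NamedTemporaryFile('w', encoding='utf-8', delete=False) as tmp:
        tmp.write("".join("line %d\n" % i for i in range(100)))
        name = tmp.name
    offsets = find_offsets(name, num_chunks=4)
    print(offsets)  # offsets[i]..offsets[i + 1] delimit worker i's slice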
def hasopt(opt):
return ('-' + opt) in sys.argv
def popopt(opt):
# TODO handle different option formats, e.g. --opt or -opt=val
idx = sys.argv.index('-' + opt)
sys.argv.pop(idx)
return sys.argv.pop(idx)
def distribute_to_tempfiles(srcfile, n):
tmpfiles = [tempfile.NamedTemporaryFile('w', encoding='utf8') for _ in range(n)]
offsets = find_offsets(srcfile, n)
lines_per_tf = list()
with open(srcfile, 'r', encoding='utf-8') as f_all:
    all_lines = sum(1 for _ in f_all)
for i, tf in enumerate(tmpfiles):
n_lines = 0
start, end = offsets[i], offsets[i + 1]
with open(srcfile, 'r', encoding='utf8') as f:
f.seek(start)
line = safe_readline(f)
while line:
if 0 < end < f.tell():
break
tf.write(line)
n_lines += 1
line = f.readline()
tf.flush()
lines_per_tf.append(n_lines)
print("Lines per tmp files to be translated: ", lines_per_tf)
assert (sum(lines_per_tf) == all_lines)
# nlines = len(list(f))
# f.seek(0)
# # round up
# linesperpart = int((nlines + n - 1) / n)
# for tf in tmpfiles:
# for line in islice(f, linesperpart):
# tf.write(line)
# tf.flush()
return tmpfiles, lines_per_tf
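# Illustrative sketch on a hypothetical input file: the source is split into
# per-GPU temp files, and the per-part line counts always add up to the
# number of lines in the original file.
def _demo_distribute_to_tempfiles():
    with tempfile.NamedTemporaryFile('w', encoding='utf8', delete=False) as tmp:
        tmp.write("".join("sentence %d\n" % i for i in range(10)))
        name = tmp.name
    parts, lines_per_part = distribute_to_tempfiles(name, 3)
    print(lines_per_part)  # sums to 10
    for part in parts:
        part.close()  # the temp file parts are deleted on close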
def distribute_to_tempfiles_withlist(srcfile, n, line_per_tf):
tmpfiles = [tempfile.NamedTemporaryFile('w', encoding='utf8') for _ in range(n)]
assert len(line_per_tf) == n
with open(srcfile) as f:
for i, tf in enumerate(tmpfiles):
nlines = line_per_tf[i]
for _ in range(nlines):
line = f.readline()
tf.write(line)
tf.flush()
return tmpfiles
def run_part(args):
infile, goldfile, subsrcfile, pastsrcfile, outfile, gpu = args
start = time()
sys.argv += ['-gpu', gpu, '-src', infile, '-output', outfile]
if goldfile:
sys.argv += ['-tgt', goldfile]
if subsrcfile:
sys.argv += ['-sub_src', subsrcfile]
if pastsrcfile:
sys.argv += ['-past_src', pastsrcfile]
translate_main()
print('GPU {} done after {:.1f}s'.format(gpu, time() - start))
srcfile = popopt('src')
outfile = popopt('output')
gpu_list = popopt('gpus').split(',')
# (1) distribute input lines to N tempfiles
inparts, lines_per_file = distribute_to_tempfiles(srcfile, len(gpu_list))
if hasopt('tgt'):
goldfile = popopt('tgt')
goldparts = distribute_to_tempfiles_withlist(goldfile, len(gpu_list), lines_per_file)
else:
goldparts = [None for _ in range(len(gpu_list))]
if hasopt('sub_src'):
sub_src_file = popopt('sub_src')
sub_src_parts = distribute_to_tempfiles_withlist(sub_src_file, len(gpu_list), lines_per_file)
else:
sub_src_parts = [None for _ in range(len(gpu_list))]
if hasopt('past_src'):
past_src_file = popopt('past_src')
past_src_parts = distribute_to_tempfiles_withlist(past_src_file, len(gpu_list), lines_per_file)
else:
past_src_parts = [None for _ in range(len(gpu_list))]
# (2) run N processes translating one tempfile each
outparts = [tempfile.NamedTemporaryFile('r', encoding='utf8') for _ in gpu_list]
filenames = lambda tmpfiles: [tf.name if tf else None for tf in tmpfiles]
with Pool(len(gpu_list)) as p:
p.map(run_part, zip(filenames(inparts),
filenames(goldparts),
filenames(sub_src_parts),
filenames(past_src_parts),
filenames(outparts),
gpu_list))
# (3) concatenate tempfiles into one output file
with open(outfile, 'w', encoding='utf8') as f:
for outp in outparts:
f.write(outp.read())
| 4,512
| 28.116129
| 99
|
py
|
NMTGMinor
|
NMTGMinor-master/train_distributed.py
|
#!/usr/bin/env python
from __future__ import division
import pickle
import types
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import time, datetime
from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
from onmt.data.scp_dataset import SCPIndexDataset
from onmt.data.wav_dataset import WavDataset
from options import make_parser
from collections import defaultdict
from onmt.constants import add_tokenidx
import os
import numpy as np
import warnings
import dill
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager, NamespaceProxy
from torch.multiprocessing import Pool, Process, set_start_method
def pickle_trick(obj, max_depth=10):
output = {}
if max_depth <= 0:
return output
try:
pickle.dumps(obj)
except (pickle.PicklingError, TypeError) as e:
failing_children = []
if hasattr(obj, "__dict__"):
for k, v in obj.__dict__.items():
result = pickle_trick(v, max_depth=max_depth - 1)
if result:
failing_children.append(result)
output = {
"fail": obj,
"err": e,
"depth": max_depth,
"failing_children": failing_children
}
return output
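# Illustrative sketch: pickle_trick reports which object (or nested
# attribute) breaks pickling; an open file handle is a classic unpicklable
# example.
def _demo_pickle_trick():
    handle = open(__file__)
    report = pickle_trick(handle)
    if report:
        print("pickling failed:", report["err"])
    handle.close()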
Dataset = onmt.Dataset
#
# class MyManager(BaseManager):
# pass
#
#
# class MMapIndexedDatasetProxy(NamespaceProxy):
# _exposed_ = tuple(dir(MMapIndexedDataset))
#
# def __getattr__(self, name):
# result = super().__getattr__(name)
# if isinstance(result, types.MethodType):
# def wrapper(*args, **kwargs):
# return self._callmethod(name, args, kwargs) # Note the return here
# return wrapper
# return result
#
# def __len__(self):
# callmethod = object.__getattribute__(self, '_callmethod')
# return callmethod('__len__')
#
# def __getitem__(self, index):
# callmethod = object.__getattribute__(self, '_callmethod')
# return callmethod('__getitem__',(index,))
#
#
# MyManager.register('MMapIndexedDataset', MMapIndexedDataset, MMapIndexedDatasetProxy)
#
def numpy_to_torch(tensor_list):
out_list = list()
for tensor in tensor_list:
if isinstance(tensor, np.ndarray):
out_list.append(torch.from_numpy(tensor))
else:
out_list.append(tensor)
return out_list
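# Illustrative sketch with hypothetical shapes: numpy arrays are wrapped via
# torch.from_numpy (zero-copy), while everything else passes through
# numpy_to_torch untouched.
def _demo_numpy_to_torch():
    mixed = [np.zeros((2, 3)), torch.ones(4)]
    out = numpy_to_torch(mixed)
    print([type(x).__name__ for x in out])  # ['Tensor', 'Tensor']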
def run_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants):
"""
Launch training for normal sequence2sequence models
Args:
gpu:
train_data:
valid_data:
dicts:
opt:
checkpoint:
constants:
Returns:
"""
from onmt.train_utils.mp_trainer import Trainer
trainer = Trainer(gpu, dicts, opt, constants)
trainer.run(checkpoint=checkpoint, train_data=train_data, valid_data=valid_data)
def run_gem_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants):
"""
Launch training for Gradient Episodic Memory
Args:
gpu:
train_data:
valid_data:
dicts:
opt:
checkpoint:
constants:
Returns:
"""
from onmt.train_utils.gem_trainer import GEMTrainer
trainer = GEMTrainer(gpu, train_data, valid_data, dicts, opt, constants)
trainer.run(checkpoint=checkpoint)
def main(gpu, opt):
def lprint(*args, **kwargs):
if gpu == 0:
print(*args, **kwargs, flush=True)
# manager = MyManager()
# manager.start()
if not opt.multi_dataset:
if opt.data_format in ['bin', 'raw']:
start = time.time()
if opt.data.endswith(".train.pt"):
lprint("Loading data from '%s'" % opt.data)
dataset = torch.load(opt.data)
else:
lprint("Loading data from %s" % opt.data + ".train.pt")
dataset = torch.load(opt.data + ".train.pt")
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
lprint("Done after %s" % elapse)
dicts = dataset['dicts']
onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
# For backward compatibility
train_dict = defaultdict(lambda: None, dataset['train'])
valid_dict = defaultdict(lambda: None, dataset['valid'])
if train_dict['src_lang'] is not None:
assert 'langs' in dicts
train_src_langs = train_dict['src_lang']
train_tgt_langs = train_dict['tgt_lang']
else:
# allocate new languages
dicts['langs'] = {'src': 0, 'tgt': 1}
train_src_langs = list()
train_tgt_langs = list()
# Allocate one for the bilingual case
train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
if train_dict['src_atb'] is not None:
assert 'atbs' in dicts
train_src_atbs = train_dict['src_atb']
train_tgt_atbs = train_dict['tgt_atb']
else:
# allocate default attributes
dicts['atbs'] = {'nothingness': 0}
train_src_atbs = list()
train_tgt_atbs = list()
train_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
train_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
if not opt.streaming:
train_data = onmt.Dataset(numpy_to_torch(train_dict['src']), numpy_to_torch(train_dict['tgt']),
train_dict['src_sizes'], train_dict['tgt_sizes'],
train_src_langs, train_tgt_langs,
train_src_atbs, train_tgt_atbs,
batch_size_words=opt.batch_size_words,
batch_size_frames=opt.batch_size_frames,
data_type=dataset.get("type", "text"), sorting=True, cleaning=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
max_src_len=opt.max_src_length,
max_tgt_len=opt.max_tgt_length,
input_size=opt.input_size,
upsampling=opt.upsampling,
num_split=1,
constants=onmt.constants)
else:
train_data = onmt.StreamDataset(train_dict['src'], train_dict['tgt'],
train_src_langs, train_tgt_langs,
batch_size_words=opt.batch_size_words,
data_type=dataset.get("type", "text"), sorting=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
augment=opt.augment_speech,
upsampling=opt.upsampling)
dicts['tgt_pad'] = train_data.tgt_pad
if valid_dict['src_lang'] is not None:
assert 'langs' in dicts
valid_src_langs = valid_dict['src_lang']
valid_tgt_langs = valid_dict['tgt_lang']
else:
# allocate new languages
valid_src_langs = list()
valid_tgt_langs = list()
# Allocate one for the bilingual case
valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
if valid_dict['src_atb'] is not None:
assert 'atbs' in dicts
valid_src_atbs = valid_dict['src_atb']
valid_tgt_atbs = valid_dict['tgt_atb']
else:
# allocate default attributes
valid_src_atbs = list()
valid_tgt_atbs = list()
valid_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
valid_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
if not opt.streaming:
valid_data = onmt.Dataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
valid_dict['src_sizes'], valid_dict['tgt_sizes'],
valid_src_langs, valid_tgt_langs,
valid_src_atbs, valid_tgt_atbs,
batch_size_words=opt.batch_size_words,
batch_size_frames=opt.batch_size_frames,
data_type=dataset.get("type", "text"), sorting=True,
batch_size_sents=opt.batch_size_sents,
max_src_len=opt.max_src_length,
max_tgt_len=opt.max_tgt_length,
multiplier=opt.batch_size_multiplier,
upsampling=opt.upsampling,
input_size=opt.input_size,
constants=onmt.constants)
else:
valid_data = onmt.StreamDataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
valid_src_langs, valid_tgt_langs,
batch_size_words=opt.batch_size_words,
data_type=dataset.get("type", "text"), sorting=True,
batch_size_sents=opt.batch_size_sents,
upsampling=opt.upsampling)
lprint(' * number of training sentences. %d' % len(dataset['train']['src']))
lprint(' * maximum batch size (words per batch). %d' % opt.batch_size_words)
# Loading asr data structures
elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
lprint("Loading memory mapped data files ....")
start = time.time()
from onmt.data.scp_dataset import SCPIndexDataset
dicts = torch.load(opt.data + ".dict.pt")
onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
if opt.data_format in ['scp', 'scpmem']:
audio_data = torch.load(opt.data + ".scp_path.pt")
elif opt.data_format in ['wav']:
audio_data = torch.load(opt.data + ".wav_path.pt")
# # TODO: maybe having another option like -past_context
# if os.path.exists(opt.data + '.prev_src_path.pt'):
# prev_audio_data = torch.load(opt.data + '.prev_src_path.pt')
# else:
# prev_audio_data = None
# allocate languages if not
if 'langs' not in dicts:
dicts['langs'] = {'src': 0, 'tgt': 1}
else:
lprint(dicts['langs'])
train_path = opt.data + '.train'
if opt.data_format in ['scp', 'scpmem']:
train_src = SCPIndexDataset(audio_data['train'], concat=opt.concat)
if 'train_past' in audio_data:
past_train_src = SCPIndexDataset(audio_data['train_past'],
concat=opt.concat, shared_object=train_src)
else:
past_train_src = None
elif opt.data_format in ['wav']:
train_src = WavDataset(audio_data['train'], cache_size=opt.data_cache_size)
past_train_src = None
else:
train_src = MMapIndexedDataset(train_path + '.src')
past_train_src = None
train_tgt = MMapIndexedDataset(train_path + '.tgt')
# check the lang files if they exist (in the case of multi-lingual models)
if os.path.exists(train_path + '.src_lang.bin'):
assert 'langs' in dicts
train_src_langs = MMapIndexedDataset(train_path + '.src_lang')
train_tgt_langs = MMapIndexedDataset(train_path + '.tgt_lang')
else:
train_src_langs = list()
train_tgt_langs = list()
# Allocate a Tensor(1) for the bilingual case
train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
if os.path.exists(train_path + '.src_atb.bin'):
assert 'atbs' in dicts
train_src_atbs = MMapIndexedDataset(train_path + '.src_atb')
train_tgt_atbs = MMapIndexedDataset(train_path + '.tgt_atb')
else:
dicts['atbs'] = {'nothingness': 0}
train_src_atbs = list()
train_tgt_atbs = list()
train_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
train_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
# check the length files if they exist
if os.path.exists(train_path + '.src_sizes.npy'):
train_src_sizes = np.load(train_path + '.src_sizes.npy')
train_tgt_sizes = np.load(train_path + '.tgt_sizes.npy')
else:
train_src_sizes, train_tgt_sizes = None, None
# check the length files if they exist
if os.path.exists(train_path + '.past_src_sizes.npy'):
past_train_src_sizes = np.load(train_path + '.past_src_sizes.npy')
else:
past_train_src_sizes = None
if opt.data_format in ['scp', 'scpmem']:
data_type = 'audio'
elif opt.data_format in ['wav']:
data_type = 'wav'
else:
data_type = 'text'
if not opt.streaming:
train_data = onmt.Dataset(train_src,
train_tgt,
train_src_sizes, train_tgt_sizes,
train_src_langs, train_tgt_langs,
train_src_atbs, train_tgt_atbs,
batch_size_words=opt.batch_size_words,
batch_size_frames=opt.batch_size_frames,
data_type=data_type, sorting=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
cleaning=True, verbose=True,
input_size=opt.input_size,
past_src_data=past_train_src,
past_src_data_sizes=past_train_src_sizes,
max_src_len=opt.max_src_length,
max_tgt_len=opt.max_tgt_length,
constants=onmt.constants)
else:
train_data = onmt.StreamDataset(train_src,
train_tgt,
train_src_langs, train_tgt_langs,
batch_size_words=opt.batch_size_words,
data_type=data_type, sorting=False,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
upsampling=opt.upsampling)
dicts['tgt_pad'] = train_data.tgt_pad
valid_path = opt.data + '.valid'
if opt.data_format in ['scp', 'scpmem']:
valid_src = SCPIndexDataset(audio_data['valid'], concat=opt.concat)
if 'valid_past' in audio_data:
past_valid_src = SCPIndexDataset(audio_data['valid_past'],
concat=opt.concat, shared_object=valid_src)
else:
past_valid_src = None
elif opt.data_format in ['wav']:
valid_src = WavDataset(audio_data['valid'], cache_size=opt.data_cache_size)
past_valid_src = None
else:
valid_src = MMapIndexedDataset(valid_path + '.src')
past_valid_src = None
valid_tgt = MMapIndexedDataset(valid_path + '.tgt')
if os.path.exists(valid_path + '.src_lang.bin'):
assert 'langs' in dicts
valid_src_langs = MMapIndexedDataset(valid_path + '.src_lang')
valid_tgt_langs = MMapIndexedDataset(valid_path + '.tgt_lang')
else:
valid_src_langs = list()
valid_tgt_langs = list()
# Allocate one for the bilingual case
valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
if os.path.exists(valid_path + '.src_atb.bin'):
assert 'atbs' in dicts
valid_src_atbs = MMapIndexedDataset(valid_path + '.src_atb')
valid_tgt_atbs = MMapIndexedDataset(valid_path + '.tgt_atb')
else:
valid_src_atbs = list()
valid_tgt_atbs = list()
valid_src_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
valid_tgt_atbs.append(torch.Tensor([dicts['atbs']['nothingness']]))
# check the length files if they exist
if os.path.exists(valid_path + '.src_sizes.npy'):
valid_src_sizes = np.load(valid_path + '.src_sizes.npy')
valid_tgt_sizes = np.load(valid_path + '.tgt_sizes.npy')
else:
valid_src_sizes, valid_tgt_sizes = None, None
# check the length files if they exist
if os.path.exists(valid_path + '.past_src_sizes.npy'):
past_valid_src_sizes = np.load(valid_path + '.past_src_sizes.npy')
else:
past_valid_src_sizes = None
if not opt.streaming:
valid_data = onmt.Dataset(valid_src, valid_tgt,
valid_src_sizes, valid_tgt_sizes,
valid_src_langs, valid_tgt_langs,
valid_src_atbs, valid_tgt_atbs,
batch_size_words=opt.batch_size_words,
batch_size_frames=opt.batch_size_frames,
multiplier=opt.batch_size_multiplier,
data_type=data_type, sorting=True,
input_size=opt.input_size,
batch_size_sents=opt.batch_size_sents,
cleaning=True, verbose=True, debug=True,
past_src_data=past_valid_src,
past_src_data_sizes=past_valid_src_sizes,
max_src_len=opt.max_src_length,
max_tgt_len=opt.max_tgt_length,
min_src_len=1, min_tgt_len=3,
constants=onmt.constants)
else:
# for validation data, we have to go through sentences (very slow but to ensure correctness)
valid_data = onmt.StreamDataset(valid_src, valid_tgt,
valid_src_langs, valid_tgt_langs,
batch_size_words=opt.batch_size_words,
data_type=data_type, sorting=True,
batch_size_sents=opt.batch_size_sents)
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
lprint("Done after %s" % elapse)
else:
raise NotImplementedError
lprint(' * number of sentences in training data: %d' % train_data.size())
lprint(' * number of sentences in validation data: %d' % valid_data.size())
# Multi-data set handling
else:
lprint("[INFO] Reading multiple dataset ...")
dicts = torch.load(opt.data + ".dict.pt")
lprint("Languages: ", dicts['langs'])
if 'atbs' not in dicts or len(dicts['atbs']) == 0: # backward compatible
dicts['atbs'] = {'nothingness': 0}
lprint("Atributes: ", dicts['atbs'])
onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
root_dir = os.path.dirname(opt.data)
lprint("Loading training data ...")
train_dirs, valid_dirs = dict(), dict()
# scan the data directory to find the training data
for dir_ in os.listdir(root_dir):
if os.path.isdir(os.path.join(root_dir, dir_)):
if str(dir_).startswith("train"):
idx = int(dir_.split(".")[1])
train_dirs[idx] = dir_
if dir_.startswith("valid"):
idx = int(dir_.split(".")[1])
valid_dirs[idx] = dir_
train_sets, valid_sets = list(), list()
c = 0
for (idx_, dir_) in sorted(train_dirs.items()):
c += 1
data_dir = os.path.join(root_dir, dir_)
lprint("[INFO] Loading training data %i from %s" % (idx_, dir_))
if opt.data_format in ['bin', 'raw']:
raise NotImplementedError
elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
from onmt.data.scp_dataset import SCPIndexDataset
if opt.data_format in ['scp', 'scpmem']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = SCPIndexDataset(audio_data, concat=opt.concat)
elif opt.data_format in ['wav']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = WavDataset(audio_data, cache_size=opt.data_cache_size)
else:
src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))
src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))
if os.path.exists(os.path.join(data_dir, 'data.src_atb.bin')):
src_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_atb'))
tgt_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_atb'))
else:
src_atbs_data = list()
tgt_atbs_data = list()
src_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))
tgt_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))
if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
else:
src_sizes, tgt_sizes = None, None
if opt.encoder_type in ['audio', 'wav2vec2_scp']:
data_type = 'audio'
elif opt.encoder_type == 'wav2vec2':
data_type = 'wav'
else:
data_type = 'text'
if not opt.streaming:
constants = dill.dumps(onmt.constants)
train_data = onmt.Dataset(src_data,
tgt_data,
src_sizes, tgt_sizes,
src_lang_data, tgt_lang_data,
src_atbs_data, tgt_atbs_data,
batch_size_words=opt.batch_size_words,
batch_size_frames=opt.batch_size_frames,
data_type=data_type, sorting=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
upsampling=opt.upsampling,
augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
cleaning=True, verbose=True,
max_src_len=opt.max_src_length,
max_tgt_len=opt.max_tgt_length,
input_size=opt.input_size,
constants=constants)
if c == 1:
dicts['tgt_pad'] = train_data.get_tgt_pad()
del src_sizes, tgt_sizes, src_data, tgt_data, src_lang_data, tgt_lang_data
train_sets.append(train_data)
else:
lprint("Multi-dataset not implemented for Streaming tasks.")
raise NotImplementedError
for (idx_, dir_) in sorted(valid_dirs.items()):
data_dir = os.path.join(root_dir, dir_)
lprint("[INFO] Loading validation data %i from %s" % (idx_, dir_))
if opt.data_format in ['bin', 'raw']:
raise NotImplementedError
elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
if opt.data_format in ['scp', 'scpmem']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = SCPIndexDataset(audio_data, concat=opt.concat)
elif opt.data_format in ['wav']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = WavDataset(audio_data, cache_size=opt.data_cache_size)
else:
src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))
src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))
# load data attributes
if os.path.exists(os.path.join(data_dir, 'data.src_atb.bin')):
src_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_atb'))
tgt_atbs_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_atb'))
else:
src_atbs_data = list()
tgt_atbs_data = list()
src_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))
tgt_atbs_data.append(torch.Tensor([dicts['atbs']['nothingness']]))
# load data size
if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
else:
src_sizes, tgt_sizes = None, None
if opt.encoder_type in ['audio', 'wav2vec2_scp']:
data_type = 'audio'
elif opt.encoder_type == 'wav2vec2':
data_type = 'wav'
else:
data_type = 'text'
if not opt.streaming:
constants = dill.dumps(onmt.constants)
valid_data = onmt.Dataset(src_data, tgt_data,
src_sizes, tgt_sizes,
src_lang_data, tgt_lang_data,
src_atbs_data, tgt_atbs_data,
batch_size_words=opt.batch_size_words,
batch_size_frames=opt.batch_size_frames,
multiplier=opt.batch_size_multiplier,
data_type=data_type, sorting=True,
batch_size_sents=opt.batch_size_sents,
min_src_len=1, min_tgt_len=3,
input_size=opt.input_size,
cleaning=True, verbose=True,
constants=constants)
valid_sets.append(valid_data)
else:
raise NotImplementedError
train_data = train_sets
valid_data = valid_sets
if opt.load_from and not opt.reset_optim:
lprint("Loading checkpoint: ", opt.load_from)
checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
lprint("* Loading dictionaries from the checkpoint")
del checkpoint['model']
del checkpoint['optim']
if opt.override_dict_from_checkpoint:
dicts = checkpoint['dicts']
else:
dicts['tgt'].patch(opt.patch_vocab_multiplier)
checkpoint = None
if "src" in dicts:
lprint(' * vocabulary size. source = %d; target = %d' %
(dicts['src'].size(), dicts['tgt'].size()))
else:
lprint(' * vocabulary size. target = %d' %
(dicts['tgt'].size()))
os.environ['MASTER_ADDR'] = opt.master_addr # default 'localhost'
os.environ['MASTER_PORT'] = opt.master_port # default '8888'
# spawn N processes for N gpus
# each process has a different trainer
constants = dill.dumps(onmt.constants)
if opt.gem_training:
# if len(opt.gpus) > 1:
# # torch.multiprocessing.spawn(run_gem_process, nprocs=len(opt.gpus),
# # args=(train_data, valid_data, dicts, opt, checkpoint, constants))
#
# torch.multiprocessing.spawn(run_gem_process, nprocs=len(opt.gpus),
# args=(train_data, valid_data, dicts, opt, checkpoint, constants))
# else:
run_gem_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants)
else:
run_process(gpu, train_data, valid_data, dicts, opt, checkpoint, constants)
# torch.multiprocessing.spawn(run_process, nprocs=len(opt.gpus),
# args=(train_data, valid_data, dicts, opt, checkpoint, constants),
# start_method='fork')
if __name__ == "__main__":
warnings.filterwarnings("ignore", message="The given NumPy array is not writeable ")
torch.multiprocessing.set_sharing_strategy('file_system')
parser = argparse.ArgumentParser(description='train_distributed.py')
onmt.markdown.add_md_help_argument(parser)
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
opt = parser.parse_args()
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
print("WARNING: You have a CUDA device, should run with -gpus 0")
if len(opt.gpus) == 1:
main(0, opt)
else:
torch.multiprocessing.spawn(main, args=(opt, ),
nprocs=len(opt.gpus))
| 33,419
| 44.345997
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/train_language_model.py
|
#!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import torch.nn as nn
from torch import cuda
from torch.autograd import Variable
import math
import time, datetime
from onmt.train_utils.trainer import XETrainer
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from onmt.model_factory import build_language_model, optimize_model
from onmt.data.lm_dataset import LanguageModelDataset
from collections import defaultdict
parser = argparse.ArgumentParser(description='train.py')
onmt.markdown.add_md_help_argument(parser)
from options import make_parser
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
opt = parser.parse_args()
print(opt)
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
print("WARNING: You have a CUDA device, should run with -gpus 0")
torch.manual_seed(opt.seed)
def main():
start = time.time()
print("Loading data from '%s'" % opt.data)
if opt.data_format == 'raw':
dataset = torch.load(opt.data)
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Done after %s" % elapse)
dicts = dataset['dicts']
# For backward compatibility
train_dict = defaultdict(lambda: None, dataset['train'])
valid_dict = defaultdict(lambda: None, dataset['valid'])
if train_dict['src_lang'] is not None:
assert 'langs' in dicts
train_src_langs = train_dict['src_lang']
train_tgt_langs = train_dict['tgt_lang']
else:
# allocate new languages
dicts['langs'] = {'src': 0, 'tgt': 1}
train_src_langs = list()
train_tgt_langs = list()
# Allocate one for the bilingual case
train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
train_data = LanguageModelDataset(
dataset['train']['tgt'], train_tgt_langs,
batch_size_sents=opt.batch_size_sents,
seq_length=opt.lm_seq_length)
if valid_dict['src_lang'] is not None:
assert 'langs' in dicts
valid_src_langs = valid_dict['src_lang']
valid_tgt_langs = valid_dict['tgt_lang']
else:
# allocate new languages
valid_src_langs = list()
valid_tgt_langs = list()
# Allocate one for the bilingual case
valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
valid_data = LanguageModelDataset(
dataset['valid']['tgt'], valid_tgt_langs,
batch_size_sents=opt.batch_size_sents,
seq_length=opt.lm_seq_length)
if opt.load_from:
checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
print("* Loading dictionaries from the checkpoint")
dicts = checkpoint['dicts']
else:
dicts['tgt'].patch(opt.patch_vocab_multiplier)
checkpoint = None
if "src" in dicts:
print(' * vocabulary size. source = %d; target = %d' %
(dicts['src'].size(), dicts['tgt'].size()))
else:
print(' * vocabulary size. target = %d' %
(dicts['tgt'].size()))
print(' * number of training sentences. %d' %
train_data.size())
print(' * maximum batch size (words per batch). %d' % (opt.batch_size_sents * opt.lm_seq_length))
else:
raise NotImplementedError
print('Building model...')
model = build_language_model(opt, dicts)
optimize_model(model)
""" Building the loss function """
loss_function = NMTLossFunc(opt.model_size, dicts['tgt'].size(), label_smoothing=opt.label_smoothing)
n_params = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % n_params)
if len(opt.gpus) > 1 or opt.virtual_gpu > 1:
raise NotImplementedError("Multi-GPU training is not supported ATM.")
else:
trainer = XETrainer(model, loss_function, train_data, valid_data, dicts, opt)
trainer.run(checkpoint=checkpoint)
if __name__ == "__main__":
main()
| 4,807
| 32.158621
| 105
|
py
|
NMTGMinor
|
NMTGMinor-master/train.py
|
#!/usr/bin/env python
from train_distributed import main
if __name__ == "__main__":
main()
| 96
| 15.166667
| 34
|
py
|
NMTGMinor
|
NMTGMinor-master/preprocess.py
|
#!/usr/bin/env python
import onmt
import onmt.markdown
import argparse
import torch
import subprocess
import time, datetime
from onmt.data.binarizer import Binarizer
from onmt.data.binarizer import SpeechBinarizer
from onmt.data.indexed_dataset import IndexedDatasetBuilder
import numpy as np
import warnings
import os
from os.path import dirname, abspath
import gc
warnings.filterwarnings("ignore", category=UserWarning)
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-multi_dataset', action='store_true',
help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-multi_mirror', action='store_true',
help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-resume', action='store_true',
help="If the dataset is created, ignored and create the next one")
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-src_type', default="text",
help="Type of the source input. Options are [text|img|audio].")
parser.add_argument('-sort_type', default="ascending",
help="Type of sorting. Options are [ascending|descending].")
parser.add_argument('-src_img_dir', default=".",
help="Location of source images")
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-data_type', default="int64",
help="Input type for storing text (int64|int32|int|int16) to reduce memory load")
parser.add_argument('-format', default="raw",
help="Save data format: binary or raw. Binary should be used to load faster")
parser.add_argument('-external_tokenizer', default="",
help="External tokenizer from Huggingface. Currently supports barts.")
parser.add_argument('-train_src', required=True,
help="Path to the training source data")
parser.add_argument('-past_train_src', default="",
help="Path to the training source data")
parser.add_argument('-future_train_src', default="",
help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
help="Path to the training target data")
parser.add_argument('-valid_src', required=True,
help="Path to the validation source data")
parser.add_argument('-past_valid_src', default="",
help="Path to the validation source data")
parser.add_argument('-future_valid_src', default="",
help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
help="Path to the validation target data")
parser.add_argument('-train_src_lang', default="src",
help="Language(s) of the source sequences.")
parser.add_argument('-train_src_atbs', default="",
help="Attributes(s) of the source sequences.")
parser.add_argument('-train_tgt_lang', default="tgt",
help="Language(s) of the target sequences.")
parser.add_argument('-train_tgt_atbs', default="",
help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_src_lang', default="src",
help="Language(s) of the source sequences.")
parser.add_argument('-valid_src_atbs', default="",
help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_tgt_lang', default="tgt",
help="Language(s) of the target sequences.")
parser.add_argument('-valid_tgt_atbs', default="",
help="Attributes(s) of the source sequences.")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-src_vocab_size', type=int, default=9999999,
help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=9999999,
help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
help="Path to an existing target vocabulary")
parser.add_argument('-load_dict',
help="Path to an existing target vocabulary")
parser.add_argument('-src_seq_length', type=int, default=10000,
help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=10000,
help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
# tokens
parser.add_argument('-src_bos_token', type=str, default="<s>",
help='SRC BOS Token. Default is <s>.')
parser.add_argument('-src_eos_token', type=str, default="</s>",
help='SRC EOS Token. Default is </s>.')
parser.add_argument('-src_unk_token', type=str, default="<unk>",
help='SRC Unk Token. Default is <unk>.')
parser.add_argument('-src_pad_token', type=str, default="<blank>",
help='SRC PAD Token. Default is <blank>.')
parser.add_argument('-tgt_bos_token', type=str, default="<s>",
help='TGT BOS Token. Default is <s>.')
parser.add_argument('-tgt_eos_token', type=str, default="</s>",
help='TGT EOS Token. Default is </s>.')
parser.add_argument('-tgt_unk_token', type=str, default="<unk>",
help='TGT Unk Token. Default is <unk>.')
parser.add_argument('-tgt_pad_token', type=str, default="<blank>",
help='TGT PAD Token. Default is <blank>.')
parser.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
parser.add_argument('-asr', action='store_true',
help="prepare data for asr task")
parser.add_argument('-asr_format', default="h5",
help="Format of asr data h5 or scp")
parser.add_argument('-lm', action='store_true',
help="prepare data for LM task")
parser.add_argument('-fp16', action='store_true',
help="store ASR data in fp16")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
parser.add_argument('-load_bpe_voc', action='store_true', help='load an existing BPE vocabulary file (special tokens are added before loading)')
parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)')
parser.add_argument('-sort_by_target', action='store_true', help='sort data by target sentence length')
parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target')
parser.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
parser.add_argument('-reshape_speech', type=int, default=1,
help="Reshaping the speech segments here. Mostly for compatibility..")
parser.add_argument('-num_threads', type=int, default=1,
help="Number of threads for multiprocessing")
parser.add_argument('-verbose', action='store_true',
help="Print out information during preprocessing")
opt = parser.parse_args()
torch.manual_seed(opt.seed)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
if name == "source":
vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
opt.src_bos_token, opt.src_eos_token],
lower=opt.lower)
elif name == "target":
vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
opt.tgt_bos_token, opt.tgt_eos_token],
lower=opt.lower)
else:
print("Warning: check the name")
exit(-1)
for filename in filenames:
print("Generating vocabulary from file %s ... " % filename)
onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)
original_size = vocab.size()
vocab = vocab.prune(size)
print('Created dictionary of size %d (pruned from %d)' %
(vocab.size(), original_size))
return vocab
def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, num_workers=1):
vocab = None
if vocab_file is not None:
# If given, load existing word dictionary.
print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...')
if not opt.load_bpe_voc:
vocab = onmt.Dict()
else:
if name == "target":
vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
opt.tgt_bos_token, opt.tgt_eos_token],
lower=opt.lower)
elif name == "source":
vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
opt.src_bos_token, opt.src_eos_token],
lower=opt.lower)
else:
print("Warning: name should be source or target")
exit(-1)
vocab.loadFile(vocab_file)
print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
if vocab is None:
print('Building ' + name + ' vocabulary...')
        gen_word_vocab = make_vocab(name, data_files, vocab_size, tokenizer, num_workers=num_workers)
vocab = gen_word_vocab
print()
return vocab
def save_vocabulary(name, vocab, file):
print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
vocab.writeFile(file)
def save_dataset(path, data, format, dicts, src_type):
    # Each dataset consists of the following components:
# src: tensors for the source vectors, or the scp_path (in ASR case)
# tgt: tensors for the target vectors
# src_lang: tensors for the source language ids (simplified)
# tgt_lang: tensors for the target language ids (simplified)
# convert all datasets to pytorch tensors and save to .pt
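    # Illustrative sketch (hypothetical shapes) of the expected `data` layout:
    #   data = {'src': [LongTensor, ...], 'tgt': [LongTensor, ...],
    #           'src_sizes': [int, ...], 'tgt_sizes': [int, ...],
    #           'src_lang': [Tensor, ...], 'tgt_lang': [Tensor, ...]}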
if format in ['raw', 'bin']:
print('Saving data to ' + os.path.join(path, 'data.pt') + '...')
        save_data = {'type': src_type,
                     'data': data}
torch.save(save_data, os.path.join(path, 'data.pt'))
print("Done")
# for ASR only
elif format in ['scp', 'scpmem', 'wav']:
print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
assert opt.asr, "ASR data format is required for this memory indexed format"
        # TODO: move this to before saving everything
# torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
if set_ not in data or data[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
# add item from training data to the indexed data
for tensor in data[set_]:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
del indexed_data
for set_ in ['src_sizes', 'tgt_sizes']:
if data[set_] is not None:
np_array = np.asarray(data[set_])
np.save(os.path.join(path, "data.%s.npy") % set_, np_array)
else:
print("Training %s not found " % set_)
# Finally save the audio path
torch.save(data['src'], os.path.join(path, 'data.scp_path.pt'))
if 'prev_src' in data and data['prev_src'] is not None:
torch.save(data['prev_src'], os.path.join(path, 'data.prev_scp_path.pt'))
print("Done")
    elif format in ['mmap', 'mmem']:
print('Saving data to memory indexed data files')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
if opt.asr:
print("ASR data format isn't compatible with memory indexed format")
raise AssertionError
# save dicts in this format
# torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
if set_ not in data or data[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
# add item from training data to the indexed data
for tensor in data[set_]:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
del indexed_data
for set_ in ['src_sizes', 'tgt_sizes']:
if data[set_] is not None:
np_array = np.asarray(data[set_])
np.save(os.path.join(path, "data.%s.npy" % set_), np_array)
else:
print("Set %s not found " % set_)
def make_lm_data(tgt_file, tgt_dicts, max_tgt_length=1000, input_type='word', data_type='int32'):
tgt = []
sizes = []
count, ignored = 0, 0
print('Processing %s ...' % (tgt_file))
tgtf = open(tgt_file)
    # fill_ expects a numeric token id; look up the index of the EOS token string
    # (assuming onmt.Dict provides lookup(), as in OpenNMT-style dictionaries)
    eos = torch.LongTensor(1).fill_(tgt_dicts.lookup(opt.tgt_eos_token))
# print(eos.size())
tensors = [eos]
# find the number of words in the sentence
while True:
tline = tgtf.readline()
# normal end of file
if tline == "":
break
tline = tline.strip()
# source and/or target are empty
if tline == "":
print('WARNING: ignoring an empty line (' + str(count + 1) + ')')
continue
if input_type == 'word':
tgt_words = tline.split()
elif input_type == 'char':
tgt_words = split_line_by_char(tline)
tensor = tgt_dicts.convertToIdx(tgt_words,
opt.tgt_unk_token,
None,
opt.tgt_eos_token,
type=data_type)
# print(tensor.size())
tensors.append(tensor)
count = count + 1
if count % opt.report_every == 0:
print('... %d sentences prepared' % count)
tgtf.close()
# concatenate all tensors into one
tensor = torch.cat(tensors, dim=-1)
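    # resulting layout: <eos> s1 <eos> s2 <eos> ... -- one flat token stream
    # with sentences separated by EOS ids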
return tensor
def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=64, max_tgt_length=64,
add_bos=True, data_type='int64', num_workers=1, verbose=False,
external_tokenizer=None, src_lang=None, tgt_lang=None, lang_list=[],
early_save=False, savedir="", mirror=False, mirror_savedir=""):
src, tgt = [], []
src_sizes = []
tgt_sizes = []
if type(lang_list) is dict:
lang_list = sorted(list(lang_list.keys()))
print("[INFO] Binarizing file %s ..." % src_file)
binarized_src = Binarizer.binarize_file(src_file, src_dicts, tokenizer,
bos_word=None, eos_word=None,
data_type=data_type,
num_workers=num_workers, verbose=verbose,
external_tokenizer=external_tokenizer,
lang=src_lang, lang_list=lang_list, target=False
)
if early_save:
os.makedirs(savedir, exist_ok=True)
if mirror:
os.makedirs(mirror_savedir, exist_ok=True)
src_len = len(binarized_src['data'])
print("Saving source data to %s .... with %d entries" % (savedir, src_len))
if data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "src"), dtype=dtype)
# add item from training data to the indexed data
for tensor in binarized_src['data']:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "src"))
del binarized_src['data']
gc.collect()
np_array = np.asarray(binarized_src['sizes'])
np.save(os.path.join(savedir, "data.%s.npy" % "src_sizes"), np_array)
del binarized_src
del indexed_data
del np_array
gc.collect()
if mirror:
print("Saving mirrrored target data to %s .... with %d entries" % (mirror_savedir, src_len))
source = os.path.join(savedir, "data.%s.bin" % "src")
target = os.path.join(mirror_savedir, "data.%s.bin" % "tgt")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.idx" % "src")
target = os.path.join(mirror_savedir, "data.%s.idx" % "tgt")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.npy" % "src_sizes")
target = os.path.join(mirror_savedir, "data.%s.npy" % "tgt_sizes")
os.symlink(os.path.abspath(source), target)
if add_bos:
tgt_bos_word = opt.tgt_bos_token
else:
tgt_bos_word = None
print("[INFO] Binarizing file %s ..." % tgt_file)
binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
data_type=data_type,
num_workers=num_workers, verbose=verbose,
external_tokenizer=external_tokenizer,
lang=tgt_lang, lang_list=lang_list, target=True
)
if early_save:
tgt_len = len(binarized_tgt['data'])
        assert tgt_len == src_len, "Number of samples doesn't match between source and target"
print("Saving target data to %s .... with %d samples" % (savedir, tgt_len))
if data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "tgt"), dtype=dtype)
# add item from training data to the indexed data
for tensor in binarized_tgt['data']:
indexed_data.add_item(tensor)
indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "tgt"))
del binarized_tgt['data']
del indexed_data
gc.collect()
np_array = np.asarray(binarized_tgt['sizes'])
np.save(os.path.join(savedir, "data.%s.npy" % "tgt_sizes"), np_array)
del binarized_tgt
del np_array
gc.collect()
if mirror:
print("Saving mirrored source data to %s .... with %d entries" % (mirror_savedir, src_len))
source = os.path.join(savedir, "data.%s.bin" % "tgt")
target = os.path.join(mirror_savedir, "data.%s.bin" % "src")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.idx" % "tgt")
target = os.path.join(mirror_savedir, "data.%s.idx" % "src")
os.symlink(os.path.abspath(source), target)
source = os.path.join(savedir, "data.%s.npy" % "tgt_sizes")
target = os.path.join(mirror_savedir, "data.%s.npy" % "src_sizes")
os.symlink(os.path.abspath(source), target)
src, tgt, src_sizes, tgt_sizes = None, None, None, None
else:
src = binarized_src['data']
src_sizes = binarized_src['sizes']
tgt = binarized_tgt['data']
tgt_sizes = binarized_tgt['sizes']
    # currently nothing is filtered out here
    ignored = 0
    # when early_save is used the tensors were already written to disk and freed,
    # so fall back to the saved count instead of len(None)
    n_sentences = len(src) if src is not None else src_len
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (n_sentences, ignored, max_src_length, max_tgt_length))
return src, tgt, src_sizes, tgt_sizes
def make_asr_data(src_file, tgt_file, tgt_dicts, tokenizer,
max_src_length=64, max_tgt_length=64, add_bos=True, data_type='int64', num_workers=1, verbose=False,
input_type='word', stride=1, concat=4, prev_context=0, fp16=False, reshape=True,
asr_format="scp", output_format="raw",
external_tokenizer=None, src_lang=None, tgt_lang=None, lang_list=[]):
src, tgt = [], []
src_sizes = []
tgt_sizes = []
count, ignored = 0, 0
n_unk_words = 0
if add_bos:
tgt_bos_word = opt.tgt_bos_token
else:
tgt_bos_word = None
if tgt_file is not None:
print("[INFO] Binarizing file %s ..." % tgt_file)
binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
data_type=data_type,
num_workers=num_workers, verbose=verbose,
external_tokenizer=external_tokenizer,
lang=tgt_lang, lang_list=lang_list, target=True)
tgt = binarized_tgt['data']
tgt_sizes = binarized_tgt['sizes']
ignored = 0
else:
tgt = None
tgt_sizes = None
print('[INFO] Processing %s ...' % src_file)
# num_workers = num_workers if asr_format in ['scp', 'kaldi'] else 1
# speech binarizer has to be 1 thread at the moment
binarized_src = SpeechBinarizer.binarize_file(src_file, input_format=asr_format,
output_format=output_format, concat=concat,
stride=stride, fp16=fp16, prev_context=prev_context,
num_workers=num_workers, verbose=verbose)
src = binarized_src['data']
src_sizes = binarized_src['sizes']
    if tgt_file is not None and len(src_sizes) != len(tgt_sizes):
        print("Warning: data size mismatched. Src: %d . Tgt: %d" % (len(src_sizes), len(tgt_sizes)))
print(('Prepared %d sentences ' +
'(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
(len(src), ignored, max_src_length, max_tgt_length))
return src, tgt, src_sizes, tgt_sizes
def main():
dicts = {}
tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
# We can load the dictionary from another project to ensure consistency
if opt.load_dict is not None and len(opt.load_dict) > 0:
print("[INFO] Loading dictionary from ... %s" % opt.load_dict)
dicts = torch.load(opt.load_dict)
# construct set of languages from the training languages
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
langs = (src_langs + tgt_langs)
langs = sorted(list(set(langs)))
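    # e.g. (hypothetical) -train_src_lang "en|en" -train_tgt_lang "de|fr" yields
    # langs == ['de', 'en', 'fr'], mapped below to ids {'de': 0, 'en': 1, 'fr': 2}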
    if len(opt.train_src_atbs) > 0:
src_atbs = opt.train_src_atbs.split("|")
tgt_atbs = opt.train_tgt_atbs.split("|")
atbs = (src_atbs + tgt_atbs)
atbs = sorted(list(set(atbs)))
else:
atbs = []
if not opt.load_dict:
dicts['langs'] = dict()
for lang in langs:
idx = len(dicts['langs'])
dicts['langs'][lang] = idx
dicts['atbs'] = dict()
for atb in atbs:
idx = len(dicts['atbs'])
dicts['atbs'][atb] = idx
else:
if 'langs' not in dicts:
dicts['langs'] = dict()
else:
print(dicts['langs'])
print("Adding languages to existing dictionary ...")
for lang in langs:
idx = len(dicts['langs'])
if lang not in dicts['langs']:
dicts['langs'][lang] = idx
if 'atbs' not in dicts:
dicts['atbs'] = dict()
else:
print("Adding attributes to existing dictionary ...")
for atb in atbs:
idx = len(dicts['atbs'])
if atb not in dicts['atbs']:
dicts['atbs'][atb] = idx
print("Languages: ", dicts['langs'])
print("Attributes: ", dicts['atbs'])
start = time.time()
src_train_files = opt.train_src.split("|")
tgt_train_files = opt.train_tgt.split("|")
# for ASR and LM we only need to build vocab for the 'target' language
if opt.asr or opt.lm:
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
elif opt.join_vocab:
dicts['src'] = init_vocab('source', set(src_train_files + tgt_train_files), opt.src_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
dicts['tgt'] = dicts['src']
else:
dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab,
opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Vocabulary generated after %s" % elapse)
if opt.lm:
print('Preparing training language model ...')
train = dict()
train['tgt'] = make_lm_data(opt.train_tgt,
dicts['tgt'])
train['src'] = None
valid = dict()
valid['tgt'] = make_lm_data(opt.valid_tgt,
dicts['tgt'])
valid['src'] = None
train['src_sizes'] = None
train['tgt_sizes'] = None
valid['src_sizes'] = None
valid['tgt_sizes'] = None
elif opt.asr:
print('Preparing training acoustic model ...')
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
src_atbs = opt.train_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
tgt_atbs = opt.train_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(src_atbs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
assert len(tgt_input_files) == len(tgt_atbs)
past_src_files = opt.past_train_src.split("|")
idx = 0
n_input_files = len(src_input_files)
# Training data ###################################################################
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_atb'], train['tgt_atb'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
data = dict()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
train['past_src'] = list()
train['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
data_name = "train.%i.%s-%s" % (idx, src_lang, tgt_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
if opt.multi_dataset and opt.resume:
print("Checking existing path %s ..." % dataset_path)
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
n_samples = len(src_data)
src_atb_data, tgt_atb_data = None, None
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                # by default it's 0
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
# processing the previous segment
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
add_bos=not opt.no_bos,
fp16=opt.fp16,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
if opt.multi_dataset:
                    data['prev_src'] = past_src_data
else:
train['past_src'] += past_src_data
train['past_src_sizes'] += past_src_sizes
# Finalizing Training data ###################################################################
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
if len(atbs) > 0:
data['src_atb'] = src_atb_data
data['tgt_atb'] = tgt_atb_data
print("Saving training set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "train.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
if len(atbs) > 0:
train['src_atb'] += src_atb_data
train['tgt_atb'] += tgt_atb_data
# Validation data ###################################################################
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
past_src_files = opt.past_valid_src.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
src_atbs = opt.valid_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
tgt_atbs = opt.valid_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
idx = 0
n_input_files = len(src_input_files)
data = dict()
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
valid['src_atb'], valid['tgt_atb'] = list(), list()
        if opt.past_valid_src and len(past_src_files) == len(src_input_files):
valid['past_src'] = list()
valid['past_src_sizes'] = list()
for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
data_name = "valid.%i.%s-%s" % (idx, src_lang, tgt_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
if opt.multi_dataset and opt.resume:
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
src_data, tgt_data, src_sizes, tgt_sizes = make_asr_data(src_file, tgt_file,
dicts['tgt'], tokenizer,
max_src_length=max(1024, opt.src_seq_length),
max_tgt_length=max(1024, opt.tgt_seq_length),
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
n_samples = len(src_data)
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                # by default it's 0
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
if len(atbs) > 0:
src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
# validation past file
            if opt.past_valid_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_asr_data(past_src_file, None, None, None,
input_type=opt.input_type,
stride=opt.stride, concat=opt.concat,
prev_context=opt.previous_context,
fp16=opt.fp16,
add_bos=not opt.no_bos,
asr_format=opt.asr_format,
output_format=opt.format,
num_workers=opt.num_threads,
external_tokenizer=opt.external_tokenizer,
tgt_lang=tgt_lang, verbose=opt.verbose,
lang_list=dicts['langs'])
valid['past_src'] += past_src_data
valid['past_src_sizes'] += past_src_sizes
# Finalizing Validation data ... #########################
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
if len(atbs) > 0:
data['src_atb'] = src_atb_data
data['tgt_atb'] = tgt_atb_data
print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
if len(atbs) > 0:
valid['src_atb'] += src_atb_data
valid['tgt_atb'] += tgt_atb_data
else: # MACHINE TRANSLATION DATA
src_input_files = opt.train_src.split("|")
tgt_input_files = opt.train_tgt.split("|")
src_langs = opt.train_src_lang.split("|")
tgt_langs = opt.train_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
past_src_files = opt.past_train_src.split("|")
n_input_files = len(src_input_files)
idx = 0
data = dict()
train = dict()
train['src'], train['tgt'] = list(), list()
train['src_sizes'], train['tgt_sizes'] = list(), list()
train['src_lang'], train['tgt_lang'] = list(), list()
if opt.past_train_src and len(past_src_files) == len(src_input_files):
train['past_src'] = list()
train['past_src_sizes'] = list()
start = time.time()
print('Binarizing data to train translation models...')
for i, (src_file, tgt_file, src_lang, tgt_lang) in \
enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            dataset_idx = idx if not opt.multi_mirror else 2 * idx
            data_name = "train.%i.%s-%s" % (dataset_idx, src_lang, tgt_lang)
            mirrored_data_name = "train.%i.%s-%s" % (dataset_idx + 1, tgt_lang, src_lang)
dataset_path = os.path.join(dirname(opt.save_data), data_name)
mirrored_dataset_path = os.path.join(dirname(opt.save_data), mirrored_data_name)
if opt.multi_dataset and opt.resume:
print("Checking existing path %s ..." % dataset_path)
if os.path.exists(dataset_path):
print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
idx = idx + 1
continue
else:
os.makedirs(dataset_path, exist_ok=True)
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'],
early_save=opt.multi_dataset,
savedir=dataset_path,
mirror=opt.multi_mirror,
mirror_savedir=mirrored_dataset_path)
#TODO: check
# if n_input_files == 1:
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
assert src_data is not None
n_samples = len(src_data)
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# processing the previous segment
if opt.past_train_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
dicts['src'], dicts['src'], tokenizer,
max_src_length=opt.src_seq_length,
max_tgt_length=opt.tgt_seq_length,
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'])
if opt.multi_dataset:
                    data['prev_src'] = past_src_data
else:
train['past_src'] += past_src_data
train['past_src_sizes'] += past_src_sizes
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
print("Saving training set %i %s-%s to disk ..." % (dataset_idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = dataset_path
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, data, opt.format, dicts, opt.src_type)
if opt.multi_mirror:
mdata = dict()
mdata['src'] = tgt_data
mdata['tgt'] = src_data
mdata['tgt_sizes'] = src_sizes
mdata['src_sizes'] = tgt_sizes
mdata['tgt_lang'] = src_lang_data
mdata['src_lang'] = tgt_lang_data
print("Saving training set %i %s-%s to disk ..." % (dataset_idx + 1, tgt_lang, src_lang))
# take basedir from opt.save_data
path = mirrored_dataset_path
os.makedirs(path, exist_ok=True)
# save data immediately
# TODO: save the prev src as well
save_dataset(path, mdata, opt.format, dicts, opt.src_type)
idx = idx + 1
del data
data = dict()
else:
train['src'] += src_data
train['tgt'] += tgt_data
train['src_sizes'] += src_sizes
train['tgt_sizes'] += tgt_sizes
train['src_lang'] += src_lang_data
train['tgt_lang'] += tgt_lang_data
print('Preparing validation ...')
src_input_files = opt.valid_src.split("|")
tgt_input_files = opt.valid_tgt.split("|")
past_src_files = opt.past_valid_src.split("|")
src_langs = opt.valid_src_lang.split("|")
tgt_langs = opt.valid_tgt_lang.split("|")
assert len(src_input_files) == len(src_langs)
assert len(src_input_files) == len(tgt_input_files)
assert len(tgt_input_files) == len(tgt_langs)
n_input_files = len(src_input_files)
idx = 0
data = dict()
valid = dict()
valid['src'], valid['tgt'] = list(), list()
valid['src_sizes'], valid['tgt_sizes'] = list(), list()
valid['src_lang'], valid['tgt_lang'] = list(), list()
        if opt.past_valid_src and len(past_src_files) == len(src_input_files):
valid['past_src'] = list()
valid['past_src_sizes'] = list()
        for i, (src_file, tgt_file, src_lang, tgt_lang) in enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(src_file, tgt_file,
dicts['src'], dicts['tgt'], tokenizer,
max_src_length=max(1024,
opt.src_seq_length),
max_tgt_length=max(1024,
opt.tgt_seq_length),
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs']
)
n_samples = len(src_data)
#TODO: this has to be changed
# if n_input_files == 1:
if n_input_files == 1 or opt.multi_dataset:
# For single-file cases we only need to have 1 language per file
# which will be broadcasted
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
else:
# each sample will have a different language id
src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
# validation past file
            if opt.past_valid_src and len(past_src_files) == len(src_input_files):
past_src_file = past_src_files[i]
past_src_data, _, past_src_sizes, _ = make_translation_data(past_src_file, '/dev/null',
dicts['src'], dicts['src'], tokenizer,
max_src_length=max(1024,
opt.src_seq_length),
max_tgt_length=max(1024,
opt.tgt_seq_length),
add_bos=(not opt.no_bos),
data_type=opt.data_type,
num_workers=opt.num_threads,
verbose=opt.verbose,
external_tokenizer=opt.external_tokenizer,
src_lang=src_lang,
tgt_lang=tgt_lang,
lang_list=dicts['langs'])
valid['past_src'] += past_src_data
valid['past_src_sizes'] += past_src_sizes
if opt.multi_dataset:
data['src'] = src_data
data['tgt'] = tgt_data
data['src_sizes'] = src_sizes
data['tgt_sizes'] = tgt_sizes
data['src_lang'] = src_lang_data
data['tgt_lang'] = tgt_lang_data
print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
# take basedir from opt.save_data
path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
os.makedirs(path, exist_ok=True)
# save data immediately
save_dataset(path, data, opt.format, dicts, opt.src_type)
idx = idx + 1
else:
valid['src'] += src_data
valid['tgt'] += tgt_data
valid['src_sizes'] += src_sizes
valid['tgt_sizes'] += tgt_sizes
valid['src_lang'] += src_lang_data
valid['tgt_lang'] += tgt_lang_data
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Binarization finished after %s" % elapse)
    if opt.src_vocab is None and not opt.asr and not opt.lm:
save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.tgt_vocab is None:
save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
if opt.multi_dataset:
# SAVE DATA
print("Saving dictionary to %s" % (opt.save_data + '.dict.pt'))
torch.save(dicts, opt.save_data + '.dict.pt')
        if opt.src_vocab is None and not opt.asr and not opt.lm:
save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
if opt.tgt_vocab is None:
save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
print("Finished.")
else:
if opt.format in ['raw', 'bin']:
print('Saving data to \'' + opt.save_data + '.train.pt\'...')
save_data = {'dicts': dicts,
'type': opt.src_type,
'train': train,
'valid': valid}
torch.save(save_data, opt.save_data + '.train.pt')
print("Done")
elif opt.format in ['scp', 'scpmem', 'wav']:
print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
assert opt.asr, "ASR data format is required for this memory indexed format"
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['tgt', 'src_lang', 'tgt_lang']:
if train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
del train_data
if valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
for set_ in ['src_sizes', 'tgt_sizes']:
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
if valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
if 'past_src' in train and len(train['past_src']) > 0:
set_ = 'past_src_sizes'
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
                if set_ in valid and valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
# Finally save the audio path
save_data = {'train': train['src'],
'valid': valid['src']}
# remember to take into account the past information
if 'past_src' in train and len(train['past_src']) > 0:
save_data['train_past'] = train['past_src']
save_data['valid_past'] = valid['past_src']
if opt.format in ['wav']:
torch.save(save_data, opt.save_data + '.wav_path.pt')
else:
torch.save(save_data, opt.save_data + '.scp_path.pt')
print("Done")
elif opt.format in ['mmap', 'mmem']:
print('Saving data to memory indexed data files')
from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
# save dicts in this format
torch.save(dicts, opt.save_data + '.dict.pt')
# binarize the training set first
for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'past_src']:
if set_ not in train or train[set_] is None:
continue
if opt.data_type == 'int64':
dtype = np.int64
else:
dtype = np.int32
train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in train[set_]:
train_data.add_item(tensor)
train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
del train_data
                if set_ not in valid or valid[set_] is None:
continue
valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
# add item from training data to the indexed data
for tensor in valid[set_]:
valid_data.add_item(tensor)
valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
del valid_data
            for set_ in ['src_sizes', 'tgt_sizes']:
                if set_ in train and train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)
                if set_ in valid and valid[set_] is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)
if 'past_src' in train and len(train['past_src']) > 0:
set_ = 'past_src_sizes'
if train[set_] is not None:
np_array = np.asarray(train[set_])
np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
else:
print("Training %s not found " % set_)
                if set_ in valid and valid[set_] is not None:
np_array = np.asarray(valid[set_])
np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
else:
print("Validation %s not found " % set_)
else:
raise NotImplementedError
if __name__ == "__main__":
main()
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
| 63,944
| 43.498956
| 119
|
py
|
NMTGMinor
|
NMTGMinor-master/flask_online.py
|
#!/usr/bin/env python
from onmt.online_translator import RecognizerParameter, ASROnlineTranslator
from flask import Flask, request
import torch
import numpy as np
import math
import sys
import json
import threading
import queue
import uuid
import traceback
import subprocess
host = sys.argv[1] # 192.168.0.72
port = sys.argv[2] # 5051
if len(sys.argv) <= 3:
    filename = "model.conf"
else:
    filename = sys.argv[3]
# read the config, find the model path, and record the model file's `ls -l`
# output so the /asr/version endpoint can report which model is loaded
conf_data = open(filename, "r").read().split("\n")
model = None
for d in conf_data:
    d = d.split()
    if len(d) == 2 and d[0] == "model":
        model = d[1]
        break
if model is not None:
    conf_data.append("model_ls " + str(subprocess.run(("ls -l " + model).split(), capture_output=True).stdout))
conf_data = "\n".join(conf_data)
app = Flask(__name__)
def create_unique_list(my_list):
my_list = list(set(my_list))
return my_list
def initialize_model():
model = ASROnlineTranslator(filename)
print("ASR initialized")
max_batch_size = 16
return model, max_batch_size
def use_model(reqs):
if len(reqs) == 1:
req = reqs[0]
audio_tensor, prefix, input_language, output_language = req.get_data()
model.set_language(input_language, output_language)
hypo = model.translate(audio_tensor, [prefix])
result = {"hypo": hypo}
req.publish(result)
else:
audio_tensors = list()
prefixes = list()
input_languages = list()
output_languages = list()
batch_runnable = False
for req in reqs:
audio_tensor, prefix, input_language, output_language = req.get_data()
model.set_language(input_language, output_language)
audio_tensors.append(audio_tensor)
prefixes.append(prefix)
input_languages.append(input_language)
output_languages.append(output_language)
unique_prefix_list = create_unique_list(prefixes)
unique_input_languages = create_unique_list(input_languages)
unique_output_languages = create_unique_list(output_languages)
if len(unique_prefix_list) == 1 and len(unique_input_languages) == 1 and len(unique_output_languages) == 1:
batch_runnable = True
if batch_runnable:
model.set_language(input_languages[0], output_languages[0])
hypos = model.translate_batch(audio_tensors, prefixes)
for req, hypo in zip(reqs, hypos):
result = {"hypo": hypo}
req.publish(result)
else:
for req, audio_tensor, prefix, input_language, output_language \
in zip(reqs, audio_tensors, prefixes, input_languages, output_languages):
model.set_language(input_language, output_language)
hypo = model.translate(audio_tensor, [prefix])
result = {"hypo": hypo}
req.publish(result)
def run_decoding():
while True:
reqs = [queue_in.get()]
while not queue_in.empty() and len(reqs) < max_batch_size:
req = queue_in.get()
reqs.append(req)
if req.priority >= 1:
break
print("Batch size:",len(reqs),"Queue size:",queue_in.qsize())
try:
use_model(reqs)
except Exception as e:
print("An error occured during model inference")
traceback.print_exc()
for req in reqs:
req.publish({"hypo":"", "status":400})
class Priority:
next_index = 0
def __init__(self, priority, id, condition, data):
self.index = Priority.next_index
Priority.next_index += 1
self.priority = priority
self.id = id
self.condition = condition
self.data = data
def __lt__(self, other):
return (-self.priority, self.index) < (-other.priority, other.index)
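    # e.g. Priority(priority=1, ...) sorts before Priority(priority=0, ...);
    # within the same priority the increasing index preserves FIFO order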
def get_data(self):
return self.data
def publish(self, result):
dict_out[self.id] = result
try:
with self.condition:
self.condition.notify()
        except Exception:
            print("ERROR: Could not publish result")
def pcm_s16le_to_tensor(pcm_s16le):
audio_tensor = np.frombuffer(pcm_s16le, dtype=np.int16)
audio_tensor = torch.from_numpy(audio_tensor)
audio_tensor = audio_tensor.float() / math.pow(2, 15)
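    # e.g. an int16 sample of 16384 becomes 16384 / 2**15 = 0.5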
audio_tensor = audio_tensor.unsqueeze(1) # shape: frames x 1 (1 channel)
return audio_tensor
# corresponds to an asr_server "http://$host:$port/asr/infer/en,en" in StreamASR.py
# use None when no input- or output language should be specified
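# Illustrative request (hypothetical host/port and file name):
#   curl -X POST http://localhost:5051/asr/infer/en,de -F "pcm_s16le=@audio.raw"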
@app.route("/asr/infer/<input_language>,<output_language>", methods=["POST"])
def inference(input_language, output_language):
pcm_s16le: bytes = request.files.get("pcm_s16le").read()
prefix = request.files.get("prefix") # can be None
if prefix is not None:
prefix: str = prefix.read().decode("utf-8")
# calculate features corresponding to a torchaudio.load(filepath) call
audio_tensor = pcm_s16le_to_tensor(pcm_s16le)
priority = request.files.get("priority") # can be None
try:
priority = int(priority.read()) # used together with priority queue
    except (AttributeError, ValueError):
priority = 0
condition = threading.Condition()
with condition:
id = str(uuid.uuid4())
data = (audio_tensor,prefix,input_language,output_language)
queue_in.put(Priority(priority,id,condition,data))
condition.wait()
result = dict_out.pop(id)
status = 200
    if "status" in result:
        status = result.pop("status")
# result has to contain a key "hypo" with a string as value (other optional keys are possible)
return json.dumps(result), status
# called during automatic evaluation of the pipeline to store worker information
@app.route("/asr/version", methods=["POST"])
def version():
# return dict or string (as first argument)
return conf_data, 200
model, max_batch_size = initialize_model()
queue_in = queue.PriorityQueue()
dict_out = {}
decoding = threading.Thread(target=run_decoding)
decoding.daemon = True
decoding.start()
app.run(host=host, port=port)
| 6,118
| 29.595
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/flask_mt.py
|
#!/usr/bin/env python
# from onmt.online_translator import RecognizerParameter, ASROnlineTranslator
from onmt.online_translator import TranslatorParameter, OnlineTranslator
from flask import Flask, request
import torch
import numpy as np
import math
import sys
import json
import threading
import queue
import uuid
import traceback
import subprocess
host = sys.argv[1]  # e.g. 192.168.0.72
port = sys.argv[2]  # e.g. 5051
print(host, port)
if len(sys.argv) <= 3:
    filename = "model.conf"
else:
    filename = sys.argv[3]
# read the config, find the model path, and record the model file's `ls -l`
# output so the version endpoint can report which model is loaded
conf_data = open(filename, "r").read().split("\n")
model = None
for d in conf_data:
    d = d.split()
    if len(d) == 2 and d[0] == "model":
        model = d[1]
        break
if model is not None:
    conf_data.append("model_ls " + str(subprocess.run(("ls -l " + model).split(), capture_output=True).stdout))
conf_data = "\n".join(conf_data)
app = Flask(__name__)
def create_unique_list(my_list):
"""
This function is used in checking if the prefixes and languages are the same or not
Args:
my_list:
Returns:
"""
my_list = list(set(my_list))
return my_list
def initialize_model():
"""
Build the translator
"""
model = OnlineTranslator(filename)
print("MT Model initialized")
max_batch_size = 16
return model, max_batch_size
def use_model(reqs):
if len(reqs) == 1:
req = reqs[0]
input_text, prefix, input_language, output_language = req.get_data()
model.set_language(input_language, output_language)
hypo = model.translate(input_text, [prefix])
result = {"hypo": hypo}
req.publish(result)
else:
input_texts = list()
prefixes = list()
input_languages = list()
output_languages = list()
batch_runnable = False
for req in reqs:
input_text, prefix, input_language, output_language = req.get_data()
model.set_language(input_language, output_language)
input_texts.append(input_text)
prefixes.append(prefix)
input_languages.append(input_language)
output_languages.append(output_language)
unique_prefix_list = create_unique_list(prefixes)
unique_input_languages = create_unique_list(input_languages)
unique_output_languages = create_unique_list(output_languages)
if len(unique_prefix_list) == 1 and len(unique_input_languages) == 1 and len(unique_output_languages) == 1:
batch_runnable = True
if batch_runnable:
model.set_language(input_languages[0], output_languages[0])
hypos = model.translate_batch(input_texts, prefixes)
for req, hypo in zip(reqs, hypos):
result = {"hypo": hypo}
req.publish(result)
else:
for req, input_text, prefix, input_language, output_language \
in zip(reqs, input_texts, prefixes, input_languages, output_languages):
model.set_language(input_language, output_language)
hypo = model.translate(input_text, [prefix])
result = {"hypo": hypo}
req.publish(result)
def run_decoding():
while True:
reqs = [queue_in.get()]
while not queue_in.empty() and len(reqs) < max_batch_size:
req = queue_in.get()
reqs.append(req)
if req.priority >= 1:
break
print("Batch size:",len(reqs),"Queue size:",queue_in.qsize())
try:
use_model(reqs)
except Exception as e:
print("An error occured during model inference")
traceback.print_exc()
for req in reqs:
req.publish({"hypo":"", "status":400})
class Priority:
next_index = 0
def __init__(self, priority, id, condition, data):
self.index = Priority.next_index
Priority.next_index += 1
self.priority = priority
self.id = id
self.condition = condition
self.data = data
def __lt__(self, other):
return (-self.priority, self.index) < (-other.priority, other.index)
def get_data(self):
return self.data
def publish(self, result):
dict_out[self.id] = result
try:
with self.condition:
self.condition.notify()
        except Exception:
            print("ERROR: Could not publish result")
# corresponds to an asr_server "http://$host:$port/asr/infer/en,en" in StreamASR.py
# use None when no input- or output language should be specified
# @app.route("/asr/infer/<input_language>,<output_language>", methods=["POST"])
@app.route("/predictions/<input_language>,<output_language>", methods=["POST"])
def inference(input_language, output_language):
# pcm_s16le: bytes = request.files.get("pcm_s16le").read()
# prefix = request.files.get("prefix") # can be None
# if prefix is not None:
# prefix: str = prefix.read().decode("utf-8")
    # note: in ASR/SLT the payload arrives via "request.files",
    # while in MT it arrives via "request.form"
    input_text = request.form.get("text")  # can be None
    try:
        prefix = request.form["prefix"]  # can be None
    except KeyError:
        prefix = None
    print("RECEIVED INPUT TEXT:", input_text)
    try:
        # form values are strings, so parse directly (used together with priority queue)
        priority = int(request.form["priority"])
    except (KeyError, ValueError):
        priority = 0
condition = threading.Condition()
with condition:
id = str(uuid.uuid4())
# the same with SLT
data = (input_text, prefix, input_language, output_language)
        queue_in.put(Priority(priority, id, condition, data))
condition.wait()
result = dict_out.pop(id)
status = 200
    if "status" in result:
        status = result.pop("status")
# result has to contain a key "hypo" with a string as value (other optional keys are possible)
return json.dumps(result), status
# called during automatic evaluation of the pipeline to store worker information
@app.route("/models/<input_language>,<output_language>", methods=["GET"])
def version(input_language, output_language):
# print(input_language, output_language)
# return dict or string (as first argument)
return conf_data, 200
model, max_batch_size = initialize_model()
queue_in = queue.PriorityQueue()
dict_out = {}
decoding = threading.Thread(target=run_decoding)
decoding.daemon = True
decoding.start()
app.run(host=host, port=port)
| 6,523
| 27.867257
| 115
|
py
|
NMTGMinor
|
NMTGMinor-master/train_classify.py
|
#!/usr/bin/env python
from __future__ import division
import onmt
import onmt.markdown
import onmt.modules
import argparse
import torch
import time, datetime
from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
from onmt.data.scp_dataset import SCPIndexDataset
from onmt.data.wav_dataset import WavDataset
from onmt.modules.loss import NMTLossFunc, NMTAndCTCLossFunc
from options import make_parser
from collections import defaultdict
from onmt.constants import add_tokenidx
import os
import numpy as np
parser = argparse.ArgumentParser(description='train_classify.py')
onmt.markdown.add_md_help_argument(parser)
# Please look at the options file to see the options regarding models and data
parser = make_parser(parser)
opt = parser.parse_args()
# An ugly hack to have weight norm on / off
onmt.constants.weight_norm = opt.weight_norm
onmt.constants.checkpointing = opt.checkpointing
onmt.constants.max_position_length = opt.max_position_length
# Use static dropout if checkpointing > 0
if opt.checkpointing > 0:
onmt.constants.static = True
if torch.cuda.is_available() and not opt.gpus:
print("WARNING: You have a CUDA device, should run with -gpus 0")
torch.manual_seed(opt.seed)
def numpy_to_torch(tensor_list):
out_list = list()
for tensor in tensor_list:
if isinstance(tensor, np.ndarray):
out_list.append(torch.from_numpy(tensor))
else:
out_list.append(tensor)
return out_list
def run_process(gpu, train_data, valid_data, dicts, opt, checkpoint):
# from onmt.train_utils.mp_trainer import Trainer
from onmt.train_utils.classify_trainer import ClassifierTrainer
trainer = ClassifierTrainer(gpu, train_data, valid_data, dicts, opt)
trainer.run(checkpoint=checkpoint)
def main():
if not opt.multi_dataset:
if opt.data_format in ['bin', 'raw']:
start = time.time()
if opt.data.endswith(".train.pt"):
print("Loading data from '%s'" % opt.data)
dataset = torch.load(opt.data)
else:
print("Loading data from %s" % opt.data + ".train.pt")
dataset = torch.load(opt.data + ".train.pt")
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Done after %s" % elapse)
dicts = dataset['dicts']
onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
# For backward compatibility
train_dict = defaultdict(lambda: None, dataset['train'])
valid_dict = defaultdict(lambda: None, dataset['valid'])
if train_dict['src_lang'] is not None:
assert 'langs' in dicts
train_src_langs = train_dict['src_lang']
train_tgt_langs = train_dict['tgt_lang']
else:
# allocate new languages
dicts['langs'] = {'src': 0, 'tgt': 1}
train_src_langs = list()
train_tgt_langs = list()
                # Allocate one for the bilingual case
train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
train_data = onmt.Dataset(numpy_to_torch(train_dict['src']), numpy_to_torch(train_dict['tgt']),
train_dict['src_sizes'], train_dict['tgt_sizes'],
train_src_langs, train_tgt_langs,
batch_size_words=opt.batch_size_words,
data_type=dataset.get("type", "text"), sorting=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
upsampling=opt.upsampling,
num_split=1)
if valid_dict['src_lang'] is not None:
assert 'langs' in dicts
valid_src_langs = valid_dict['src_lang']
valid_tgt_langs = valid_dict['tgt_lang']
else:
# allocate new languages
valid_src_langs = list()
valid_tgt_langs = list()
                # Allocate one for the bilingual case
valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
valid_data = onmt.Dataset(numpy_to_torch(valid_dict['src']), numpy_to_torch(valid_dict['tgt']),
valid_dict['src_sizes'], valid_dict['tgt_sizes'],
valid_src_langs, valid_tgt_langs,
batch_size_words=opt.batch_size_words,
data_type=dataset.get("type", "text"), sorting=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
cleaning=True,
upsampling=opt.upsampling)
print(' * number of training sentences. %d' % len(dataset['train']['src']))
print(' * maximum batch size (words per batch). %d' % opt.batch_size_words)
# Loading asr data structures
elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
print("Loading memory mapped data files ....")
start = time.time()
from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
from onmt.data.scp_dataset import SCPIndexDataset
dicts = torch.load(opt.data + ".dict.pt")
# onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
if opt.data_format in ['scp', 'scpmem']:
audio_data = torch.load(opt.data + ".scp_path.pt")
elif opt.data_format in ['wav']:
audio_data = torch.load(opt.data + ".wav_path.pt")
# allocate languages if not
if 'langs' not in dicts:
dicts['langs'] = {'src': 0, 'tgt': 1}
else:
print(dicts['langs'])
train_path = opt.data + '.train'
if opt.data_format in ['scp', 'scpmem']:
train_src = SCPIndexDataset(audio_data['train'], concat=opt.concat)
if 'train_past' in audio_data:
past_train_src = SCPIndexDataset(audio_data['train_past'],
concat=opt.concat, shared_object=train_src)
else:
past_train_src = None
elif opt.data_format in ['wav']:
train_src = WavDataset(audio_data['train'])
past_train_src = None
else:
train_src = MMapIndexedDataset(train_path + '.src')
past_train_src = None
train_tgt = MMapIndexedDataset(train_path + '.tgt')
# check the lang files if they exist (in the case of multi-lingual models)
if os.path.exists(train_path + '.src_lang.bin'):
assert 'langs' in dicts
train_src_langs = MMapIndexedDataset(train_path + '.src_lang')
train_tgt_langs = MMapIndexedDataset(train_path + '.tgt_lang')
else:
train_src_langs = list()
train_tgt_langs = list()
# Allocate a Tensor(1) for the bilingual case
train_src_langs.append(torch.Tensor([dicts['langs']['src']]))
train_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
# check the length files if they exist
if os.path.exists(train_path + '.src_sizes.npy'):
train_src_sizes = np.load(train_path + '.src_sizes.npy')
train_tgt_sizes = np.load(train_path + '.tgt_sizes.npy')
else:
train_src_sizes, train_tgt_sizes = None, None
# check the length files if they exist
if os.path.exists(train_path + '.past_src_sizes.npy'):
past_train_src_sizes = np.load(train_path + '.past_src_sizes.npy')
else:
past_train_src_sizes = None
if opt.data_format in ['scp', 'scpmem']:
data_type = 'audio'
elif opt.data_format in ['wav']:
data_type = 'wav'
else:
data_type = 'text'
train_data = onmt.Dataset(train_src,
train_tgt,
train_src_sizes, train_tgt_sizes,
train_src_langs, train_tgt_langs,
batch_size_words=opt.batch_size_words,
data_type=data_type, sorting=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
cleaning=True, verbose=True,
input_size=opt.input_size,
past_src_data=past_train_src,
min_src_len=0, min_tgt_len=0,
past_src_data_sizes=past_train_src_sizes,
constants=onmt.constants)
valid_path = opt.data + '.valid'
if opt.data_format in ['scp', 'scpmem']:
valid_src = SCPIndexDataset(audio_data['valid'], concat=opt.concat)
if 'valid_past' in audio_data:
past_valid_src = SCPIndexDataset(audio_data['valid_past'],
concat=opt.concat, shared_object=valid_src)
else:
past_valid_src = None
elif opt.data_format in ['wav']:
valid_src = WavDataset(audio_data['valid'])
past_valid_src = None
else:
valid_src = MMapIndexedDataset(valid_path + '.src')
past_valid_src = None
valid_tgt = MMapIndexedDataset(valid_path + '.tgt')
if os.path.exists(valid_path + '.src_lang.bin'):
assert 'langs' in dicts
valid_src_langs = MMapIndexedDataset(valid_path + '.src_lang')
valid_tgt_langs = MMapIndexedDataset(valid_path + '.tgt_lang')
else:
valid_src_langs = list()
valid_tgt_langs = list()
                # Allocate one for the bilingual case
valid_src_langs.append(torch.Tensor([dicts['langs']['src']]))
valid_tgt_langs.append(torch.Tensor([dicts['langs']['tgt']]))
# check the length files if they exist
if os.path.exists(valid_path + '.src_sizes.npy'):
valid_src_sizes = np.load(valid_path + '.src_sizes.npy')
valid_tgt_sizes = np.load(valid_path + '.tgt_sizes.npy')
else:
valid_src_sizes, valid_tgt_sizes = None, None
# check the length files if they exist
if os.path.exists(valid_path + '.past_src_sizes.npy'):
past_valid_src_sizes = np.load(valid_path + '.past_src_sizes.npy')
else:
past_valid_src_sizes = None
            # we can use a 2x batch size for validation
valid_data = onmt.Dataset(valid_src, valid_tgt,
valid_src_sizes, valid_tgt_sizes,
valid_src_langs, valid_tgt_langs,
batch_size_words=opt.batch_size_words * 2,
multiplier=opt.batch_size_multiplier,
data_type=data_type, sorting=True,
input_size=opt.input_size,
batch_size_sents=opt.batch_size_sents,
cleaning=True, verbose=True, debug=True,
past_src_data=past_valid_src,
past_src_data_sizes=past_valid_src_sizes,
min_src_len=0, min_tgt_len=0,
constants=onmt.constants)
elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
print("Done after %s" % elapse)
else:
raise NotImplementedError
print(' * number of sentences in training data: %d' % train_data.size())
print(' * number of sentences in validation data: %d' % valid_data.size())
else:
print("[INFO] Reading multiple dataset ...")
# raise NotImplementedError
dicts = torch.load(opt.data + ".dict.pt")
# onmt.constants = add_tokenidx(opt, onmt.constants, dicts)
root_dir = os.path.dirname(opt.data)
print("Loading training data ...")
train_dirs, valid_dirs = dict(), dict()
# scan the data directory to find the training data
for dir_ in os.listdir(root_dir):
if os.path.isdir(os.path.join(root_dir, dir_)):
if str(dir_).startswith("train"):
idx = int(dir_.split(".")[1])
train_dirs[idx] = dir_
if dir_.startswith("valid"):
idx = int(dir_.split(".")[1])
valid_dirs[idx] = dir_
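        # e.g. a savedir produced by preprocess.py with -multi_dataset contains
        # subdirectories like train.0.en-de/, train.1.en-fr/, valid.0.en-de/
        # (hypothetical language pairs)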
train_sets, valid_sets = list(), list()
for (idx_, dir_) in sorted(train_dirs.items()):
data_dir = os.path.join(root_dir, dir_)
print("[INFO] Loading training data %i from %s" % (idx_, dir_))
if opt.data_format in ['bin', 'raw']:
raise NotImplementedError
elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
from onmt.data.mmap_indexed_dataset import MMapIndexedDataset
from onmt.data.scp_dataset import SCPIndexDataset
if opt.data_format in ['scp', 'scpmem']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = SCPIndexDataset(audio_data, concat=opt.concat)
elif opt.data_format in ['wav']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = WavDataset(audio_data)
else:
src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))
src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))
if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
else:
src_sizes, tgt_sizes = None, None
if opt.data_format in ['scp', 'scpmem']:
data_type = 'audio'
elif opt.data_format in ['wav']:
data_type = 'wav'
else:
data_type = 'text'
train_data = onmt.Dataset(src_data,
tgt_data,
src_sizes, tgt_sizes,
src_lang_data, tgt_lang_data,
batch_size_words=opt.batch_size_words,
data_type=data_type, sorting=True,
batch_size_sents=opt.batch_size_sents,
multiplier=opt.batch_size_multiplier,
src_align_right=opt.src_align_right,
upsampling=opt.upsampling,
augment=opt.augment_speech, sa_f=opt.sa_f, sa_t=opt.sa_t,
cleaning=True, verbose=True,
input_size=opt.input_size,
constants=onmt.constants)
train_sets.append(train_data)
for (idx_, dir_) in sorted(valid_dirs.items()):
data_dir = os.path.join(root_dir, dir_)
print("[INFO] Loading validation data %i from %s" % (idx_, dir_))
if opt.data_format in ['bin', 'raw']:
raise NotImplementedError
elif opt.data_format in ['scp', 'scpmem', 'mmem', 'wav']:
if opt.data_format in ['scp', 'scpmem']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = SCPIndexDataset(audio_data, concat=opt.concat)
elif opt.data_format in ['wav']:
audio_data = torch.load(os.path.join(data_dir, "data.scp_path.pt"))
src_data = WavDataset(audio_data)
else:
src_data = MMapIndexedDataset(os.path.join(data_dir, "data.src"))
tgt_data = MMapIndexedDataset(os.path.join(data_dir, "data.tgt"))
src_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.src_lang'))
tgt_lang_data = MMapIndexedDataset(os.path.join(data_dir, 'data.tgt_lang'))
if os.path.exists(os.path.join(data_dir, 'data.src_sizes.npy')):
src_sizes = np.load(os.path.join(data_dir, 'data.src_sizes.npy'))
tgt_sizes = np.load(os.path.join(data_dir, 'data.tgt_sizes.npy'))
else:
src_sizes, tgt_sizes = None, None
if opt.encoder_type == 'audio':
data_type = 'audio'
else:
data_type = 'text'
valid_data = onmt.Dataset(src_data, tgt_data,
src_sizes, tgt_sizes,
src_lang_data, tgt_lang_data,
batch_size_words=opt.batch_size_words,
multiplier=opt.batch_size_multiplier,
data_type=data_type, sorting=True,
batch_size_sents=opt.batch_size_sents,
src_align_right=opt.src_align_right,
min_src_len=1, min_tgt_len=3,
input_size=opt.input_size,
cleaning=True, verbose=True, constants=onmt.constants)
valid_sets.append(valid_data)
train_data = train_sets
valid_data = valid_sets
if opt.load_from:
checkpoint = torch.load(opt.load_from, map_location=lambda storage, loc: storage)
print("* Loading dictionaries from the checkpoint")
del checkpoint['model']
del checkpoint['optim']
dicts = checkpoint['dicts']
else:
dicts['tgt'].patch(opt.patch_vocab_multiplier)
checkpoint = None
if "src" in dicts:
print(' * vocabulary size. source = %d; target = %d' %
(dicts['src'].size(), dicts['tgt'].size()))
else:
print(' * vocabulary size. target = %d' %
(dicts['tgt'].size()))
os.environ['MASTER_ADDR'] = opt.master_addr # default 'localhost'
os.environ['MASTER_PORT'] = opt.master_port # default '8888'
# spawn N processes for N gpus
# each process has a different trainer
if len(opt.gpus) > 1:
torch.multiprocessing.spawn(run_process, nprocs=len(opt.gpus),
args=(train_data, valid_data, dicts, opt, checkpoint))
else:
run_process(0, train_data, valid_data, dicts, opt, checkpoint)
if __name__ == "__main__":
main()
| 20,484
| 44.220751
| 107
|
py
|
NMTGMinor
|
NMTGMinor-master/tools/create_s4_kernel.py
|
import json
import sys
config = dict()
config["encoder_embed_dim"] = 1024 # or 1024?
config["encoder_mssm_num_stacks"] = 1 # change this to stack more s4
config["encoder_mssm_hidden_dim"] = 512
config["encoder_mssm_num_heads"] = 8
config["encoder_mssm_activation"] = "gelu"
config["encoder_mssm_scale"] = 0.5
config["encoder_mssm_maxlen"] = 1024
config["encoder_mssm_timestep_min"] = 0.01
config["encoder_mssm_timestep_max"] = 0.16
config["encoder_mssm_dropout"] = 0.1
config["dropout"] = 0.1
config["activation_fn"] = "gelu"
config["encoder_ffn_embed_dim"] = 4096
config["relu_dropout"] = 0.1
# DECODER SSM ADDED LATER
file_name = sys.argv[1] if len(sys.argv) >= 2 else "ssm_config.json"
out_file = open(file_name, 'w')
json.dump(config, out_file, indent=4)
out_file.close()
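# Usage sketch (editor's note): `python create_s4_kernel.py my_ssm.json` writes the
# config above to my_ssm.json; with no argument it falls back to "ssm_config.json",
# matching the sys.argv check above.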
| 782
| 26
| 68
|
py
|
NMTGMinor
|
NMTGMinor-master/tools/grad_check.py
|
import torch.nn as nn
import onmt
import torch
from onmt.reversible_models.transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
ReversibleTransformerDecoderLayer, ReversibleDecoderFunction
class TestEncoder(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = layers
def forward(self, input):
return ReversibleEncoderFunction.apply(input, self.layers, None)
class TestDecoder(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = layers
def forward(self, input, context):
return ReversibleDecoderFunction.apply(input, context, self.layers,
None, None, False, None)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=16,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
parser.add_argument('-test_decoder', action='store_true',
help='Test decoder')
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
onmt.constants.weight_norm = False
onmt.constants.checkpointing = False
onmt.constants.max_position_length = 4096
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 1
opt.inner_size = 16
bsz = 4
seq_len = 16
input_states = torch.randn(*(seq_len, bsz, opt.model_size*2)).double().cuda()
if not opt.test_decoder:
layers = nn.ModuleList([ReversibleTransformerEncoderLayer(opt) for _ in range(opt.layers)])
# layers.cuda()
net = TestEncoder(layers)
net = net.double().cuda()
print(net)
print("start gradchecking ...")
input_states.requires_grad = True
torch.autograd.gradcheck(net, input_states)
print("gradchecking completed.")
else:
print("Testing decoder ...")
opt.ignore_source = False
layers = nn.ModuleList([ReversibleTransformerDecoderLayer(opt) for _ in range(opt.layers)])
net = TestDecoder(layers)
net = net.double().cuda()
src_seq_len = 8
context = torch.randn(*(src_seq_len, bsz, opt.model_size)).double().cuda()
print("start gradchecking for input and context...")
input_states.requires_grad = True
context.requires_grad = True
torch.autograd.gradcheck(net, (input_states, context))
print("gradchecking completed.")
# context.requires_grad = True
# input.requires
# print("start gradchecking for context...")
# input_states.requires_grad = True
# torch.autograd.gradcheck(net, (input_states, context))
# print("gradchecking completed.")
| 2,992
| 28.93
| 111
|
py
|
NMTGMinor
|
NMTGMinor-master/tools/perplexity_score.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
import apex
import json  # required below when -dump_beam is set
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-lm', required=False,
help='Path to language model .pt file. Used for cold fusion')
parser.add_argument('-autoencoder', required=False,
help='Path to autoencoder .pt file')
parser.add_argument('-input_type', default="word",
help="Input type: word/char")
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-src_lang', default='src',
help='Source language')
parser.add_argument('-tgt_lang', default='tgt',
help='Target language')
parser.add_argument('-attributes', default="",
help='Attributes for the decoder. Split them by | ')
parser.add_argument('-stride', type=int, default=1,
help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-asr_format', default="h5", required=False,
help="Format of asr data h5 or scp")
parser.add_argument('-encoder_type', default='text',
help="Type of encoder to use. Options are [text|img|audio].")
parser.add_argument('-previous_context', type=int, default=0,
help="Number of previous sentence for context")
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=256,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
parser.add_argument('-start_with_bos', action="store_true",
help="""Add BOS token to the top of the source sentence""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-sampling', action="store_true",
help='Using multinomial sampling instead of beam search')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-bos_token', type=str, default="<s>",
help='BOS Token (used in multilingual model). Default is <s>.')
parser.add_argument('-no_bos_gold', action="store_true",
help='Do not prepend the BOS token to the gold target sequence.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-alpha', type=float, default=0.6,
help="""Length Penalty coefficient""")
parser.add_argument('-beta', type=float, default=0.0,
help="""Coverage penalty coefficient""")
parser.add_argument('-print_nbest', action='store_true',
help='Output the n-best list instead of a single sentence')
parser.add_argument('-ensemble_op', default='mean', help="""Ensembling operator""")
parser.add_argument('-normalize', action='store_true',
help='To normalize the scores based on output length')
parser.add_argument('-fp16', action='store_true',
help='To use floating point 16 in decoding')
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-fast_translate', action='store_true',
help='Using the fast decoder')
def reportScore(name, score_total, words_total):
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / (words_total + 1e-9),
name, math.exp(-score_total / (words_total + 1e-9))))
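# Worked example (editor's note, hypothetical numbers): a summed log-prob of -693.1
# over 100 words gives AVG SCORE -6.93 and PPL exp(6.93) ~= 1024, since perplexity
# here is exp(-score_total / words_total).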
def addone(f):
for line in f:
yield line
yield None
def lenPenalty(s, l, alpha):
l_term = math.pow(l, alpha)
return s / l_term
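# Worked example (editor's note): lenPenalty(-10.0, 20, 0.6) = -10.0 / 20**0.6 ~= -1.66,
# a GNMT-style length normalization that divides the score by l**alpha.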
def getSentenceFromTokens(tokens, input_type):
if input_type == 'word':
sent = " ".join(tokens)
elif input_type == 'char':
sent = "".join(tokens)
else:
raise NotImplementedError
return sent
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
all_scores = torch.empty(0)
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# always score the full beam: n_best = beam_size
opt.n_best = opt.beam_size
if opt.output == "stdout":
outF = sys.stdout
else:
outF = open(opt.output, 'w')
gold_score_total, gold_words_total = 0, 0
src_batch, tgt_batch = [], []
cur_batch_sizes = []
count = 0
tgtF = open(opt.tgt) if opt.tgt else None
in_file = None
if opt.src == "stdin":
in_file = sys.stdin
opt.batch_size = 1
elif opt.encoder_type == "audio" and opt.asr_format == "h5":
in_file = h5.File(opt.src, 'r')
elif opt.encoder_type == "audio" and opt.asr_format == "scp":
import kaldiio
from kaldiio import ReadHelper
audio_data = iter(ReadHelper('scp:' + opt.src))
else:
in_file = open(opt.src)
from onmt.inference.perplexity_scorer import PerplexityScorer
translator = PerplexityScorer(opt)
# Audio processing for the source batch
if opt.encoder_type == "audio":
s_prev_context = []
t_prev_context = []
i = 0
while True:
if opt.asr_format == "h5":
if i == len(in_file):
break
line = np.array(in_file[str(i)])
i += 1
elif opt.asr_format == "scp":
try:
_, line = next(audio_data)
except StopIteration:
break
if opt.stride != 1:
line = line[0::opt.stride]
line = torch.from_numpy(line)
if opt.concat != 1:
add = (opt.concat - line.size()[0] % opt.concat) % opt.concat
z = torch.FloatTensor(add, line.size()[1]).zero_()
line = torch.cat((line, z), 0)
line = line.reshape((line.size()[0] // opt.concat, line.size()[1] * opt.concat))
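# Example (editor's note): with concat=4, a 10-frame input of feature dim 40 is
# zero-padded to 12 frames and reshaped to (3, 160): the sequence shrinks by the
# concat factor while the feature dimension grows by it.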
if opt.previous_context > 0:
s_prev_context.append(line)
# use a distinct loop variable: the outer `i` indexes the h5 input and must not be clobbered
for j in range(1, opt.previous_context + 1):
if j < len(s_prev_context):
line = torch.cat((torch.cat((s_prev_context[-j - 1], torch.zeros(1, line.size()[1]))), line))
if len(s_prev_context) > opt.previous_context:
s_prev_context = s_prev_context[-1 * opt.previous_context:]
src_batch += [line]
if tgtF:
tline = tgtF.readline().strip()
if opt.previous_context > 0:
t_prev_context.append(tline)
for j in range(1, opt.previous_context + 1):
if j < len(t_prev_context):  # check the target history, not the source one
tline = t_prev_context[-j - 1] + " # " + tline
if len(t_prev_context) > opt.previous_context:
t_prev_context = t_prev_context[-1 * opt.previous_context:]
if opt.input_type == 'word':
tgt_tokens = tline.split() if tgtF else None
elif opt.input_type == 'char':
tgt_tokens = list(tline.strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgt_batch += [tgt_tokens]
if len(src_batch) < opt.batch_size:
continue
print("Batch size:", len(src_batch), len(tgt_batch))
gold_score, num_gold_words, all_gold_scores = translator.translate(src_batch, tgt_batch, type='asr')
count, gold_score, goldWords, all_scores = translateBatch(opt, tgtF, count, outF, translator,
src_batch, tgt_batch,
gold_score, num_gold_words,
all_gold_scores, all_scores,
opt.input_type)
gold_score_total += gold_score
gold_words_total += goldWords
src_batch, tgt_batch = [], []
# catch the last batch
if len(src_batch) != 0:
print("Batch size:", len(src_batch), len(tgt_batch))
gold_score, num_gold_words, all_gold_scores = translator.translate(
src_batch,
tgt_batch, type='asr')
count, gold_score, goldWords, all_scores = translateBatch(opt, tgtF, count, outF, translator,
src_batch, tgt_batch,
gold_score, num_gold_words,
all_gold_scores, all_scores, opt.input_type)
gold_score_total += gold_score
gold_words_total += goldWords
src_batch, tgt_batch = [], []
# Text processing
else:
for line in addone(in_file):
if line is not None:
if opt.input_type == 'word':
src_tokens = line.split()
elif opt.input_type == 'char':
src_tokens = list(line.strip())
else:
raise NotImplementedError("Input type unknown")
src_batch += [src_tokens]
if tgtF:
# ~ tgt_tokens = tgtF.readline().split() if tgtF else None
if opt.input_type == 'word':
tgt_tokens = tgtF.readline().split() if tgtF else None
elif opt.input_type == 'char':
tgt_tokens = list(tgtF.readline().strip()) if tgtF else None
else:
raise NotImplementedError("Input type unknown")
tgt_batch += [tgt_tokens]
# cur_batch_sizes.append(max([len(src_tokens),len(tgt_tokens)]))
# if len(src_batch) == 0 or (max(cur_batch_sizes) * len(src_batch)) < opt.batch_size:
if len(src_batch) < opt.batch_size:
continue
else:
# at the end of file, check last batch
if len(src_batch) == 0:
break
# score the gold targets with the model (no actual beam search here)
gold_score, num_gold_words, all_gold_scores = translator.translate(src_batch, tgt_batch)
# convert output tensor to words
count, gold_score, goldWords, all_scores = translateBatch(opt, tgtF, count, outF, translator,
src_batch, tgt_batch,
gold_score, num_gold_words,
all_gold_scores, all_scores,
opt.input_type)
gold_score_total += gold_score
gold_words_total += goldWords
src_batch, tgt_batch = [], []
cur_batch_sizes = []
if opt.verbose:
if tgtF: reportScore('GOLD', gold_score_total, gold_words_total)
if tgtF:
tgtF.close()
if opt.dump_beam:
json.dump(translator.beam_accum, open(opt.dump_beam, 'w'))
outF.close()
print(all_scores.size())
all_scores_numpy = all_scores.numpy()
np.savetxt(opt.output, all_scores_numpy, delimiter="\n")
def translateBatch(opt, tgtF, count, outF, translator, src_batch, tgt_batch, gold_score,
num_gold_words, all_gold_scores, all_scores, input_type):
gold_score_total = 0
gold_words_total = 0
if tgtF is not None:
gold_score_total = sum(gold_score).item()
gold_words_total = num_gold_words
batch_size = len(src_batch)
scores = torch.Tensor(batch_size).zero_()
for b in range(len(src_batch)):
count += 1
if opt.normalize:
gold_score_ = gold_score[b] / len(tgt_batch[b])
else:
gold_score_ = gold_score[b]
if opt.verbose:
if opt.encoder_type == "text":
src_sent = " ".join(src_batch[b])
print('SRC %d: %s' % (count, src_sent))
if tgtF is not None:
tgt_sent = getSentenceFromTokens(tgt_batch[b], input_type)
if translator.tgt_dict.lower:
tgt_sent = tgt_sent.lower()
print('GOLD %d: %s ' % (count, tgt_sent))
print("GOLD SCORE: %.4f" % gold_score_)
print()
print('')
else:
if count % 100000 == 0:
print("Finished %d sentences ... " % count)
scores[b].fill_(gold_score_)
all_scores = torch.cat([all_scores, scores], dim=0)
return count, gold_score_total, gold_words_total, all_scores
if __name__ == "__main__":
main()
| 14,590
| 39.30663
| 117
|
py
|
NMTGMinor
|
NMTGMinor-master/tools/test_amp.py
|
import torch
from apex import amp
from apex.normalization.fused_layer_norm import FusedLayerNorm
torch.cuda.set_device(1)
class NeuralNet(torch.nn.Module):
def __init__(self, d_in, d_out):
self.d_in = d_in
self.d_out = d_out
super().__init__()
self.norm = torch.nn.LayerNorm(d_in)
self.norm2 = FusedLayerNorm(d_out)
# self.norm2 = torch.nn.LayerNorm(d_out)
self.linear = torch.nn.Linear(d_in, d_out)
self.linear2 = torch.nn.Linear(d_out, d_out)
def forward(self, input):
input = self.norm(input)
print(input.type())
output = self.linear(input)
print(output.type())
output = torch.relu(output)
print(output.type())
output = self.norm2(output)
output = self.linear2(output)
print(output.type())
output = torch.nn.functional.log_softmax(output, dim=-1)
print("end")
return output
model = NeuralNet(500, 1000)
model = model.cuda()
loss_function = torch.nn.NLLLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
for i in range(1000):
x = torch.rand(128, 500).cuda()
o = model(x).float()
y = torch.randint(low=0, high=1000, size=(128, )).cuda()  # high is exclusive: cover all 1000 classes
loss = loss_function(o, y)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
optimizer.zero_grad()
| 1,464
| 28.3
| 67
|
py
|
NMTGMinor
|
NMTGMinor-master/tools/get_best.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
import sys
import h5py as h5
import numpy as np
import apex
parser = argparse.ArgumentParser(description='rescore.py')
onmt.markdown.add_md_help_argument(parser)
#
parser.add_argument('-input', required=True,
help='Path to the nbest file')
parser.add_argument('-n_best', type=int, default=1,
help="""n_best value from decoding and rescoring""")
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence)""")
parser.add_argument('-coeff', default=[], nargs='+', type=float,
help="Use CUDA on the listed devices.")
def addone(f):
for line in f:
yield line
yield None
def main():
opt = parser.parse_args()
reader = open(opt.input)
out_writer = open(opt.output, 'w')
count = 0
all_sents, all_scores = [], []
for line in addone(reader):
if line is not None:
count += 1
parts = line.strip().split(" ||| ")
text = parts[0]
scores = parts[1].strip().split()
# print(scores)
# print(len(scores))
# assert(len(scores) == len(opt.coeff))
all_sents.append(text)
score = 0
print(count)
for i, score_ in enumerate(scores):
score += opt.coeff[i] * float(score_)
all_scores.append(score)
if count % opt.n_best == 0:
all = zip(all_sents, all_scores)
sorted_all = sorted(all, key=lambda x: x[1], reverse=True)
best_sent = sorted_all[0][0]
out_writer.write(best_sent + "\n")
all_sents = []
all_scores = []
else:
break
out_writer.close()
if __name__ == "__main__":
main()
| 2,055
| 24.382716
| 75
|
py
|
NMTGMinor
|
NMTGMinor-master/tools/average_checkpoints.py
|
from __future__ import division
import onmt
import onmt.markdown
import torch
import argparse
import math
import numpy
from onmt.model_factory import build_model
parser = argparse.ArgumentParser(description='translate.py')
onmt.markdown.add_md_help_argument(parser)
parser.add_argument('-models', required=True,
help='Path to model .pt file')
parser.add_argument('-output', default='model.averaged',
help="""Path to output averaged model""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
parser.add_argument('-method', default='mean',
help="method to average: mean|gmean")
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
# opt.model should be a string of models, split by |
models = opt.models.split("|")
# print(models)
n_models = len(models)
print("Loading main model from %s ..." % models[0])
checkpoint = torch.load(models[0], map_location=lambda storage, loc: storage)
if 'optim' in checkpoint:
del checkpoint['optim']
main_checkpoint = checkpoint
model_opt = checkpoint['opt']
dicts = checkpoint['dicts']
main_model = build_model(model_opt, checkpoint['dicts'])
main_model.load_state_dict(checkpoint['model'])
if opt.cuda:
main_model = main_model.cuda()
for i in range(1, len(models)):
model = models[i]
print("Loading model from %s ..." % models[i])
checkpoint = torch.load(model, map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# delete optim information to save GPU memory
if 'optim' in checkpoint:
del checkpoint['optim']
current_model = build_model(model_opt, checkpoint['dicts'])
current_model.load_state_dict(checkpoint['model'])
if opt.cuda:
current_model = current_model.cuda()
if opt.method == 'mean':
# Sum the parameter values
for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
main_param.data.add_(param.data)
elif opt.method == 'gmean':
# Take the geometric mean of parameter values
for (main_param, param) in zip(main_model.parameters(), current_model.parameters()):
main_param.data.mul_(param.data)
else:
raise NotImplementedError
# Normalizing
if opt.method == 'mean':
for main_param in main_model.parameters():
main_param.data.div_(n_models)
elif opt.method == 'gmean':
for main_param in main_model.parameters():
main_param.data.pow_(1./n_models)
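# Example (editor's note): for two checkpoints whose parameter values are 2 and 8,
# 'mean' gives (2 + 8) / 2 = 5 while 'gmean' gives (2 * 8)**(1/2) = 4; the geometric
# mean is only meaningful when the multiplied values do not flip sign.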
# Saving
model_state_dict = main_model.state_dict()
save_checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': model_opt,
'epoch': -1,
'iteration' : -1,
'batchOrder' : None,
'optim': None
}
print("Saving averaged model to %s" % opt.output)
torch.save(save_checkpoint, opt.output)
if __name__ == "__main__":
main()
| 3,446
| 26.798387
| 96
|
py
|
NMTGMinor
|
NMTGMinor-master/tools/grad_check_reversible.py
|
import torch.nn as nn
import onmt
import torch
# from onmt.reversible_models.transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
# ReversibleTransformerDecoderLayer, ReversibleDecoderFunction
from onmt.reversible_models.relative_transformers import ReversibleTransformerEncoderLayer, ReversibleEncoderFunction, \
ReversibleTransformerDecoderLayer, ReversibleDecoderFunction
class TestEncoder(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = layers
def forward(self, input, pos):
return ReversibleEncoderFunction.apply(input, pos, self.layers, None)
class TestDecoder(nn.Module):
def __init__(self, layers):
super().__init__()
self.layers = layers
def forward(self, input, context, pos):
return ReversibleDecoderFunction.apply(input, pos, context, self.layers,
None, None, False, None)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=16,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
parser.add_argument('-test_decoder', action='store_true',
help='Test decoder')
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
onmt.constants.weight_norm = False
onmt.constants.checkpointing = False
onmt.constants.max_position_length = 4096
onmt.constants.double_precision = True
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 1
opt.inner_size = 16
bsz = 4
seq_len = 16
input_states = torch.randn(*(seq_len, bsz, opt.model_size*2)).double().cuda()
pos = torch.randn(*(seq_len, 1, opt.model_size)).double().cuda()
pos.requires_grad = False
if not opt.test_decoder:
layers = nn.ModuleList([ReversibleTransformerEncoderLayer(opt) for _ in range(opt.layers)])
# layers.cuda()
net = TestEncoder(layers)
net = net.double().cuda()
print(net)
print("start gradchecking ...")
input_states.requires_grad = True
torch.autograd.gradcheck(net, (input_states, pos))
print("gradchecking completed.")
else:
print("Testing decoder ...")
opt.ignore_source = False
layers = nn.ModuleList([ReversibleTransformerDecoderLayer(opt) for x in range(opt.layers)])
net = TestDecoder(layers)
net = net.double().cuda()
src_seq_len = 8
context = torch.randn(*(src_seq_len, bsz, opt.model_size)).double().cuda()
print("start gradchecking for input and context...")
input_states.requires_grad = True
context.requires_grad = True
torch.autograd.gradcheck(net, (input_states, context, pos))
print("gradchecking completed.")
# context.requires_grad = True
# input.requires
# print("start gradchecking for context...")
# input_states.requires_grad = True
# torch.autograd.gradcheck(net, (input_states, context))
# print("gradchecking completed.")
| 3,353
| 30.641509
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_cmatmul.py
|
import torch
from time import time
B = 16384
N_in = 1024
N_out = 4096
num_iters = 200
x = torch.randn(B, N_in, dtype=torch.cfloat, requires_grad=True)
r = torch.randn(B, N_in, dtype=torch.float, requires_grad=True)
i = torch.randn(B, N_in, dtype=torch.float, requires_grad=True)
print(r.type())
r.data.copy_(x.real.data)
i.data.copy_(x.imag.data)
x = x.cuda()
r = r.cuda()
i = i.cuda()
x_2 = torch.randn(N_in, N_out, dtype=torch.cfloat, requires_grad=True)
r_2 = torch.randn(N_in, N_out, dtype=torch.float, requires_grad=True)
i_2 = torch.randn(N_in, N_out, dtype=torch.float, requires_grad=True)
r_2.data.copy_(x_2.real.data)
i_2.data.copy_(x_2.imag.data)
x_2 = x_2.cuda()
r_2 = r_2.cuda()
i_2 = i_2.cuda()
a = torch.mm(x, x_2)
with torch.no_grad():
a = torch.mm(x, x_2)
a_r = torch.mm(r, r_2) - torch.mm(i, i_2)
a_i = torch.mm(r, i_2) + torch.mm(i, r_2)
print(a.real - a_r)
print(a.imag - a_i)
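# The check above uses the complex-product identity (editor's note):
# (r + i*j) @ (r2 + i2*j) = (r @ r2 - i @ i2) + (r @ i2 + i @ r2) * j,
# so both prints should show (numerically) zero tensors.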
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
a_r = torch.mm(r, r_2) - torch.mm(i, i_2)
a_i = torch.mm(r, i_2) + torch.mm(i, r_2)
(a_r.sum() + a_i.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPseudo CMATMUL fp32 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
a = torch.mm(x, x_2)
(a.real.sum() + a.imag.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch CMATMUL fp32 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
with torch.cuda.amp.autocast(enabled=True):
for _ in range(num_iters):
a_r = torch.mm(r, r_2) - torch.mm(i, i_2)
a_i = torch.mm(r, i_2) + torch.mm(i, r_2)
(a_r.sum() + a_i.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPseudo CMATMUL fp16 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
with torch.cuda.amp.autocast(enabled=True):
for _ in range(num_iters):
a = torch.mm(x, x_2)
(a.real.sum() + a.imag.sum()).backward()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch CMATMUL fp16 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
| 2,405
| 20.872727
| 91
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_factorize_linear.py
|
import torch
import torch.nn.functional as F
from time import time
N_in = 1024
N_out = 4096
B = 16384
num_iters = 512
x = torch.randn(B, N_in, dtype=torch.float, requires_grad=True)
W = torch.randn(N_out, N_in, dtype=torch.float, requires_grad=True)
b = torch.randn(N_out, dtype=torch.float, requires_grad=True)
x = x.cuda()
W = W.cuda()
b = b.cuda()
y = F.linear(x, W, b)
y.sum().backward()
y2 = torch.mm(x, W.transpose(0, 1)) + b.unsqueeze(0)
y2.sum().backward()
print(y - y2)
r = torch.randn(1, N_in, dtype=torch.float, requires_grad=True)
s = torch.randn(1, N_out, dtype=torch.float, requires_grad=True)
r = r.cuda()
s = s.cuda()
y1 = F.linear(x, torch.mul(W, torch.mm(s.t(), r)), b)
# y2 = torch.mul(torch.mm(torch.mul(x, r)), s) + b.unsqueeze(0)
y2 = torch.mm(x * r, W.transpose(0, 1)) * s + b.unsqueeze(0)
print("Checking ")
print(y1.sum() / (B * N_out), y2.sum() / (B * N_out))
# print(torch.allclose(y1, y2, rtol=1e-05, atol=1e-08))
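# Why y1 == y2 (editor's note): with the rank-1 factor s^T r,
# x @ (W * (s^T r))^T = ((x * r) @ W^T) * s, because the per-input scale r can be
# folded into x and the per-output scale s applied after the matmul.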
rank = 1
n_languages = 1024
r_table = torch.Tensor(n_languages, rank, N_in)
s_table = torch.Tensor(n_languages, rank, N_out)
# indices: [T x B x n_languages]
# r_output: T x B x rank x N_in
# s_output: T x B x rank x N_out
# apply the above equation. torch.mm(x * r, W.transpose(0, 1)) * s + b.unsqueeze(0)
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
#
# for _ in range(num_iters):
# y2 = torch.mm(x, W.transpose(0, 1)) + b.unsqueeze(0)
# y2.sum().backward()
#
# torch.cuda.synchronize()
# stop_time = time()
#
# print(F"\nPseudo CMATMUL fp32 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
# for _ in range(num_iters):
# y = F.linear(x, W, b)
# y.sum().backward()
#
# torch.cuda.synchronize()
# stop_time = time()
# print(F"\nPytorch CMATMUL fp32 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
#
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
# with torch.cuda.amp.autocast(enabled=True):
# for _ in range(num_iters):
# y = F.linear(x, W, b)
# y.sum().backward()
#
#
# torch.cuda.synchronize()
# stop_time = time()
#
# print(F"\nPytorch CMATMUL fp16 time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
#
#
# torch.cuda.profiler.start()
# torch.cuda.synchronize()
# start_time = time()
#
#
# with torch.cuda.amp.autocast(enabled=True):
# for _ in range(num_iters):
# y2 = torch.mm(x, W.transpose(0, 1)) + b.unsqueeze(0)
# y2.sum().backward()
#
# torch.cuda.synchronize()
# stop_time = time()
# print(F"\nPseudo CMATMUL fp16 {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
# print("-----------------------------------------------------------------------------")
| 3,043
| 24.579832
| 93
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_self_attention_blaslt.py
|
import torch
import unittest
from modules.self_multihead_attn import SelfMultiheadAttn
from time import time
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 512
self.sequences = 8
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
mask_additive=True,
impl='default')
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
mask_additive=True,
impl='fast')
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
def test_self_multihead_attn_additive_mask(self):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
# print(mask)
for i in range(20):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
self.ref_inputs.backward(grads)
self.tst_inputs.backward(grads)
self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-3, rtol=1e-3))
self.assertTrue(not torch.any(torch.isnan(self.tst_inputs.grad)))
self.assertTrue(torch.allclose(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3))
self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3))
def test_speed(self):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
num_iters = 100
for i in range(num_iters):
ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
self.ref_inputs.backward(grads)
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
num_iters = 100
for i in range(num_iters):
tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
self.tst_inputs.backward(grads)
torch.cuda.synchronize()
stop_time = time()
print(F"\nC++ Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
if __name__ == '__main__':
unittest.main()
| 5,583
| 43.672
| 109
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_rotation.py
|
import torch
from torch import nn, einsum
from einops import rearrange, repeat
class SinusoidalEmbeddings(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x):
"""
:param x: [bsz x time x hidden]
:return:
"""
# this module only looks at the length of x's first dimension
n = x.shape[0]  # time dimension
t = torch.arange(n, device=x.device).type_as(self.inv_freq)
sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb
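# Shape note (editor's note): for n = x.shape[0] time steps and dim = d, the
# returned emb is [n, d], with the first d/2 columns holding sin terms and the
# last d/2 holding cos terms.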
def rotate_every_two(x):
# splits the last dimension in half
x = rearrange(x, '... (d j) -> ... d j', j=2)
x1, x2 = x.unbind(dim=-1)
# stack negative x2 with x1
x = torch.stack((-x2, x1), dim=-1)
return rearrange(x, '... d j -> ... (d j)')
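# Concrete example (editor's note): along the last dim,
# rotate_every_two([x1, x2, x3, x4]) == [-x2, x1, -x4, x3], i.e. each adjacent
# pair (a, b) is mapped to (-b, a), a 90-degree rotation of the pair.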
def rotate_backward(dx):
dx = rearrange(dx, '... (d j) -> ... d j', j=2)
dx2, dx1 = dx.unbind(dim=-1)
dx = torch.stack((dx1, -dx2), dim=-1)
dx = rearrange(dx, '... d j -> ... (d j)')
return dx
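# Editor's note: rotate_backward is the adjoint of rotate_every_two, mapping
# [d1, d2, d3, d4] to [d2, -d1, d4, -d3], which is exactly the gradient of the
# pair rotation (a, b) -> (-b, a).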
# more like encodings, because the position values are not learnable weights
def apply_rotary_emb(q, sinu_pos):
"""
:param q: [bsz x time x hidden]
:param k: [bsz x time x hidden]
:param sinu_pos:
:return: q and k with applied position encoding
"""
# splits the last dimension of the sinu_pos in half and grab sin and cos terms
sinu_pos = rearrange(sinu_pos, 'n (j d) -> n j d', j=2)
sin, cos = sinu_pos.unbind(dim=-2)
# repeat the sin and cos terms with 2?
sin, cos = map(lambda t: repeat(t, 'n d -> n (d j)', j=2), (sin, cos))
# q' = (q * cos) + (rotate_every_two(q) * sin)
# dl_dq = dl_dq' * (cos + sin * rotate'(q))
print(q.size(), cos.size(), sin.size())
q = q * cos.unsqueeze(1) + rotate_every_two(q) * sin.unsqueeze(1)
# q = rotate_every_two(q) # * sin
# y = g(x) * a
# dy/dx = dy/dg * dg/dx = a *
# q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
return q, sin, cos
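# Summary (editor's note): this applies q' = q * cos + rotate_every_two(q) * sin
# and also returns sin/cos so the manual gradient check at the bottom of this
# file can rebuild dq = cos * dq' + rotate_backward(sin * dq').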
BH = 1024 * 8
B = 1024
H = BH // B
Q = 75
K = 56
D = 64
pos_encoder = SinusoidalEmbeddings(D)
pos_encoder.cuda()
# create input
x = torch.randn((BH, Q, D), dtype=torch.float32, device=torch.device("cuda"), requires_grad=True)
# create the pos emb
pos_emb = pos_encoder(x)
rotate_grad = torch.Tensor([1, -1] * int(D / 2)).to(x.device)
rotate_grad = rotate_grad.unsqueeze(0).unsqueeze(1).repeat(BH, Q, 1)
#
r_x = rotate_every_two(x)
#
loss = r_x.sum() * 1
#
loss.backward()
#
print(x.grad - rotate_grad)
x.grad = None
x = torch.randn((Q, BH, D), dtype=torch.float32, device=torch.device("cuda"), requires_grad=True)
grad_rx = torch.randn((Q, BH, D), dtype=torch.float32, device=torch.device("cuda"), requires_grad=False)
pos_emb = pos_encoder(x)
rotary_emb_x, sin, cos = apply_rotary_emb(x, pos_emb)
rotary_emb_x.backward(grad_rx)
print(x.grad)
rotate_grad = rotate_backward(x.new_ones(x.shape))
# grad_x = (cos + rotate_grad * sin) * grad_rx
grad_x = cos.unsqueeze(1) * grad_rx + rotate_backward(sin.unsqueeze(1) * grad_rx)
print(x.grad - grad_x)
| 3,312
| 25.293651
| 104
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_fmha.py
|
###############################################################################
# Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
###############################################################################
import sys
import torch
import numpy as np
import unittest
import math
import fmhalib_sm86 as mha
from time import time
from random import randint
from torch.cuda.amp import custom_fwd, custom_bwd
# CONDITION to use fast mha:
# length <= 512 and sm=80
class IndexCopy(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, input, non_pad_indices, total_batch_size):
sizes = list(input.size())
sizes[0] = total_batch_size
output = input.new_zeros(*sizes)
output.index_copy_(0, non_pad_indices, input)
ctx.save_for_backward(non_pad_indices)
return output
@staticmethod
@custom_bwd
def backward(ctx, output_grads):
non_pad_indices, = ctx.saved_tensors
grad_input = output_grads.index_select(0, non_pad_indices)
return grad_input, None, None
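# Editor's note: IndexCopy scatters the rows of a packed (padding-free) tensor back
# into a zero-initialized padded layout with total_batch_size rows; its backward
# simply gathers the matching rows of the incoming gradient.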
def py_mha(qkv, amask, b, s, h, d, high_precision=True):
qkv = qkv.view(b, s, h, 3, d)
q = qkv[:, :, :, 0, :].permute(0, 2, 1, 3)
k = qkv[:, :, :, 1, :].permute(0, 2, 1, 3)
v = qkv[:, :, :, 2, :].permute(0, 2, 1, 3)
if high_precision:
p = torch.matmul(q.float(), k.permute(0, 1, 3, 2).float())
p_masked = p / math.sqrt(d) + (amask) * -10000.0
s = torch.softmax(p_masked, -1).to(qkv.dtype)
ctx = torch.matmul(s, v)
else:
p = torch.matmul(q, k.permute(0, 1, 3, 2))
p_masked = p / math.sqrt(d) + (amask) * -10000.0
s = torch.softmax(p_masked, -1).to(qkv.dtype)
ctx = torch.matmul(s, v)
ctx = ctx.permute(0, 2, 1, 3).contiguous()
ctx.retain_grad()
return ctx
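# Editor's note: py_mha is the pure-PyTorch reference attention, softmax over
# scaled Q @ K^T with an additive -10000 padding mask, which the fused fmhalib
# kernels are compared against below via torch.allclose.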
class TestFMHA(unittest.TestCase):
def run_uneven_test(self, s, b):
s = randint(s-127, s)
print(f'Test uneven s={s} b={b}')
torch.manual_seed(12341234)
torch.cuda.manual_seed(12341234)
dtype = torch.float16
device = torch.device('cuda')
h = 16
d = 64
amask = torch.ones(b, s, dtype=dtype, device=device)
slens = []
prev_size = -1
for b_ in range(b):
# both branches of the original if/else were identical: draw an unsorted random length
curr_size = randint(1, s)
slens.append(curr_size)
prev_size = curr_size
amask[b_, :prev_size].fill_(0) # the first prev_size elements have no mask
max_s = max(slens)
non_pad_indices = torch.nonzero(amask.view(-1).ne(1)).squeeze(1)
a = torch.tensor(np.array([0] + slens), dtype=torch.int32)
amask = amask.unsqueeze(1).unsqueeze(1)
seqlens = torch.tensor(slens, dtype=torch.int32, device=device)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=device)
total = cu_seqlens[-1].item()
# input for the Python reference MHA; layout must match the fused-kernel path
qkv = torch.randn((b, s, h, 3, d), device=device, dtype=dtype)
def run_fmha_forward(qkv_, non_pad_indices_, cu_seqlens_, max_s_):
qkv_vs = qkv_.permute(0, 1, 3, 2, 4).contiguous().view(b * s, 3, h, d)
qkv_vs = qkv_vs.index_select(0, non_pad_indices_)
if b < 4:
ctx, S_ = mha.fwd_nl(qkv_vs, cu_seqlens_, 0.0, max_s_, True, None)
else:
ctx, S_ = mha.fwd(qkv_vs, cu_seqlens_, 0.0, max_s_, True, None)
ctx.requires_grad = True
ctx_out = IndexCopy.apply(ctx, non_pad_indices, b * s)
ctx_out = ctx_out.view(b, s, h, d)
return qkv_vs, ctx, ctx_out, S_
def run_mha_backward(grad, ctx_out_, ctx_, qkv_vs_, non_pad_indices_, cu_seqlens_, max_s_, S__):
ctx_out.backward(grad, inputs=[ctx_])
if b < 4:
dqkv2, _, _ = mha.bwd_nl(ctx.grad, qkv_vs_, S__, cu_seqlens_, 0.0, max_s_)
else:
dqkv2, _ = mha.bwd(ctx.grad, qkv_vs_, S__, cu_seqlens_, 0.0, max_s_)
dqkv2 = dqkv2.permute(0, 2, 1, 3) # [b*s, 3, h, d]
return dqkv2
qkv_vs, ctx, ctx_out, S_ = run_fmha_forward(qkv, non_pad_indices, cu_seqlens, max_s)
qkv.requires_grad = True
ctx_ref = py_mha(qkv, amask, b, s, h, d)
mask = amask.squeeze(1).squeeze(1).bool().unsqueeze(-1).unsqueeze(-1)
ctx_ref.masked_fill_(mask, 0)
self.assertTrue(torch.allclose(ctx_ref.float(), ctx_out.float(), atol=1e-2))
print("output ok.")
labels = torch.randn_like(ctx_ref)
diff = ctx_ref - labels
l = (diff * diff).sum() / b
l.backward(inputs=[ctx_ref, qkv])
dw = ctx_ref.grad # .permute(0, 2, 1, 3)
dw2 = dw.clone().detach().contiguous()
dqkv2 = run_mha_backward(dw2, ctx_out, ctx, qkv_vs, non_pad_indices, cu_seqlens, max_s, S_)
qkv_grad = qkv.grad.view(b * s, h, 3, d)
qkv_grad = qkv_grad.index_select(0, non_pad_indices)
if not torch.allclose(qkv_grad.float(), dqkv2.float(), atol=1e-3):
print(qkv_grad.float() - dqkv2.float())
self.assertTrue(torch.allclose(qkv_grad.float(), dqkv2.float(), atol=1e-2))
print("grad ok.")
num_iters = 20
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
qkv_vs, ctx, ctx_out, S_ = run_fmha_forward(qkv, non_pad_indices, cu_seqlens, max_s)
dw2 = torch.randn_like(ctx_out)
dqkv2 = run_mha_backward(dw2, ctx_out, ctx, qkv_vs, non_pad_indices, cu_seqlens, max_s, S_)
torch.cuda.synchronize()
stop_time = time()
print(F"Fused MHA MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ctx_ref = py_mha(qkv, amask, b, s, h, d, high_precision=False)
labels = torch.randn_like(ctx_ref)
ctx_ref.backward(labels)
torch.cuda.synchronize()
stop_time = time()
print(F"Python MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
def run_test(self, s, b):
# s = randint(s - 127, s)  # disabled: run with the exact length passed in
print(f'Test s={s} b={b}')
torch.manual_seed(1234)
torch.cuda.manual_seed(1234)
dtype = torch.float16
device = torch.device('cuda')
h = 16
d = 64
slens = [s] * b
a = torch.tensor(np.array([0] + slens), dtype=torch.int32)
amask = torch.zeros(b, h, s, s, dtype=dtype, device=device)
seqlens = torch.tensor(slens, dtype=torch.int32, device=device)
cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=device)
total = cu_seqlens[-1].item()
# input for the Python reference MHA
qkv = torch.randn((b, s, h, 3, d), device=device, dtype=dtype)
# input for fmha
qkv_vs = qkv.permute(0, 1, 3, 2, 4).contiguous().view(b * s, 3, h, d)
qkv.requires_grad = True
if b < 4:
ctx, S_ = mha.fwd_nl(qkv_vs, cu_seqlens, 0.0, s, True, None)
else:
ctx, S_ = mha.fwd(qkv_vs, cu_seqlens, 0.0, s, True, None)
ctx = ctx.view(b, s, h, d)
ctx_ref = py_mha(qkv, amask, b, s, h, d)
print(ctx_ref.float() - ctx.float())
self.assertTrue(torch.allclose(ctx_ref.float(), ctx.float(), atol=1e-2))
labels = torch.randn_like(ctx_ref)
diff = ctx_ref - labels
l = (diff * diff).sum() / b
l.backward()
dw = ctx_ref.grad.permute(0, 2, 1, 3)
dw2 = dw.permute(0, 2, 1, 3).clone().detach().contiguous()
if b < 4:
dqkv2, _, _ = mha.bwd_nl(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
else:
dqkv2, _ = mha.bwd(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
dqkv2 = dqkv2.permute(0, 2, 1, 3).view(b, s, h, 3, d)
# print(qkv.grad.float() - dqkv2.float())
self.assertTrue(torch.allclose(qkv.grad.float(), dqkv2.float(), atol=1e-2))
num_iters = 20
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
if b < 4:
ctx, S_ = mha.fwd_nl(qkv_vs, cu_seqlens, 0.0, s, True, None)
else:
ctx, S_ = mha.fwd(qkv_vs, cu_seqlens, 0.0, s, True, None)
dw2 = torch.randn_like(ctx)
if b < 4:
dqkv2, _, _ = mha.bwd_nl(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
else:
dqkv2, _ = mha.bwd(dw2, qkv_vs, S_, cu_seqlens, 0.0, s)
torch.cuda.synchronize()
stop_time = time()
print(F"Fused MHA MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
torch.cuda.synchronize()
start_time = time()
for _ in range(num_iters):
ctx_ref = py_mha(qkv, amask, b, s, h, d, high_precision=False)
labels = torch.randn_like(ctx_ref)
ctx_ref.backward(labels)
torch.cuda.synchronize()
stop_time = time()
print(F"Python MLP time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.stop()
# def test_128(self):
# self.run_test(128, 55)
# self.run_test(128, 47)
# self.run_test(128, 90)
#
# self.run_uneven_test(128, 55)
# self.run_uneven_test(128, 47)
# self.run_uneven_test(128, 90)
# self.run_test(128, 3)
# self.run_uneven_test(128, 3)
#
# def test_256(self): # 129 - 256?
# #
# self.run_test(256, 32)
# self.run_test(256, 16)
# self.run_test(224, 16)
# self.run_test(224, 3)
# #
# self.run_uneven_test(256, 32)
# self.run_uneven_test(256, 16)
# self.run_uneven_test(224, 16)
# self.run_uneven_test(224, 3)
#
# def test_384(self):
# self.run_test(384, 32)
# self.run_test(384, 16)
# self.run_test(384, 8)
# #
# self.run_uneven_test(384, 32)
# self.run_uneven_test(384, 16)
# self.run_uneven_test(384, 8)
# self.run_test(384, 3)
#
# def test_512(self):
# self.run_test(512, 1)
# self.run_test(512, 2)
# self.run_test(512, 3)
# self.run_uneven_test(512, 32)
# self.run_uneven_test(512, 2)
# self.run_uneven_test(512, 3)
#
# def test_768(self):
# self.run_test(768, 1)
# self.run_test(768, 2)
# self.run_test(768, 3)
# self.run_test(768, 32)
# self.run_test(768, 64)
# self.run_uneven_test(768, 32)
# self.run_uneven_test(768, 64)
# self.run_uneven_test(768, 1)
# self.run_uneven_test(768, 2)
# self.run_uneven_test(768, 3)
def test_512(self):
l = 512
self.run_test(l, 1)
self.run_test(l, 2)
self.run_test(l, 3)
self.run_test(l, 32)
self.run_test(l, 64)
self.run_uneven_test(l, 32)
self.run_uneven_test(l, 64)
self.run_uneven_test(l, 1)
self.run_uneven_test(l, 2)
self.run_uneven_test(l, 3)
# def test_896(self):
# l = 896
# self.run_test(l, 1)
# self.run_test(l, 2)
# self.run_test(l, 3)
# self.run_test(l, 32)
# self.run_test(l, 64)
# self.run_uneven_test(l, 32)
# self.run_uneven_test(l, 64)
# self.run_uneven_test(l, 1)
# self.run_uneven_test(l, 2)
# self.run_uneven_test(l, 3)
#
# def test_768(self):
# self.run_test(768, 4)
# self.run_test(768, 2)
# self.run_test(768, 32)
# self.run_uneven_test(768, 32)
# self.run_uneven_test(768, 3)
# def test_896(self):
# self.run_test(896, 112)
# self.run_test(896, 32)
# self.run_test(896, 2)
# self.run_uneven_test(896, 32)
# self.run_uneven_test(896, 16)
# self.run_uneven_test(896, 3)
# def test_1024(self):
# self.run_test(1024, 4)
# self.run_uneven_test(1024, 32)
# self.run_test(640, 2)
# self.run_test(512, 3)
# self.run_uneven_test(1024, 32)
# self.run_uneven_test(1024, 2)
# self.run_uneven_test(1024, 3)
#
if __name__ == '__main__':
unittest.main()
| 14,170
| 32.343529
| 104
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_flattened_weight.py
|
import torch
import torch.nn.functional as F
from time import time
class ParameterRef(object):
def __init__(self, weight_buf, offset, length, size):
self.weight_buf = weight_buf
self.offset = offset
self.length = length
self.size = size
def __call__(self):
return self.weight_buf[self.offset:self.offset+self.length].view(*self.size)
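# Editor's note: calling a ParameterRef slices the shared flat buffer and views it
# with the original shape, so every layer reads (and autograd accumulates through)
# one contiguous weight tensor.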
def find_weight(m, _weight_list):
for attr_str in dir(m):
target_attr = getattr(m, attr_str)
if type(target_attr) == torch.nn.Parameter:
weight = target_attr
if weight.ndim == 2:
_weight_list.append(weight)
for n, ch in m.named_children():
find_weight(ch, _weight_list)
return _weight_list
def flatten_weight(m, _weight_buf, _offset):
for attr_str in dir(m):
target_attr = getattr(m, attr_str)
if type(target_attr) == torch.nn.Parameter:
weight = target_attr
size = weight.size()
numel = weight.numel()
_weight_buf.data[_offset:_offset + numel].copy_(weight.data.view(-1))  # use the running _offset, not the module-level offset
# print(_weight_buf[offset:offset+numel].view_as(weight))
setattr(m, attr_str, None)
del m._parameters[attr_str]
setattr(m, attr_str, ParameterRef(_weight_buf, _offset, numel, size))
_offset = _offset + numel
del weight
for n, ch in m.named_children():
_offset = flatten_weight(ch, _weight_buf, _offset)  # recurse with flatten_weight, not find_weight
return _offset
class MLP(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, n_hiddens=2):
super(MLP, self).__init__()
self.weight_buf = None
self.input_weight = torch.nn.Parameter(torch.randn(hidden_size, input_size))
self.hidden_weight = torch.nn.Parameter(torch.randn(hidden_size, hidden_size))
self.output_weight = torch.nn.Parameter(torch.randn(output_size, hidden_size))
def set_buffer(self, _weight_buf):
self.weight_buf = _weight_buf
def forward(self, x):
try:
x = F.linear(x, self.input_weight, None)
x = torch.relu(x)
x = F.linear(x, self.hidden_weight, None)
x = torch.relu(x)
x = F.linear(x, self.output_weight, None)
except TypeError as e:
x = F.linear(x, self.input_weight(), None)
x = torch.relu(x)
x = F.linear(x, self.hidden_weight(), None)
x = torch.relu(x)
x = F.linear(x, self.output_weight(), None)
return x
mlp = MLP(1024, 4096, 1024).cuda()
x = torch.rand(128, 1024).cuda()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for i in range(32):
y = mlp(x)
y.sum().backward()
mlp.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch default MLP time {(stop_time - start_time) * 1000. / 32:.4f} ms")
weight_list = list()
find_weight(mlp, weight_list)
numels = sum([w.numel() for w in weight_list])
weight_buf = torch.nn.Parameter(torch.zeros(numels)).cuda()
offset = 0
with torch.no_grad():
offset = flatten_weight(mlp, weight_buf, offset)
print(offset)
mlp.set_buffer(weight_buf)
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
for i in range(32):
y = mlp(x)
y.sum().backward()
mlp.zero_grad()
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch flattened MLP time {(stop_time - start_time) * 1000. / 32:.4f} ms")
| 3,501
| 25.330827
| 86
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_multi_linear.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
len_q = 20
input_dim = 128
heads = 8
head_dim = input_dim // heads
output_dim = input_dim
k_proj = nn.Linear(input_dim, input_dim, bias=True)
v_proj = nn.Linear(input_dim, input_dim, bias=True)
q_proj = nn.Linear(input_dim, input_dim, bias=True)
# weight = Parameter(torch.Tensor(3 * input_dim, input_dim))
weight_t = torch.Tensor(3 * input_dim, input_dim)
bias_t = torch.Tensor(3 * input_dim)
# weight_t = weight_t.reshape(head_dim, 3, heads, input_dim)
w_q = q_proj.weight.clone()
w_k = k_proj.weight.clone()
w_v = v_proj.weight.clone()
print(torch.allclose(w_q, q_proj.weight))
weights = [w_q, w_k, w_v]
# with torch.no_grad():
# weight_t[:, 0, :, :].reshape(input_dim, input_dim).copy_(q_proj.weight)
# weight_t[:, 1, :, :].reshape(input_dim, input_dim).copy_(k_proj.weight)
# weight_t[:, 2, :, :].reshape(input_dim, input_dim).copy_(v_proj.weight)
weight_ = torch.cat(weights, dim=0).contiguous()
b_q = q_proj.bias.clone()
b_k = k_proj.bias.clone()
b_v = v_proj.bias.clone()
biases = [b_q, b_k, b_v]
bias_ = torch.cat(biases, dim=0).contiguous()
weight_ = weight_.reshape(3 * head_dim * heads, input_dim).view(3, heads, head_dim, input_dim).transpose(0, 1).reshape(-1, input_dim)
bias_ = bias_.reshape(3 * head_dim * heads).view(3, heads, head_dim).transpose(0, 1).reshape(-1)
# weight_t = weight_t.reshape(3 * input_dim, input_dim)
weight_t.copy_(weight_)
bias_t.copy_(bias_)
weight = Parameter(weight_t)
bias = Parameter(bias_t)
bsz = 16
input = torch.randn(len_q, bsz, input_dim)
q_proj = q_proj.cuda()
k_proj = k_proj.cuda()
v_proj = v_proj.cuda()
weight = weight.cuda()
bias = bias.cuda()
input = input.cuda()
q = q_proj(input).view(len_q, bsz * heads, head_dim)
k = k_proj(input).view(len_q, bsz * heads, head_dim)
v = v_proj(input).view(len_q, bsz * heads, head_dim)
all = F.linear(input, weight, bias)
# all = all.view(len_q, bsz, 3, heads, head_dim)
#
# q_ = all[:, :, 0, :,:].reshape(len_q, bsz * heads, head_dim)
# k_ = all[:, :, 1, :,:].reshape(len_q, bsz * heads, head_dim)
# v_ = all[:, :, 2, :,:].reshape(len_q, bsz * heads, head_dim)
# all = all.view(len_q, bsz, 3, heads, head_dim).transpose(2, 3).contiguous()
all = all.view(len_q, bsz * heads, 3, head_dim)
q_ = all[:, :, 0, :] # .view(len_q, bsz * heads, head_dim)
k_ = all[:, :, 1, :] # .view(len_q, bsz * heads, head_dim)
v_ = all[:, :, 2, :] # .view(len_q, bsz * heads, head_dim)
# print(q - q_)
print("begin testing ...")
print(torch.allclose(q, q_))
print(torch.allclose(k, k_))
print(torch.allclose(v, v_))
# q_ = q.view(bsz * heads, head_dim)
# k_ = k.view(bsz * heads, head_dim)
# matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
# device=queries.device)
# matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
# keys.transpose(0, 1).transpose(1, 2),
# out=matmul1_results, beta=0.0, alpha=scale_t[0])
o = torch.bmm  # leftover alias for the baddbmm comparison sketched in the comments above; unused
| 3,110
| 32.095745
| 133
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_self_attention.py
|
import torch
import unittest
from modules.self_multihead_attn import SelfMultiheadAttn
from time import time
class SelfMultiheadAttnTest(unittest.TestCase):
def setUp(self, seed=1234):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.seq_length = 512
self.sequences = 8
self.hidden_dim = 1024
self.heads = 16
self.dropout_prob = 0.0
self.ref_layer = SelfMultiheadAttn(self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
mask_additive=True,
impl='default')
self.ref_layer.cuda().half()
self.ref_layer.reset_parameters()
self.ref_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
# Reset seed so parameters are identical
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
self.tst_layer = SelfMultiheadAttn(self.hidden_dim,
self.heads,
dropout=self.dropout_prob,
bias=True,
mask_additive=True,
impl='fast')
self.tst_layer.cuda().half()
self.tst_layer.reset_parameters()
self.tst_inputs = torch.randn(self.seq_length, self.sequences, self.hidden_dim,
dtype=torch.float16, device=torch.device("cuda")).requires_grad_(True)
def test_self_multihead_attn_additive_mask(self):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
# print(mask)
for i in range(20):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
            # backpropagate through the graph (was: self.ref_inputs.backward(grads),
            # which writes grads straight into .grad without touching the graph)
            ref_outputs.backward(grads)
            tst_outputs.backward(grads)
self.assertTrue(torch.allclose(self.ref_inputs, self.tst_inputs, atol=1e-3, rtol=1e-3))
self.assertTrue(not torch.any(torch.isnan(self.tst_inputs.grad)))
self.assertTrue(torch.allclose(ref_outputs, tst_outputs, atol=1e-3, rtol=1e-3))
self.assertTrue(torch.allclose(self.ref_inputs.grad, self.tst_inputs.grad, atol=1e-3, rtol=1e-3))
def test_speed(self):
grads = torch.randn_like(self.tst_inputs)
mask = ((torch.randn(self.sequences, self.seq_length) > 0) * -10000.0).half().cuda()
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
num_iters = 100
for i in range(num_iters):
ref_outputs, _ = self.ref_layer.forward(self.ref_inputs,
self.ref_inputs,
self.ref_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
            ref_outputs.backward(grads)  # was self.ref_inputs.backward(grads); backward must start from the outputs
torch.cuda.synchronize()
stop_time = time()
print(F"\nPytorch Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
torch.cuda.profiler.start()
torch.cuda.synchronize()
start_time = time()
num_iters = 100
for i in range(num_iters):
tst_outputs, _ = self.tst_layer.forward(self.tst_inputs,
self.tst_inputs,
self.tst_inputs,
key_padding_mask=mask,
need_weights=False,
attn_mask=None,
is_training=True)
            tst_outputs.backward(grads)  # was self.tst_inputs.backward(grads); backward must start from the outputs
torch.cuda.synchronize()
stop_time = time()
print(F"\nC++ Self ATTN time {(stop_time - start_time) * 1000. / num_iters:.4f} ms")
if __name__ == '__main__':
unittest.main()
| 5,583
| 43.672
| 109
|
py
|
NMTGMinor
|
NMTGMinor-master/test/test_softmax.py
|
import torch
import mask_softmax_dropout_cuda
import copy
BH = 1024 * 8
B = 1024
H = BH // B
Q = 75
K = 56
x = torch.randn((BH, Q, K), dtype=torch.float16, device=torch.device("cuda"), requires_grad=True) * 100
x_ref = x.clone().detach().requires_grad_(True)
grado = torch.randn((BH, Q, K), dtype=torch.float16, device=torch.device("cuda"), requires_grad=True)
dropout_mask, softmax_results = mask_softmax_dropout_cuda.forward(True, 8, x, 0.0)
pytorch_output = torch.nn.functional.softmax(x_ref, dim=-1, dtype=torch.float32).type_as(x)
dif = softmax_results - pytorch_output
print(dif)
print(dif.double().sum().div_(x.numel()))
result = torch.allclose(softmax_results, pytorch_output, atol=1e-3, rtol=1e-3)
print(result)
print("Checking gradients ...")
grado2 = copy.deepcopy(grado)
grado3 = copy.deepcopy(grado)
pytorch_output.backward(grado)
gradx_ref = x_ref.grad
gradx = mask_softmax_dropout_cuda.backward(8, grado, softmax_results, dropout_mask, 0.0)
gradx2 = mask_softmax_dropout_cuda.backward_recompute(8, grado2, softmax_results, x, dropout_mask, 0.0)
dif = gradx - gradx_ref
print(dif.double().sum().div_(x.numel()))
result = torch.allclose(gradx, gradx_ref, atol=1e-3, rtol=1e-3)
print(result)
dif = gradx2 - gradx_ref
print(dif.double().sum().div_(x.numel()))
result = torch.allclose(gradx2, gradx_ref, atol=1e-3, rtol=1e-3)
print(result)
dif = gradx2 - gradx
print(dif.double().sum().div_(x.numel()))
result = torch.allclose(gradx2, gradx, atol=1e-3, rtol=1e-3)
print(result)
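# --- Added reference (not part of the original script): both CUDA backward variants
# --- above should match the analytic softmax gradient
# ---     dL/dx = (g - sum(g * s, dim=-1, keepdim=True)) * s,  with s = softmax(x).
# --- Small CPU check of that identity against autograd:
xs = torch.randn(4, 7, dtype=torch.float64, requires_grad=True)
s = torch.softmax(xs, dim=-1)
g = torch.randn_like(s)
s.backward(g)
manual = (g - (g * s).sum(dim=-1, keepdim=True)) * s
print(torch.allclose(xs.grad, manual))  # expected: True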
| 1,510
| 24.610169
| 104
|
py
|
NMTGMinor
|
NMTGMinor-master/test/modules/fast_self_multihead_attn_func.py
|
import torch
# import fast_self_multihead_attn
# import fast_self_multihead_attn_bias
# import fast_self_multihead_attn_bias_additive_mask
import self_multihead_attn_cuda as fast_self_multihead_attn_bias_additive_mask
class FastSelfAttnFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, inputs, input_weights, output_weights, input_biases,
output_biases, pad_mask, mask_additive, dropout_prob):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = (pad_mask is not None)
mask_additive_t = torch.tensor([mask_additive])
input_lin_results, \
bmm1_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = \
fast_self_multihead_attn_bias_additive_mask.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
ctx.save_for_backward(use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
null_tensor, \
bmm1_results, \
pad_mask, \
mask_additive_t, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
bmm1_results, \
pad_mask, \
mask_additive_t, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t = ctx.saved_tensors
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads = \
fast_self_multihead_attn_bias_additive_mask.backward( \
heads_t[0], \
output_grads, \
matmul2_results, \
dropout_results, \
bmm1_results, \
pad_mask, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t[0])
return None, None, None, \
input_grads, input_weight_grads, output_weight_grads, input_bias_grads, output_bias_grads, \
None, None, None
fast_self_attn_func = FastSelfAttnFunc.apply
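# --- Added usage sketch (not part of the original module). Argument order follows the
# --- forward() signature above; the shapes and the all-zero additive mask are
# --- illustrative assumptions, and the block only runs when executed directly on a
# --- CUDA machine with the extension built.
if __name__ == "__main__" and torch.cuda.is_available():
    seq, bsz, heads, dim = 16, 2, 4, 64
    half = dict(dtype=torch.float16, device="cuda")
    inputs = torch.randn(seq, bsz, dim, **half)
    in_w, out_w = torch.randn(3 * dim, dim, **half), torch.randn(dim, dim, **half)
    in_b, out_b = torch.zeros(3 * dim, **half), torch.zeros(dim, **half)
    pad_mask = torch.zeros(bsz, seq, **half)  # additive mask: zeros = no padding
    out = fast_self_attn_func(False, True, heads, inputs, in_w, out_w,
                              in_b, out_b, pad_mask, True, 0.0)
    print(out.shape)  # expected: (seq, bsz, dim)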
| 3,260
| 32.96875
| 108
|
py
|
NMTGMinor
|
NMTGMinor-master/test/modules/self_multihead_attn.py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from .self_multihead_attn_func import self_attn_func
from .fast_self_multihead_attn_func import fast_self_attn_func
# from .fast_self_multihead_attn_norm_add_func import fast_self_attn_norm_add_func
# from apex.normalization.fused_layer_norm import FusedLayerNorm
# from onmt.modules.layer_norm import LayerNorm
if hasattr(torch._C, '_jit_set_profiling_executor'):
torch._C._jit_set_profiling_executor(False)
if hasattr(torch._C, '_jit_set_profiling_mode'):
torch._C._jit_set_profiling_mode(False)
@torch.jit.script
def jit_dropout_add(x, residual, prob, is_training):
# type: (Tensor, Tensor, float, bool) -> Tensor
    out = F.dropout(x, p=prob, training=is_training)  # was training=True, which ignored the is_training flag
out = residual + out
return out
class SelfMultiheadAttn(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0., bias=False, impl='fast',
mask_additive=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.bias = bias
self.impl = impl
self.scaling = self.head_dim ** -0.5
self.mask_additive = mask_additive
if mask_additive:
assert impl == 'default' or (
impl == 'fast' and bias), "additive mask not supported for fast mode without bias"
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.out_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj_bias = Parameter(torch.Tensor(embed_dim))
self.reset_parameters()
if impl == 'fast':
self.attn_func = fast_self_attn_func
elif impl == 'default':
self.attn_func = self_attn_func
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight, gain=math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj_weight)
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj_bias, 0.)
def forward(self, query, key, value, key_padding_mask=None, need_weights=False, attn_mask=None, is_training=True):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
input_bias = self.in_proj_bias
input_weights = self.in_proj_weight
if key_padding_mask is not None:
assert (attn_mask is None), "ERROR attn_mask and key_padding_mask should not be both defined!"
mask = key_padding_mask
elif attn_mask is not None:
            assert not self.mask_additive, "additive mask not supported for time mask"
mask = attn_mask
else:
mask = None
if self.impl == 'fast':
outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, query,
input_weights, self.out_proj_weight, input_bias, self.out_proj_bias, mask,
self.mask_additive, self.dropout)
else:
outputs = self.attn_func(attn_mask is not None, is_training, self.num_heads, self.scaling, query,
input_weights, self.out_proj_weight,
input_bias, self.out_proj_bias,
mask, self.mask_additive, self.dropout)
return outputs, None
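# --- Added reference (not part of the original module): a plain-PyTorch sketch of the
# --- computation the fused kernels above implement, assuming [time, batch, channel]
# --- inputs and the interleaved [heads, 3, head_dim] projection layout used elsewhere
# --- in these tests:
def _reference_self_attn(x, in_w, in_b, out_w, out_b, heads):
    t, b, c = x.shape
    hd = c // heads
    qkv = F.linear(x, in_w, in_b)                           # [t, b, 3c]
    q, k, v = qkv.view(t, b * heads, 3, hd).unbind(dim=2)   # split fused projection
    scores = torch.bmm(q.transpose(0, 1), k.transpose(0, 1).transpose(1, 2)) * hd ** -0.5
    attn = torch.softmax(scores, dim=-1)
    ctx = torch.bmm(attn, v.transpose(0, 1)).transpose(0, 1).reshape(t, b, c)
    return F.linear(ctx, out_w, out_b)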
| 4,111
| 39.712871
| 118
|
py
|
NMTGMinor
|
NMTGMinor-master/test/modules/self_multihead_attn_func.py
|
"""
Self-attention with multi-head attention.
Code is taken from apex self-attention implementation
https://github.com/NVIDIA/apex/tree/master/apex/contrib/csrc/multihead_attn
"""
import torch
import torch.nn.functional as F
from torch.cuda.amp import custom_fwd, custom_bwd
try:
import mask_softmax_dropout_cuda
except (ModuleNotFoundError, ImportError) as e:
mask_softmax_dropout_cuda = None
try:
import self_multihead_attn_cuda
except (ModuleNotFoundError, ImportError) as e:
self_multihead_attn_cuda = None
def rotate_half(x):
x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in torch < 1.8.0
def apply_rotary_pos_emb(q, k, cos, sin):
return (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
def rotate_backward(dx):
dx2, dx1 = dx[..., :dx.shape[-1] // 2], dx[..., dx.shape[-1] // 2:]
return torch.cat((dx1, -dx2), dim=dx1.ndim - 1)
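# --- Added check (not part of the original module): rotate_backward above is the
# --- adjoint of rotate_half, i.e. it maps gradients w.r.t. the output back to
# --- gradients w.r.t. the input. Tiny CPU autograd verification:
def _check_rotate_adjoint():
    x = torch.randn(3, 8, requires_grad=True)
    g = torch.randn(3, 8)
    rotate_half(x).backward(g)
    assert torch.allclose(x.grad, rotate_backward(g))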
class SelfAttnFunc(torch.autograd.Function):
@staticmethod
# @custom_fwd(cast_inputs=torch.float16)
    # restored the trailing parameters (incremental, incremental_cache, return_coverage,
    # low_precision) that the function body and the __main__ demo below rely on
    def forward(ctx, use_time_mask, is_training, heads, inputs,
                input_weights, output_weights,
                input_biases, output_biases,
                mask, dropout_prob,
                rotary_pos_enc, pos_emb,
                incremental, incremental_cache,
                return_coverage, low_precision=False):
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
head_dim = inputs.size(2) // heads
scale_t = torch.tensor([head_dim ** -0.5])
ctx.rotary_pos_enc = rotary_pos_enc
ctx.return_coverage = return_coverage
ctx.low_precision = low_precision
bsz, len_q = inputs.size(1), inputs.size(0)
if low_precision and self_multihead_attn_cuda is not None and not incremental and len_q <= 2048 and not use_time_mask \
and inputs.type() == 'torch.cuda.HalfTensor' and not rotary_pos_enc:
ctx.fused = True
use_mask = True
if mask is not None:
mask = mask.half() * -16384
else:
mask = inputs.new(bsz, len_q).zero_() # works
input_lin_results, \
attn_scores, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = self_multihead_attn_cuda.forward(use_mask, False, is_training, heads,
inputs.contiguous(), input_weights, output_weights,
input_biases, output_biases,
mask, dropout_prob)
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
attn_scores,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
mask)
            if ctx.return_coverage:
                return outputs, dropout_results
            else:
                return (outputs,)
ctx.fused = False
# Input Linear GEMM
# input1: (activations) [seql_q, seqs, embed_dim(1024)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)] (transpose [0,1])
# output: [seql_q, seqs, embed_dim*3]
# GEMM: ( (seql_q*seqs) x embed_dim ) x ( embed_dim x embed_dim*3 ) = (seql_q*seqs x embed_dim*3)
input_lin_results = torch.addmm(input_biases,
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
input_weights.transpose(0, 1),
beta=1., alpha=1.)
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1), input_weights.size(0))
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads, 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
if incremental:
keys = keys.contiguous().view(len_q, bsz, heads * head_dim)
values = values.contiguous().view(len_q, bsz, heads * head_dim)
if 'k' in incremental_cache and 'v' in incremental_cache:
keys = torch.cat([incremental_cache['k'], keys], dim=0) # time first
incremental_cache['k'] = keys
values = torch.cat([incremental_cache['v'], values], dim=0) # time first
incremental_cache['v'] = values
else:
incremental_cache['k'] = keys
incremental_cache['v'] = values
keys = keys.view(-1, bsz * heads, head_dim)
values = values.view(-1, bsz * heads, head_dim)
len_k = keys.size(0)
# apply rotary position encodings
if rotary_pos_enc:
            assert pos_emb is not None, "rotary_pos_enc requires pos_emb = (cos, sin)"
cos, sin = pos_emb
queries_, keys_ = apply_rotary_pos_emb(queries, keys, cos, sin)
queries.copy_(queries_)
keys.copy_(keys_)
else:
sin, cos = null_tensor, null_tensor
# Matmul1 Batched GEMMs
# The output tensor is specified prior to the Batch GEMM because baddbmm requires its specification
# baddbmm is used to apply the scale parameter via the Batched GEMM's alpha parameter instead of
# a separate elementwise operation.
        # Input1: (Queries) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (Keys) [seql_k, seqs*heads, head_dim] transpose(0,1)
# output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
if queries.is_cuda:
matmul1_results = torch.empty((queries.size(1), queries.size(0), keys.size(0)), dtype=queries.dtype,
device=queries.device)
matmul1_results = torch.baddbmm(matmul1_results, queries.transpose(0, 1),
keys.transpose(0, 1).transpose(1, 2),
out=matmul1_results, beta=0.0, alpha=scale_t[0])
else:
matmul1_results = torch.matmul(queries.transpose(0, 1), keys.transpose(0, 1).transpose(1, 2))
matmul1_results.mul_(scale_t[0])
if mask is not None:
# Self Attention Time Mask
if use_time_mask:
assert (len(mask.size()) == 2), "Timing mask is not 2D!"
# assert (mask.size(0) == mask.size(1)), "Sequence length should match!"
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask, float('-inf'))
# Key Padding Mask
else:
batches, seql_q, seql_k = matmul1_results.size()
seqs = int(batches / heads)
matmul1_results = matmul1_results.view(seqs, heads, seql_q, seql_k)
mask = mask.to(torch.bool)
matmul1_results = matmul1_results.masked_fill_(mask.unsqueeze(1).unsqueeze(2), float('-inf'))
matmul1_results = matmul1_results.view(seqs * heads, seql_q, seql_k)
# Softmax and Dropout attention
softmax_results = F.softmax(matmul1_results, dim=-1)
# Dropout - is not executed for inference
if is_training:
dropout_results, dropout_mask = torch._fused_dropout(softmax_results, p=(1. - dropout_prob_t[0]))
else:
dropout_results = softmax_results
dropout_mask = null_tensor
nan_mask = torch.isnan(dropout_results)
if nan_mask.any():
dropout_results.masked_fill_(nan_mask, 0)
# Matmul2 Batched GEMMs
# The output tensor specification is needed here to specify the non-standard output.
# Given that pytorch cannot currently perform autograd with an output tensor specified,
# this requires a backward pass specified.
# Input1: from_softmax [seqs*heads, seql_q, seql_k]
# Input2: (values) [seql_v, seqs*heads, head_dim] transpose(0,1)
# Output: [seql_q, seqs*heads, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = (seql_q x head_dim)
if queries.is_cuda:
matmul2_results = torch.empty((dropout_results.size(1), dropout_results.size(0), values.size(2)),
dtype=dropout_results.dtype, device=queries.device).transpose(1, 0)
matmul2_results = torch.bmm(dropout_results, values.transpose(0, 1), out=matmul2_results)
else:
matmul2_results = torch.matmul(dropout_results, values.transpose(0, 1))
matmul2_results = matmul2_results.transpose(0, 1).contiguous().view(inputs.size(0), inputs.size(1),
inputs.size(2))
# Output Linear GEMM
# Input1: (activations) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ] transpose(0,1)
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
outputs = torch.addmm(output_biases,
matmul2_results.view(inputs.size(0) * inputs.size(1), inputs.size(2)),
output_weights.transpose(0, 1),
beta=1., alpha=1.)
outputs = outputs.view(inputs.size(0), inputs.size(1), output_weights.size(0))
ctx.save_for_backward(heads_t,
scale_t,
matmul2_results,
dropout_results,
softmax_results,
input_lin_results,
inputs,
input_weights,
output_weights,
dropout_mask,
dropout_prob_t,
sin, cos)
if return_coverage:
return (outputs, dropout_results)
else:
return (outputs,)
@staticmethod
# @custom_bwd
def backward(ctx, *output_grads):
if ctx.return_coverage:
output_grads, coverage_grads = output_grads
else:
output_grads = output_grads[0]
if ctx.fused:
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
attn_scores, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t, pad_mask = ctx.saved_tensors
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads = self_multihead_attn_cuda.backward(heads_t[0], output_grads.contiguous(), matmul2_results,
dropout_results, attn_scores, pad_mask,
input_lin_results, inputs, input_weights,
output_weights, dropout_mask, dropout_prob_t[0])
            # one gradient slot per forward() argument (16 in total, matching the non-fused path below)
            return None, None, None, \
                   input_grads, \
                   input_weight_grads, output_weight_grads, \
                   input_bias_grads, output_bias_grads, \
                   None, None, None, None, None, None, None, None
heads_t, \
scale_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t, \
sin, cos = ctx.saved_tensors
head_dim = inputs.size(2) // heads_t.item()
        # Slice out q,k,v from one big Input Linear output (should only impact meta data, no copies!)
# Sequences and heads are combined to make the batch of the Batched GEMM
# input_lin_results: [seql_q, seqs, heads(16), 3, head_dim(64)]
# input_lin_results: [seql_q, batches=seqs*heads, 3, head_dim]
input_lin_results = input_lin_results.view(inputs.size(0), inputs.size(1) * heads_t[0], 3, head_dim)
queries = input_lin_results[:, :, 0, :]
keys = input_lin_results[:, :, 1, :]
values = input_lin_results[:, :, 2, :]
len_key = keys.size(0)
# Slice out q,k,v from one big set of gradients entering the input linear's bprop
# (should only impact meta data, no copies!)
# The gradients are identical in size to the Input Linear outputs.
# The tensor is declared before hand to properly slice out query, key, and value grads.
input_lin_results_grads = torch.empty_like(input_lin_results)
queries_grads = input_lin_results_grads[:, :, 0, :]
keys_grads = input_lin_results_grads[:, :, 1, :]
values_grads = input_lin_results_grads[:, :, 2, :]
# Output Linear GEMM - DGRAD
# Input1: (data grads) [seql_q, seqs, embed_dim=heads*head_dim]
# Input2: (weights) [ embed_dim, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( seql_q*seqs x embed_dim ) x ( embed_dim x embed_dim ) = ( seql_q*seqs x embed_dim )
output_grads = output_grads.contiguous()
output_lin_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), output_weights)
output_lin_grads = output_lin_grads.view(output_grads.size(0), output_grads.size(1), output_weights.size(1))
# Output Linear GEMM - WGRAD
# Input1: (data grads) [seql_q*seqs, embed_dim=heads*head_dim] transpose(0,1)
# Input2: (activations) [seql_q*seqs, embed_dim ]
# Output: [ seql_q, seqs, embed_dim ]
# GEMM: ( embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = ( embed_dim x embed_dim )
output_weight_grads = torch.mm(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)).transpose(0, 1),
matmul2_results.view(matmul2_results.size(0) * matmul2_results.size(1), matmul2_results.size(2)))
output_lin_grads = output_lin_grads.view(inputs.size(0), inputs.size(1) * heads_t[0], head_dim).transpose(0, 1)
output_bias_grads = torch.sum(
output_grads.view(output_grads.size(0) * output_grads.size(1), output_grads.size(2)), 0)
# Matmul2 - DGRAD1
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
matmul2_dgrad1 = torch.bmm(output_lin_grads, values.transpose(0, 1).transpose(1, 2))
# Matmul2 - DGRAD2
# Input1: (data grads) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1).transpose(1,2)
# Output: [seqs*heads, seql_q, seql_k]
# GEMM: Per batch: ( seql_q x head_dim ) x ( head_dim x seql_k ) = ( seql_q x seql_k )
values_grads = torch.bmm(dropout_results.transpose(1, 2), output_lin_grads, out=values_grads.transpose(0, 1))
dropout_grads = torch._masked_scale(matmul2_dgrad1, dropout_mask, 1.0 / (1.0 - dropout_prob_t[0]))
        # Softmax Grad (not a publicly documented op)
softmax_grads = torch._softmax_backward_data(dropout_grads, softmax_results, -1, softmax_results)
# Matmul1 - DGRAD1
# Input1: (data grads) [seqs*heads, seql_q, seql_k]
# Input2: (activations) [seql_k, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_q, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_q x seql_k ) x ( seql_k x head_dim ) = ( seql_q x head_dim )
torch.baddbmm(queries_grads.transpose(0, 1), softmax_grads, keys.transpose(0, 1),
out=queries_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
# Matmul1 - DGRAD2
# Input1: (data grads) [seqs*heads, seql_q, seql_k] transpose(1,2)
# Input2: (activations) [seql_q, seqs*heads, head_dim] transpose(0,1)
# Output: [seqs*heads, seql_k, head_dim] transpose(0,1)
# GEMM: Per batch: ( seql_k x seql_q ) x ( seql_q x head_dim ) = ( seql_k x head_dim )
torch.baddbmm(keys_grads.transpose(0, 1), softmax_grads.transpose(1, 2), queries.transpose(0, 1),
out=keys_grads.transpose(0, 1), beta=0.0, alpha=scale_t[0])
if ctx.rotary_pos_enc:
queries_grads_ = queries_grads * cos + rotate_backward(sin * queries_grads)
keys_grads_ = keys_grads * cos + rotate_backward(sin * keys_grads)
queries_grads.copy_(queries_grads_)
keys_grads.copy_(keys_grads_)
# Input Linear GEMM - DGRAD
# input1: (data grads) [seql_q, seqs, 3*embed_dim(3072)]
# input2: (weights) [embed_dim*3 (3072), embed_dim (1024)]
# output: [seql_q, seqs, embed_dim]
# GEMM: ( (seql_q*seqs) x 3*embed_dim ) x ( 3*embed_dim x embed_dim ) = (seql_q*seqs x embed_dim)
input_lin_results_grads = input_lin_results_grads.view(inputs.size(0) * inputs.size(1),
heads_t[0] * 3 * head_dim)
input_grads = torch.mm(input_lin_results_grads, input_weights)
input_grads = input_grads.view(inputs.size(0), inputs.size(1), inputs.size(2))
# Input Linear GEMM - WGRAD
# input1: (data grads) [seql_q*seqs, 3*embed_dim(3072)]
# input2: (activations) [seql_q*seqs, embed_dim(1024)]
# output: [3*embed_dim, embed_dim]
# GEMM: ( 3*embed_dim x seql_q*seqs ) x ( seql_q*seqs x embed_dim ) = (3*embed_dim x embed_dim)
input_weight_grads = torch.mm(input_lin_results_grads.transpose(0, 1),
inputs.view(inputs.size(0) * inputs.size(1), inputs.size(2)))
input_bias_grads = torch.sum(input_lin_results_grads, 0)
return None, None, None, \
input_grads, \
input_weight_grads, output_weight_grads, \
input_bias_grads, output_bias_grads, \
None, None, None, None, None, None, None, None
def _cast_if_autocast_enabled(*args):
if not torch.is_autocast_enabled():
return args
else:
try:
return torch.cuda.amp.autocast_mode._cast(args, torch.get_autocast_gpu_dtype())
except AttributeError:
return torch.cuda.amp.autocast_mode._cast(args, torch.half)
def self_attn_func(*args):
args = _cast_if_autocast_enabled(*args)
with torch.cuda.amp.autocast(enabled=False):
return SelfAttnFunc.apply(*args)
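# --- Added illustration (not part of the original module): the "Matmul1" comments in
# --- forward() rely on baddbmm folding the 1/sqrt(head_dim) scale into the GEMM's
# --- alpha instead of a separate elementwise multiply. The two forms agree:
def _check_baddbmm_scale(scale=0.125):
    q = torch.randn(6, 5, 8)    # [batches, seql_q, head_dim]
    k = torch.randn(6, 7, 8)    # [batches, seql_k, head_dim]
    out = torch.empty(6, 5, 7)
    fused = torch.baddbmm(out, q, k.transpose(1, 2), out=out, beta=0.0, alpha=scale)
    plain = torch.matmul(q, k.transpose(1, 2)) * scale
    assert torch.allclose(fused, plain, atol=1e-5)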
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='reversible transformer')
parser.add_argument('-model_size', type=int, default=32,
help='Size of embedding / transformer hidden')
parser.add_argument('-gpu', default=0, type=int,
help="Seed for deterministic runs.")
test_function = self_attn_func
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
opt.layers = 2
opt.variational_dropout = False
opt.dropout = 0.0
opt.attn_dropout = 0.0
opt.n_heads = 4
opt.inner_size = 16
opt.head_dim = opt.model_size // opt.n_heads
class Parameters(torch.nn.Module):
        def __init__(self, model_size=16, heads=1):
            super().__init__()  # required before assigning attributes on an nn.Module
            self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.in_proj_weight = torch.Tensor(3 * model_size, model_size)
self.out_proj_weight = torch.Tensor(model_size, model_size)
self.in_proj_bias = torch.Tensor(3 * model_size)
self.out_proj_bias = torch.Tensor(model_size)
self.reset_parameters()
def reset_parameters(self):
std_ = 0.02
torch.nn.init.normal_(self.in_proj_weight, 0.0, std_)
torch.nn.init.normal_(self.out_proj_weight, 0.0, std_)
torch.nn.init.constant_(self.in_proj_bias, 0.)
torch.nn.init.constant_(self.out_proj_bias, 0.)
class TestAttention(torch.nn.Module):
def __init__(self, test_function, model_size=16, heads=1):
super().__init__()
self.model_size = model_size
self.heads = heads
self.head_dim = model_size // heads
self.function = test_function
def forward(self, input_weights, output_weights, input, input_biases, output_biases, mask,
use_time_mask=False):
is_training = True
dropout = 0.0
            double_precision = True  # unused here; retained from an earlier variant of this harness
return_coverage = False
# use_time_mask, is_training, heads, inputs,
# input_weights, output_weights,
# input_biases, output_biases,
# mask, dropout_prob,
# rotary_pos_enc, pos_emb,
# incremental, incremental_cache,
# return_coverage
return self.function(use_time_mask, is_training, self.heads, input,
input_weights, output_weights,
input_biases, output_biases,
mask, dropout,
                                 False, None,  # rotary_pos_enc, pos_emb
                                 False, None,  # incremental, incremental_cache
                                 return_coverage)
bsz = 4
len_q = 15
len_r = len_q
input_states = torch.randn(*(len_q, bsz, opt.model_size)).double().cuda()
input_states.requires_grad = True
net = TestAttention(test_function, model_size=opt.model_size, heads=opt.n_heads)
parameters = Parameters(opt.model_size, opt.n_heads)
in_proj_weight = parameters.in_proj_weight.double().cuda()
out_proj_weight = parameters.out_proj_weight.double().cuda()
in_proj_bias = parameters.in_proj_bias.double().cuda()
out_proj_bias = parameters.out_proj_bias.double().cuda()
in_proj_weight.requires_grad = True
out_proj_weight.requires_grad = True
in_proj_bias.requires_grad = True
out_proj_bias.requires_grad = True
mask = input_states.new(*(bsz, len_r)).bernoulli_(p=0.25).bool()
print("gradchecking start.")
use_time_mask = False
torch.autograd.gradcheck(net, (in_proj_weight, out_proj_weight, input_states,
in_proj_bias, out_proj_bias,
mask, use_time_mask), atol=1e-04, rtol=0.001)
mask = input_states.new(*(len_q, len_r)).bernoulli_(p=0.25).bool()
print("gradchecking with time mask start.")
use_time_mask = True
torch.autograd.gradcheck(net, (in_proj_weight, out_proj_weight, input_states,
in_proj_bias, out_proj_bias,
mask, use_time_mask), atol=1e-04, rtol=0.001)
print("gradchecking completed.")
| 24,368
| 44.464552
| 127
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/modeling_m2m100.py
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch M2M100 model. """
import math
import random
from typing import Optional, Tuple
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F  # needed by M2M100DecoderLayer.call_mlp below
from torch.nn import CrossEntropyLoss
from collections import defaultdict
from .activations import ACT2FN
from .modeling_outputs import (
BaseModelOutput,
)
from .modeling_utils import PreTrainedModel
# from ...utils import logging
from .configuration_m2m100 import M2M100Config
from onmt.modules.layer_norm import LayerNorm
from onmt.modules.optimized.self_attention_func import self_attn_func
from onmt.modules.optimized.encdec_attention_func_bias import encdec_attn_bias_func
from onmt.modules.dropout import embedded_dropout
from onmt.modules.optimized.dropout_add import fused_dropout_add
from onmt.modules.optimized.linear import linear_function
from torch.cuda.amp import custom_fwd, custom_bwd
_CONFIG_FOR_DOC = "M2M100Config"
_TOKENIZER_FOR_DOC = "M2M100Tokenizer"
_CHECKPOINT_FOR_DOC = "facebook/m2m100_418M"
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/m2m100_418M",
# See all M2M100 models at https://huggingface.co/models?filter=m2m_100
]
#
# # Copied from transformers.models.bart.modeling_bart.shift_tokens_right
# def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
# """
# Shift input ids one token to the right.
# """
# shifted_input_ids = input_ids.new_zeros(input_ids.shape)
# shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
# shifted_input_ids[:, 0] = decoder_start_token_id
#
# if pad_token_id is None:
# raise ValueError("self.model.config.pad_token_id has to be defined.")
# # replace possible -100 values in labels by `pad_token_id`
# shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
#
# return shifted_input_ids
#
#
# # Copied from transformers.models.bart.modeling_bart._make_causal_mask
# def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
# """
# Make causal mask used for bi-directional self-attention.
# """
# bsz, tgt_len = input_ids_shape
# mask = torch.full((tgt_len, tgt_len), float("-inf"))
# mask_cond = torch.arange(mask.size(-1))
# mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
# mask = mask.to(dtype)
#
# if past_key_values_length > 0:
# mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
# return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
#
#
# # Copied from transformers.models.bart.modeling_bart._expand_mask
# def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
# """
# Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
# """
# bsz, src_len = mask.size()
# tgt_len = tgt_len if tgt_len is not None else src_len
#
# expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
#
# inverted_mask = 1.0 - expanded_mask
#
# return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
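# --- Added worked example (not part of the original file): with padding_idx = 1, real
# --- tokens count upwards from padding_idx + 1 and pads keep the pad index, matching
# --- fairseq's make_positions convention:
def _position_ids_example():
    ids = torch.tensor([[5, 6, 7, 1, 1]])
    out = create_position_ids_from_input_ids(ids, padding_idx=1)
    assert out.tolist() == [[2, 3, 4, 1, 1]]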
class M2M100SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, "weights"):
# in forward put the weights on the correct dtype and device of the param
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.weights = nn.Parameter(emb_weights)
self.weights.requires_grad = False
self.weights.detach_()
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
"""
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
@torch.no_grad()
def forward(
self, input_ids: torch.Tensor = None, inputs_embeds: torch.Tensor = None, past_key_values_length: int = 0
):
if input_ids is not None:
bsz, seq_len = input_ids.size()
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
input_ids.device
)
else:
bsz, seq_len = inputs_embeds.size()[:-1]
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
# expand embeddings if needed
max_pos = self.padding_idx + 1 + seq_len
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape).contiguous()
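# --- Added check (not part of the original file): get_embedding() pairs sin/cos per
# --- position and zeroes the row at padding_idx, so padded tokens receive a null
# --- positional embedding:
def _sinusoidal_embedding_example():
    emb = M2M100SinusoidalPositionalEmbedding.get_embedding(6, 4, padding_idx=1)
    assert emb.shape == (6, 4)
    assert torch.all(emb[1] == 0)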
from .modeling_mbart import MBartAttention as M2M100Attention
from .modeling_mbart import MBartCrossAttention as M2M100CrossAttention
from .modeling_mbart import index_copy
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->M2M100
class M2M100EncoderLayer(nn.Module):
def __init__(self, config: M2M100Config):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = M2M100Attention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.activation_fn_name = config.activation_function
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
from onmt.modules.optimized.fast_mha import fast_bert_mha
self.fast_bert_mha = fast_bert_mha
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: bool = False,
max_len=-1, cu_seqlens=None, **kwargs
):
"""
:param hidden_states:
:param attention_mask:
:param layer_head_mask:
:param output_attentions:
:param max_len:
:param cu_seqlens:
:param kwargs:
:return:
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
cu_seqlens=cu_seqlens,
max_len=max_len
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.fused and hidden_states.is_cuda:
weights = [self.fc1.weight, self.fc2.weight]
biases = [self.fc1.bias, self.fc2.bias]
dropout = self.activation_dropout if self.training else 0.0
hidden_states = self.fused_function(dropout, False, hidden_states, *weights, *biases).type_as(hidden_states)
else:
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
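# --- Added illustration (not part of the original file): the float16 clamp at the end
# --- of M2M100EncoderLayer.forward pulls overflowed activations back just inside the
# --- half-precision range so a stray inf does not poison later layers:
def _fp16_clamp_example():
    h = torch.tensor([float("inf"), 1.0, -float("inf")], dtype=torch.float16)
    clamp_value = torch.finfo(h.dtype).max - 1000
    h = torch.clamp(h, min=-clamp_value, max=clamp_value)
    assert torch.isfinite(h).all()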
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->M2M100
class M2M100DecoderLayer(nn.Module):
def __init__(self, config: M2M100Config):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = M2M100Attention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.encoder_attn = M2M100CrossAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.activation_fn_name = config.activation_function
self.fused = False
self.fused_function = None
if self.activation_fn_name == 'relu':
from onmt.modules.mlp.mlp import mlp_relu_function
if mlp_relu_function is not None:
self.fused_function = mlp_relu_function
self.fused = True
elif self.activation_fn_name == 'gelu':
from onmt.modules.mlp.mlp import mlp_gelu_function
if mlp_gelu_function is not None:
self.fused_function = mlp_gelu_function
self.fused = True
self.is_factorized = False
self.multiplicative_factorize = False
self.fast_factorize = False
self.ffn_dim = config.decoder_ffn_dim
self.n_languages = -1
self.has_adapter = False
self.adapter_location = -1
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False):
self.self_attn.add_factorized_weights(n_languages, rank=rank, multiplicative=multiplicative, fast=fast)
self.encoder_attn.add_factorized_weights(n_languages, rank=rank, multiplicative=multiplicative, fast=fast)
self.r_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
self.s_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.r_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.s_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
nn.init.normal_(self.r_i, 0.0, 0.02)
nn.init.normal_(self.s_i, 0.0, 0.02)
nn.init.normal_(self.r_o, 0.0, 0.02)
nn.init.normal_(self.s_o, 0.0, 0.02)
if multiplicative:
rank = rank if fast else 1
self.rm_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
self.sm_i = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.rm_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.embed_dim))
self.sm_o = torch.nn.Parameter(torch.Tensor(n_languages, rank, self.ffn_dim))
constant = math.sqrt(1.0 / rank) if fast else 1
nn.init.constant_(self.rm_i, constant)
nn.init.constant_(self.sm_i, constant)
nn.init.constant_(self.rm_o, constant)
nn.init.constant_(self.sm_o, constant)
def add_adapters(self, n_languages, downsampling_factor=4, adapter_location=1):
"""
:param n_languages: one adapter per language
:param downsampling_factor: downsampling rate size for the hidden layer
:param adapter_location:
:return:
"""
self.n_languages = n_languages
self.has_adapter = True
self.adapter_location = adapter_location
from .adapter import MultilingualAdapter
self.adapter = MultilingualAdapter(n_languages, self.embed_dim, downsample_factor=downsampling_factor)
def get_mlp_weights(self, lang=None, mixture=None):
in_weight = self.fc1.weight
out_weight = self.fc2.weight
in_bias = self.fc1.bias
out_bias = self.fc2.bias
if lang is not None:
assert mixture is None
if self.is_factorized:
if self.multiplicative_factorize:
rm_i = torch.index_select(self.rm_i, 0, lang).squeeze(0) # squeeze possible because only 1
sm_i = torch.index_select(self.sm_i, 0, lang).squeeze(0)
rm_o = torch.index_select(self.rm_o, 0, lang).squeeze(0)
sm_o = torch.index_select(self.sm_o, 0, lang).squeeze(0)
if self.fast_factorize:
mul_factor_in = torch.mm(rm_i.t(), sm_i)
mul_factor_out = torch.mm(rm_o.t(), sm_o)
else:
mul_factor_in = torch.bmm(rm_i.unsqueeze(-1), sm_i.unsqueeze(1)).sum(dim=0)
mul_factor_out = torch.bmm(rm_o.unsqueeze(-1), sm_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight * mul_factor_in
out_weight = out_weight * mul_factor_out
r_i = torch.index_select(self.r_i, 0, lang).squeeze(0)
s_i = torch.index_select(self.s_i, 0, lang).squeeze(0)
r_o = torch.index_select(self.r_o, 0, lang).squeeze(0)
s_o = torch.index_select(self.s_o, 0, lang).squeeze(0)
if self.fast_factorize:
add_factor_in = torch.mm(r_i.t(), s_i)
add_factor_out = torch.mm(r_o.t(), s_o)
else:
add_factor_in = torch.bmm(r_i.unsqueeze(-1), s_i.unsqueeze(1)).sum(dim=0)
add_factor_out = torch.bmm(r_o.unsqueeze(-1), s_o.unsqueeze(1)).sum(dim=0)
in_weight = in_weight + add_factor_in
out_weight = out_weight + add_factor_out
if mixture is not None:
raise NotImplementedError
return in_weight, out_weight, in_bias, out_bias
def call_mlp(self, x, in_weight, out_weight, in_bias, out_bias, activation_fn, dropout_p, training_,
fused, fused_function):
"""
Move the MLP section to a different function to choose between pytorch and custom mlp
:param x:
:param in_weight:
:param out_weight:
:param in_bias:
:param out_bias:
:param activation_fn:
:param dropout_p:
:param training_:
:param fused:
:param fused_function:
:return:
"""
# TODO: check type x torch.half or torch.float32
if fused and x.is_cuda:
dropout_p_ = dropout_p if training_ else 0.0
weights = [in_weight, out_weight]
biases = [in_bias, out_bias]
x = fused_function(dropout_p_, False, x, *weights, *biases)
else:
x = F.linear(x, in_weight, in_bias)
x = activation_fn(x)
x = F.dropout(x, dropout_p, training=training_)
x = F.linear(x, out_weight, out_bias)
return x
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
incremental: Optional[bool] = False,
incremental_cache=None,
lang=None, mixture=None, **kwargs
):
"""
:param hidden_states:
:param attention_mask:
:param encoder_hidden_states:
:param encoder_attention_mask:
:param output_attentions:
:param incremental:
:param incremental_cache:
:param lang:
:param mixture:
:return:
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# print(hidden_states.size(), attention_mask.size(), encoder_hidden_states.size())
# Self Attention
hidden_states, self_attn_weights, incremental_cache = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
incremental=incremental, incremental_cache=incremental_cache,
lang=lang
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
hidden_states, cross_attn_weights, incremental_cache = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
incremental=incremental, incremental_cache=incremental_cache,
lang=lang
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
# hidden_states = self.activation_fn(self.fc1(hidden_states))
# hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
# hidden_states = self.fc2(hidden_states)
in_weight, out_weight, in_bias, out_bias = self.get_mlp_weights(lang=lang, mixture=mixture)
hidden_states = self.call_mlp(hidden_states, in_weight, out_weight, in_bias, out_bias,
self.activation_fn, self.activation_dropout, self.training,
self.fused, self.fused_function)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if self.has_adapter:
residual = hidden_states
if self.adapter_location == 1:
assert lang is not None or mixture is not None
hidden_states = self.adapter(hidden_states, lang=lang, mixture=mixture)
hidden_states.add_(residual)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs, incremental_cache
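# --- Added sketch (not part of the original file): get_mlp_weights() above builds a
# --- per-language weight from a shared one via low-rank updates. In the "fast"
# --- parameterisation, with factors of shape [rank, out] and [rank, in], the
# --- effective weight is W * (rm^T @ sm) + r^T @ s:
def _factorized_weight_example(rank=4, d_in=8, d_out=16):
    w = torch.randn(d_out, d_in)
    rm, sm = torch.randn(rank, d_out), torch.randn(rank, d_in)
    r, s = torch.randn(rank, d_out), torch.randn(rank, d_in)
    w_lang = w * torch.mm(rm.t(), sm) + torch.mm(r.t(), s)
    assert w_lang.shape == w.shape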
class M2M100PreTrainedModel(PreTrainedModel):
config_class = M2M100Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (M2M100Decoder, M2M100Encoder)):
module.gradient_checkpointing = value
class M2M100Encoder(M2M100PreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`M2M100EncoderLayer`].
Args:
config: M2M100Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: M2M100Config, opt, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
config.dropout = opt.residual_dropout if opt.residual_dropout > 0 else opt.dropout
config.attention_dropout = opt.attn_dropout
config.activation_dropout = opt.ffn_dropout if opt.ffn_dropout > 0 else opt.dropout
config.layerdrop = opt.death_rate
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = M2M100SinusoidalPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([M2M100EncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.word_dropout = opt.word_dropout
# Initialize weights and apply final processing
# self.post_init()
from onmt.modules.optimized.fast_mha import fast_bert_mha
self.fast_bert_mha = fast_bert_mha
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`M2M100Tokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`]
for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert `input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = embedded_dropout(self.embed_tokens, input_ids,
dropout=self.word_dropout if self.training else 0)
inputs_embeds = inputs_embeds * self.embed_scale
embed_pos = self.embed_positions(input_ids, inputs_embeds)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# # expand attention_mask
# if attention_mask is not None:
# # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
# attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
can_run_fast_bert_mha = False
# check if fast bert mha can be run
seq_len = hidden_states.size(1)
bsz = hidden_states.size(0)
sm = torch.cuda.get_device_capability()
total_bsz = 0
if torch.is_autocast_enabled():
try:
hidden_states = torch.cuda.amp.autocast_mode._cast(hidden_states, torch.get_autocast_gpu_dtype())
except AttributeError:
hidden_states = torch.cuda.amp.autocast_mode._cast(hidden_states, torch.half)
# only run this when seq_len <= 512 and sm = 80/86 and type = half
# if self.fast_bert_mha is not None and (seq_len <= 512 and bsz >= 4 and sm[0] == 8 and sm[1] in [0, 6]) \
# and hidden_states.dtype == torch.half:
# can_run_fast_bert_mha = True
#
# x = hidden_states
# padding_mask = attention_mask # [B x T]
# # masked positions = 1 so to compute length we need the (1 -)
# if padding_mask is None:
# padding_mask = x.new_zeros(bsz, seq_len)
# padding_mask = padding_mask.long()
# lengths = (1 - padding_mask).sum(dim=1)
# lengths = lengths.cpu().tolist() # list of lengths for B seqs
#
# x = x.view(-1, x.size(-1))
# non_pad_indices = torch.nonzero(padding_mask.view(-1).ne(1)).squeeze(1)
# hidden_states = x.index_select(0, non_pad_indices)
#
# max_len = max(lengths)
# # cumulative sequence lengths (required input for fmha)
# a = torch.tensor(np.array([0] + lengths), dtype=torch.int32)
# cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
# else:
max_len = -1
cu_seqlens = None
non_pad_indices = None
if not can_run_fast_bert_mha:
# transpose from [B x T x H] to [T x B x H]
hidden_states = hidden_states.transpose(0, 1).contiguous()
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
max_len=max_len, cu_seqlens=cu_seqlens,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if can_run_fast_bert_mha:
# remove the patch
# if x.size(0) > total_bsz:
# x = x[:total_bsz, :]
hidden_states = index_copy(hidden_states, non_pad_indices, bsz * seq_len)
hidden_states = hidden_states.view(bsz, seq_len, -1)
hidden_states = hidden_states.transpose(0, 1).contiguous()
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
class M2M100Decoder(M2M100PreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`M2M100DecoderLayer`]
Args:
config: M2M100Config
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: M2M100Config, opt, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
# override
config.dropout = opt.residual_dropout if opt.residual_dropout > 0 else opt.dropout
config.activation_dropout = opt.ffn_dropout if opt.ffn_dropout > 0 else opt.dropout
config.attention_dropout = opt.attn_dropout
self.dropout = config.dropout
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = M2M100SinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([M2M100DecoderLayer(config) for _ in range(config.decoder_layers)])
self.layer_norm = LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
# self.post_init()
self.model_size = config.d_model
self.switchout = 0.0
# self.word_lut = self.embed_tokens
self.config.bert_hidden_size = config.d_model
self.layerdrop = opt.death_rate_decoder
self.dec_pretrained_model = 'm2m100'
self.embed_tokens.weight.requires_grad = False
self.word_dropout = opt.word_dropout
def add_factorize(self, n_languages, rank=4, multiplicative=False, fast=False):
for layer in self.layers:
layer.add_factorize(n_languages, rank=rank, multiplicative=multiplicative, fast=fast)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
lang=None, mixture=None,
**kwargs
):
"""
:param input_ids:
:param attention_mask:
:param encoder_hidden_states:
:param encoder_attention_mask:
:param inputs_embeds:
:param output_attentions:
:param output_hidden_states:
:param incremental:
:param incremental_cache:
:param lang:
:param mixture:
:return:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = 0
if inputs_embeds is None:
inputs_embeds = embedded_dropout(self.embed_tokens, input_ids,
dropout=self.word_dropout if self.training else 0)
inputs_embeds = inputs_embeds * self.embed_scale
# embed positions
positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length)
# build the autoregressive (causal) mask; it replaces any attention_mask passed in
qlen = input_ids.size(1)
klen = qlen
attention_mask = torch.triu(
inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = hidden_states.transpose(0, 1).contiguous()
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
layer_outputs, _ = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
lang=lang,
mixture=mixture
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
def step(self, input, decoder_state, **kwargs):
# context is stored in the decoder state in [T B H] format
encoder_hidden_states = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffering = decoder_state.buffering
input_ids = input
if buffering:
# with buffering, only the last time step is actually decoded below
past_key_values_length = input.size(1) - 1
else:
past_key_values_length = 0
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
qlen = input_ids.size(1)
klen = qlen
attention_mask = torch.triu(
inputs_embeds.new_ones(qlen, klen), diagonal=1).bool()
encoder_attention_mask = decoder_state.src_mask
# positions are recomputed for the full prefix, hence past_key_values_length = 0
positions = self.embed_positions(input_ids, inputs_embeds, 0)
hidden_states = inputs_embeds + positions
if buffering:
hidden_states = hidden_states[:, -1:, :]
attention_mask = attention_mask[-1:, :]
hidden_states = hidden_states.transpose(0, 1).contiguous()
for idx, decoder_layer in enumerate(self.layers):
if buffering:
buffer = buffers[idx] if idx in buffers else None
else:
buffer = None
layer_outputs, buffer = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=None,
incremental=buffering, incremental_cache=buffer,
lang=lang, mixture=None
)
if buffering:
decoder_state.update_attention_buffer(buffer, idx)
hidden_states = layer_outputs[0]
hidden_states = self.layer_norm(hidden_states)
output = hidden_states[-1].unsqueeze(0)
# placeholder coverage tensor (zeros), kept for interface compatibility
coverage = hidden_states.new(hidden_states.size(1), 1, encoder_hidden_states.size(0)).zero_()
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = encoder_hidden_states
return output_dict
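# A hedged, standalone sketch (not part of the original source) of the causal
# mask built by forward() and step() above with torch.triu: entries above the
# diagonal are True and are masked out as future positions.
import torch
qlen = 4
causal_mask = torch.triu(torch.ones(qlen, qlen), diagonal=1).bool()
# causal_mask[i, j] is True for j > i, so step i only attends to positions j <= i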
| 41,949
| 40.208251
| 120
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/configuration_m2m100.py
|
# coding=utf-8
# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" M2M100 model configuration"""
from .configuration_utils import PretrainedConfig
M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/config.json",
# See all M2M100 models at https://huggingface.co/models?filter=m2m_100
}
class M2M100Config(PretrainedConfig):
model_type = "m2m_100"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=128112,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.05,
decoder_layerdrop=0.05,
use_cache=True,
is_encoder_decoder=True,
activation_function="relu",
d_model=1024,
dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=2,
scale_embedding=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
**kwargs
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
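# A hedged usage sketch (not part of the original file): instantiate the config
# and serialize only the fields that differ from the PretrainedConfig defaults.
config = M2M100Config(encoder_layers=6, decoder_layers=6)
print(config.d_model)  # 1024 by default
print(config.to_json_string())  # use_diff=True keeps non-default values only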
| 3,133
| 35.44186
| 99
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/configuration_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
from typing import Any, Dict, Tuple
from .file_utils import CONFIG_NAME # hf_bucket_url, is_remote_url
class PretrainedConfig(object):
r""" Base class for all configuration classes.
Handles a few parameters common to all models' configurations as well as methods for loading/downloading/saving
configurations.
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`): An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
Args:
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all attentions.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list
of heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer
2.
xla_device (:obj:`bool`, `optional`):
A flag indicating whether a TPU is available.
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by
default in the :obj:`generate` method of the model.
- **min_length** (:obj:`int`, `optional`, defaults to 10) -- Minimum length that will be used by
default in the :obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in
the :obj:`generate` method of the model. Whether or not to use sampling; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by
default in the :obj:`generate` method of the model. Whether to stop the beam search when at least
``num_beams`` sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be
used by default in the :obj:`generate` method of the model. 1 means no beam search.
- **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to modulate the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to
keep for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens
with probabilities that add up to ``top_p`` or highest are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty
that will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that
will be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default
in the :obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of
that size can only occur once.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be
generated that will be used by default in the :obj:`generate` method of the model. In order to get the
tokens of the words that should not appear in the generated text, use
:obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed
returned sequences for each element in the batch that will be used by default in the :obj:`generate`
method of the model.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the
model pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`List[str]`, `optional`) -- A map from index (for instance prediction index, or target
index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for
the current task.
Parameters linked to the tokenizer
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each
text before calling the model.
- **bos_token_id** (:obj:`int`, `optional`) -- The id of the `beginning-of-stream` token.
- **pad_token_id** (:obj:`int`, `optional`) -- The id of the `padding` token.
- **eos_token_id** (:obj:`int`, `optional`) -- The id of the `end-of-stream` token.
- **decoder_start_token_id** (:obj:`int`, `optional`) -- If an encoder-decoder model starts decoding with
a different token than `bos`, the id of that token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with Torchscript.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should
use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.use_cache = kwargs.pop("use_cache", True) # Not used by all models
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# TPU arguments
self.xla_device = kwargs.pop("xla_device", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
print("Can't set {} with value {} for {}".format(key, value, self))
raise err
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
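# Example (illustrative): num_labels = 3 yields
# id2label == {0: "LABEL_0", 1: "LABEL_1", 2: "LABEL_2"}
# label2id == {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}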
def save_pretrained(self, save_directory: str):
"""
Save a configuration object to the directory ``save_directory``, so that it can be re-loaded using the
:func:`~transformers.PretrainedConfig.from_pretrained` class method.
Args:
save_directory (:obj:`str`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
"""
if os.path.isfile(save_directory):
raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory))
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
print("Configuration saved in {}".format(output_config_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs) -> "PretrainedConfig":
r"""
Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pretrained model
configuration.
Args:
pretrained_model_name_or_path (:obj:`str`):
This can be either:
- the `shortcut name` of a pretrained model configuration to load from cache or download, e.g.,
``bert-base-uncased``.
- the `identifier name` of a pretrained model configuration that was uploaded to our S3 by any user,
e.g., ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing a configuration file saved using the
:func:`~transformers.PretrainedConfig.save_pretrained` method, e.g., ``./my_model_directory/``.
- a path or url to a saved configuration JSON `file`, e.g.,
``./my_model_directory/configuration.json``.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force a (re-)download of the configuration files, overriding any cached
versions.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete an incompletely received file. Attempts to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.`
The proxies are used on each request.
return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`False`, then this function returns just the final configuration object.
If :obj:`True`, then this functions returns a :obj:`Tuple(config, unused_kwargs)` where `unused_kwargs`
is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e.,
the part of ``kwargs`` which has not been used to update ``config`` and is otherwise ignored.
kwargs (:obj:`Dict[str, Any]`, `optional`):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is
controlled by the ``return_unused_kwargs`` keyword parameter.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from this pretrained model.
Examples::
# We can't instantiate directly the base class `PretrainedConfig` so let's show the examples on a
# derived class: BertConfig
config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
assert config.output_attention == True
config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
foo=False, return_unused_kwargs=True)
assert config.output_attention == True
assert unused_kwargs == {'foo': False}
"""
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(config_dict, **kwargs)
@classmethod
def from_json_file(cls, json_file: str) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from the path to a JSON file of parameters.
Args:
json_file (:obj:`str`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: str):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
def to_diff_dict(self) -> Dict[str, Any]:
"""
Removes all attributes from config which correspond to the default
config attributes for better readability and serializes to a Python
dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if key not in default_config_dict or value != default_config_dict[key]:
serializable_config_dict[key] = value
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON string.
Returns:
:obj:`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: str, use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (:obj:`str`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from ``config_dict``.
Args:
config_dict (:obj:`Dict[str, Any]`): Dictionary of attributes that shall be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
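# A hedged round-trip sketch (illustrative, not part of the original file):
# persist a config with to_json_file and restore it with from_json_file.
import tempfile
_path = os.path.join(tempfile.mkdtemp(), "config.json")
_cfg = PretrainedConfig(num_labels=3, finetuning_task="ner")
_cfg.to_json_file(_path)
_restored = PretrainedConfig.from_json_file(_path)
assert _restored.finetuning_task == "ner" and _restored.num_labels == 3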
| 21,508
| 56.819892
| 153
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/positional_embeddings.py
|
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import math
def make_positions(tensor, padding_idx: int):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
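# A hedged sanity check (not part of the original file): real tokens are
# numbered from padding_idx + 1 and padded slots keep padding_idx, e.g.
# make_positions(torch.tensor([[5, 6, 1, 1]]), padding_idx=1) -> tensor([[2, 3, 1, 1]])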
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = torch.zeros(
(1, 1), device=input.device, dtype=input.dtype
).fill_(int(self.padding_idx + input.size(1)))
else:
positions = make_positions(
input, self.padding_idx
)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx if padding_idx is not None else 0
self.onnx_trace = False  # referenced in the ONNX export branch of forward
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
self.register_buffer("_float_tensor", torch.FloatTensor(1), persistent=False)
self.max_positions = int(1e5)
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
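# Shape note (illustrative): get_embedding(n, d) returns an [n x d] table whose
# first half of columns holds sin features and second half cos features; row
# `padding_idx` is zeroed when given, and an odd d gets one zero-padded column.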
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.size(0), input.size(1)
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos, self.embedding_dim, self.padding_idx
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
if self.onnx_trace:
return (
self.weights.index_select(index=self.padding_idx + pos, dim=0)
.unsqueeze(1)
.repeat(bsz, 1, 1)
)
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = make_positions(
input, self.padding_idx
)
return (
self.weights.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
.detach()
)
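# A hedged usage sketch (not part of the original module):
_emb = SinusoidalPositionalEmbedding(embedding_dim=16, padding_idx=1)
_tokens = torch.tensor([[5, 6, 7, 1]])  # final token is the pad
_pos = _emb(_tokens)  # [1, 4, 16]; the padded slot maps to the zeroed row
assert _pos.shape == (1, 4, 16)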
| 5,604
| 37.655172
| 94
|
py
|
NMTGMinor
|
NMTGMinor-master/pretrain_module/modeling_outputs.py
|
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from .file_utils import ModelOutput
@dataclass
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
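# A hedged construction sketch (not part of the original file); the dict/tuple
# access behavior comes from ModelOutput in .file_utils, assumed here to accept
# standard dataclass construction:
_out = BaseModelOutput(last_hidden_state=torch.zeros(2, 5, 8))
assert _out.last_hidden_state.shape == (2, 5, 8)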
| 1,557
| 49.258065
| 168
|
py
|