text stringlengths 957 885k |
|---|
<reponame>dahliaOS/fuchsia-pi4
#!/usr/bin/env python3.8
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
import shutil
import tarfile
import unittest
from unittest import mock
from gather_package_deps import GatherPackageDeps
class GatherPackageDepsTests(unittest.TestCase):
    """Unit tests for the GatherPackageDeps build helper.

    Each test works inside throwaway temp directories created in setUp and
    removed in tearDown.
    """

    source_dir = None
    package_json_path = None
    meta_far_path = None
    output_dir = None

    def setUp(self):
        self.source_dir = tempfile.TemporaryDirectory()
        self.output_dir = tempfile.TemporaryDirectory()
        self.package_json_path = os.path.join(self.source_dir.name, 'pkg.json')
        self.meta_far_path = os.path.join(self.source_dir.name, 'meta.far')
        self.depfile = os.path.join(self.source_dir.name, 'depfile.d')
        # Create placeholder files.
        open(self.package_json_path, 'a').close()
        open(self.meta_far_path, 'a').close()

    def tearDown(self):
        self.source_dir.cleanup()
        self.output_dir.cleanup()

    def _write_package_json(self, text):
        # Overwrite the pkg.json placeholder with the given JSON text.
        with open(self.package_json_path, 'w') as f:
            f.write(text)

    def test_init(self):
        """A valid argument set is accepted; any empty/None argument raises."""
        GatherPackageDeps(
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        valid = (
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        # Replacing any single argument with '' or None must raise ValueError.
        for index in range(len(valid)):
            for bad_value in ('', None):
                args = list(valid)
                args[index] = bad_value
                with self.subTest(index=index, bad_value=bad_value):
                    with self.assertRaises(ValueError):
                        GatherPackageDeps(*args)

    def test_parse_package_json(self):
        """parse_package_json maps blob 'path' -> 'source_path', rejecting bad input."""
        gatherer = GatherPackageDeps(
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        # Not JSON at all -> ValueError.
        self._write_package_json('noooot JSOOOOON')
        with self.assertRaises(ValueError):
            gatherer.parse_package_json()
        # Valid JSON but missing the 'blobs' key -> KeyError.
        self._write_package_json(r'{ }')
        with self.assertRaises(KeyError):
            gatherer.parse_package_json()
        # Empty blob list -> empty manifest.
        self._write_package_json(r'{ "blobs": [] }')
        self.assertDictEqual(gatherer.parse_package_json(), {})
        self._write_package_json(
            """{ "blobs":
                [ { "source_path": "some/path/A", "path": "path/A" } ]
            }
            """)
        self.assertDictEqual(
            gatherer.parse_package_json(), {'path/A': 'some/path/A'})
        self._write_package_json(
            """{ "blobs":
                [
                    { "source_path": "some/path/A", "path": "path/A" },
                    { "source_path": "some/path/B", "path": "path/B" }
                ]
            }
            """)
        self.assertDictEqual(
            gatherer.parse_package_json(), {
                'path/A': 'some/path/A',
                'path/B': 'some/path/B'
            })

    @mock.patch.object(shutil, 'copyfile', autospec=True)
    def test_copy_meta_far(self, copyfile_mock):
        """copy_meta_far copies meta.far into the output directory."""
        gatherer = GatherPackageDeps(
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        gatherer.copy_meta_far()
        copyfile_mock.assert_called_once_with(
            self.meta_far_path, os.path.join(self.output_dir.name, 'meta.far'))

    @mock.patch.object(shutil, 'copyfile', autospec=True)
    @mock.patch.object(os, 'makedirs', autospec=True)
    def test_copy_to_output_dir(self, makedirs_mock, copyfile_mock):
        """copy_to_output_dir copies each blob and relativizes its source path."""
        gatherer = GatherPackageDeps(
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        test_dict = {
            'path/A': '../../../thing/A',
            'path/B': 'thing/B',
            'path/to/C': '/abs/path/C'
        }
        gatherer.copy_to_output_dir(test_dict)
        self.assertEqual(3, makedirs_mock.call_count)
        self.assertEqual(3, copyfile_mock.call_count)
        # '../' prefixes and leading '/' must be stripped from source paths.
        self.assertDictEqual(
            test_dict, {
                'path/A': 'thing/A',
                'path/B': 'thing/B',
                'path/to/C': 'abs/path/C'
            })

    def test_write_new_manifest(self):
        """write_new_manifest emits '<path>=<source>' lines plus the meta.far entry."""
        gatherer = GatherPackageDeps(
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        gatherer.write_new_manifest(
            {
                'path/A': 'some/path/A',
                'path/B': 'some/path/B'
            })
        with open(os.path.join(self.output_dir.name, 'package.manifest'),
                  'r') as f:
            manifest_data = f.read()
        self.assertIn('path/A=some/path/A', manifest_data)
        self.assertIn('path/B=some/path/B', manifest_data)
        self.assertIn('meta/package=meta.far', manifest_data)

    def test_archive_output(self):
        """archive_output tars the output dir with relative paths, removing sources."""
        tar_path = os.path.join(self.output_dir.name, 'package.tar')
        self.assertFalse(os.path.isfile(tar_path))
        gatherer = GatherPackageDeps(
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        # Archiving an empty output dir still produces a tar file.
        gatherer.archive_output(tar_path)
        self.assertTrue(os.path.isfile(tar_path))
        file_a = os.path.join(self.output_dir.name, 'fileA')
        file_b = os.path.join(self.output_dir.name, 'sub', 'fileB')
        file_c = os.path.join(self.output_dir.name, 'another', 'dir', 'fileC')
        with open(file_a, 'w') as f:
            f.write('A')
        os.makedirs(os.path.dirname(file_b), exist_ok=False)
        with open(file_b, 'w') as f:
            f.write('BB')
        os.makedirs(os.path.dirname(file_c), exist_ok=False)
        with open(file_c, 'w') as f:
            f.write('CCC')
        gatherer.archive_output(tar_path)
        self.assertTrue(os.path.isfile(tar_path))
        # Test that all temporary files are deleted after archive.
        for f in (file_a, file_b, file_c):
            self.assertFalse(os.path.exists(f))
        # Main thing we need to check here is that the paths within the archive
        # are relative instead of absolute, because absolute paths relative to the
        # system that created the archive are meaningless for anyone unarchiving.
        size_index = {'./fileA': 1, 'sub/fileB': 2, 'another/dir/fileC': 3}
        with tarfile.open(tar_path, 'r') as tar:
            for member in tar.getmembers():
                self.assertIn(member.name, size_index)
                self.assertEqual(member.size, size_index[member.name])

    def test_run(self):
        """End-to-end: run() produces package.tar with a manifest, plus a depfile."""
        backup_cwd = os.getcwd()
        os.chdir(self.source_dir.name)
        # Restore the working directory even if the test body fails, so other
        # tests and tempdir cleanup are unaffected (the original code leaked
        # the chdir on failure).
        self.addCleanup(os.chdir, backup_cwd)
        file_meta = 'meta.far'
        file_a = 'fileA'
        file_b = os.path.join('sub', 'fileB')
        file_c = os.path.join('another', 'dir', 'fileC')
        open(file_meta, 'a').close()
        open(file_a, 'a').close()
        os.makedirs(os.path.dirname(file_b), exist_ok=False)
        open(file_b, 'a').close()
        os.makedirs(os.path.dirname(file_c), exist_ok=False)
        open(file_c, 'a').close()
        self._write_package_json(
            """{{ "blobs":
                [
                    {{ "source_path": "{}", "path": "path/A" }},
                    {{ "source_path": "{}", "path": "path/B" }},
                    {{ "source_path": "{}", "path": "path/C" }}
                ]
            }}
            """.format(file_a, file_b, file_c))
        gatherer = GatherPackageDeps(
            self.package_json_path, self.meta_far_path, self.output_dir.name,
            self.depfile)
        gatherer.run()
        expected_files = {
            './package.manifest', './meta.far', './fileA', 'sub/fileB',
            'another/dir/fileC'
        }
        observed_files = set()
        expected_manifest_lines = {
            'path/A=fileA', 'path/B=sub/fileB', 'path/C=another/dir/fileC',
            'meta/package=meta.far'
        }
        observed_manifest_lines = set()
        output_tar = os.path.join(self.output_dir.name, 'package.tar')
        with tarfile.open(output_tar, 'r') as tar:
            for member in tar.getmembers():
                observed_files.add(member.name)
                if member.name == './package.manifest':
                    for line in tar.extractfile(member).readlines():
                        observed_manifest_lines.add(
                            line.decode("utf-8").strip())
        self.assertEqual(observed_files, expected_files)
        self.assertEqual(observed_manifest_lines, expected_manifest_lines)
        with open(self.depfile, 'r') as f:
            observed_depfile = f.read()
        expected_depfile = f'{output_tar}: fileA sub/fileB another/dir/fileC\n'
        self.assertEqual(observed_depfile, expected_depfile)
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
"""This module contains the general information for ProcessorSecurityStats ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ProcessorSecurityStatsConsts:
    """Allowed values for the ``suspect`` property of ProcessorSecurityStats."""
    SUSPECT_FALSE = "false"
    SUSPECT_NO = "no"
    SUSPECT_TRUE = "true"
    SUSPECT_YES = "yes"
class ProcessorSecurityStats(ManagedObject):
    """Read-only managed object exposing PSP (Platform Security Processor)
    correctable/uncorrectable error counters for a processor unit, sampled
    over several time windows (15 min / 1 hour / 1 day / 1 week / 2 weeks).

    This class follows the generated UCS SDK managed-object layout: class-level
    metadata dictionaries plus an ``__init__`` that initializes every property
    to None before delegating to ``ManagedObject``.
    """

    consts = ProcessorSecurityStatsConsts()
    naming_props = set([])

    # Managed-object metadata: XML class id, relative-name pattern, access
    # ("OutputOnly" = statistics, not configurable), privileges and parent MO.
    mo_meta = MoMeta("ProcessorSecurityStats", "processorSecurityStats", "security-stats", VersionMeta.Version412a, "OutputOnly", 0xf, [], ["admin", "operations", "read-only"], ['processorUnit'], [], [None])

    # Per-property metadata: (python name, XML name, type, min version, access,
    # mask, min/max length, regex restriction, value restriction, range restriction).
    prop_meta = {
        "psp_correctable_errors": MoPropertyMeta("psp_correctable_errors", "PSPCorrectableErrors", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors15_min": MoPropertyMeta("psp_correctable_errors15_min", "PSPCorrectableErrors15Min", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors15_min_h": MoPropertyMeta("psp_correctable_errors15_min_h", "PSPCorrectableErrors15MinH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors1_day": MoPropertyMeta("psp_correctable_errors1_day", "PSPCorrectableErrors1Day", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors1_day_h": MoPropertyMeta("psp_correctable_errors1_day_h", "PSPCorrectableErrors1DayH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors1_hour": MoPropertyMeta("psp_correctable_errors1_hour", "PSPCorrectableErrors1Hour", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors1_hour_h": MoPropertyMeta("psp_correctable_errors1_hour_h", "PSPCorrectableErrors1HourH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors1_week": MoPropertyMeta("psp_correctable_errors1_week", "PSPCorrectableErrors1Week", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors1_week_h": MoPropertyMeta("psp_correctable_errors1_week_h", "PSPCorrectableErrors1WeekH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors2_weeks": MoPropertyMeta("psp_correctable_errors2_weeks", "PSPCorrectableErrors2Weeks", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_correctable_errors2_weeks_h": MoPropertyMeta("psp_correctable_errors2_weeks_h", "PSPCorrectableErrors2WeeksH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors": MoPropertyMeta("psp_uncorrectable_errors", "PSPUncorrectableErrors", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors15_min": MoPropertyMeta("psp_uncorrectable_errors15_min", "PSPUncorrectableErrors15Min", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors15_min_h": MoPropertyMeta("psp_uncorrectable_errors15_min_h", "PSPUncorrectableErrors15MinH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors1_day": MoPropertyMeta("psp_uncorrectable_errors1_day", "PSPUncorrectableErrors1Day", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors1_day_h": MoPropertyMeta("psp_uncorrectable_errors1_day_h", "PSPUncorrectableErrors1DayH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors1_hour": MoPropertyMeta("psp_uncorrectable_errors1_hour", "PSPUncorrectableErrors1Hour", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors1_hour_h": MoPropertyMeta("psp_uncorrectable_errors1_hour_h", "PSPUncorrectableErrors1HourH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors1_week": MoPropertyMeta("psp_uncorrectable_errors1_week", "PSPUncorrectableErrors1Week", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors1_week_h": MoPropertyMeta("psp_uncorrectable_errors1_week_h", "PSPUncorrectableErrors1WeekH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors2_weeks": MoPropertyMeta("psp_uncorrectable_errors2_weeks", "PSPUncorrectableErrors2Weeks", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "psp_uncorrectable_errors2_weeks_h": MoPropertyMeta("psp_uncorrectable_errors2_weeks_h", "PSPUncorrectableErrors2WeeksH", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version412a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "intervals": MoPropertyMeta("intervals", "intervals", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version412a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
        "thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
        "update": MoPropertyMeta("update", "update", "uint", VersionMeta.Version412a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
    }

    # Mapping from XML attribute names to python attribute names, used when
    # deserializing server responses.
    prop_map = {
        "PSPCorrectableErrors": "psp_correctable_errors", 
        "PSPCorrectableErrors15Min": "psp_correctable_errors15_min", 
        "PSPCorrectableErrors15MinH": "psp_correctable_errors15_min_h", 
        "PSPCorrectableErrors1Day": "psp_correctable_errors1_day", 
        "PSPCorrectableErrors1DayH": "psp_correctable_errors1_day_h", 
        "PSPCorrectableErrors1Hour": "psp_correctable_errors1_hour", 
        "PSPCorrectableErrors1HourH": "psp_correctable_errors1_hour_h", 
        "PSPCorrectableErrors1Week": "psp_correctable_errors1_week", 
        "PSPCorrectableErrors1WeekH": "psp_correctable_errors1_week_h", 
        "PSPCorrectableErrors2Weeks": "psp_correctable_errors2_weeks", 
        "PSPCorrectableErrors2WeeksH": "psp_correctable_errors2_weeks_h", 
        "PSPUncorrectableErrors": "psp_uncorrectable_errors", 
        "PSPUncorrectableErrors15Min": "psp_uncorrectable_errors15_min", 
        "PSPUncorrectableErrors15MinH": "psp_uncorrectable_errors15_min_h", 
        "PSPUncorrectableErrors1Day": "psp_uncorrectable_errors1_day", 
        "PSPUncorrectableErrors1DayH": "psp_uncorrectable_errors1_day_h", 
        "PSPUncorrectableErrors1Hour": "psp_uncorrectable_errors1_hour", 
        "PSPUncorrectableErrors1HourH": "psp_uncorrectable_errors1_hour_h", 
        "PSPUncorrectableErrors1Week": "psp_uncorrectable_errors1_week", 
        "PSPUncorrectableErrors1WeekH": "psp_uncorrectable_errors1_week_h", 
        "PSPUncorrectableErrors2Weeks": "psp_uncorrectable_errors2_weeks", 
        "PSPUncorrectableErrors2WeeksH": "psp_uncorrectable_errors2_weeks_h", 
        "childAction": "child_action", 
        "dn": "dn", 
        "intervals": "intervals", 
        "rn": "rn", 
        "sacl": "sacl", 
        "status": "status", 
        "suspect": "suspect", 
        "thresholded": "thresholded", 
        "timeCollected": "time_collected", 
        "update": "update", 
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initialize every property to None, then let ManagedObject apply
        ``kwargs`` and attach this MO under ``parent_mo_or_dn``."""
        self._dirty_mask = 0
        self.psp_correctable_errors = None
        self.psp_correctable_errors15_min = None
        self.psp_correctable_errors15_min_h = None
        self.psp_correctable_errors1_day = None
        self.psp_correctable_errors1_day_h = None
        self.psp_correctable_errors1_hour = None
        self.psp_correctable_errors1_hour_h = None
        self.psp_correctable_errors1_week = None
        self.psp_correctable_errors1_week_h = None
        self.psp_correctable_errors2_weeks = None
        self.psp_correctable_errors2_weeks_h = None
        self.psp_uncorrectable_errors = None
        self.psp_uncorrectable_errors15_min = None
        self.psp_uncorrectable_errors15_min_h = None
        self.psp_uncorrectable_errors1_day = None
        self.psp_uncorrectable_errors1_day_h = None
        self.psp_uncorrectable_errors1_hour = None
        self.psp_uncorrectable_errors1_hour_h = None
        self.psp_uncorrectable_errors1_week = None
        self.psp_uncorrectable_errors1_week_h = None
        self.psp_uncorrectable_errors2_weeks = None
        self.psp_uncorrectable_errors2_weeks_h = None
        self.child_action = None
        self.intervals = None
        self.sacl = None
        self.status = None
        self.suspect = None
        self.thresholded = None
        self.time_collected = None
        self.update = None

        ManagedObject.__init__(self, "ProcessorSecurityStats", parent_mo_or_dn, **kwargs)
|
# Copyright (c) 2021-2021, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import json
import os
import re
import subprocess
import sys
from argparse import ArgumentParser
from typing import Any, Dict, List, Optional, Type, Union, cast
import pkg_resources
import requests
import yaml
from cookiecutter.log import configure_logger
from cookiecutter.main import cookiecutter
# Characters stripped when deriving a Python package name from a directory name.
_bad_chars_re = re.compile("[^a-zA-Z0-9_]")
# Directory inside the installed c2cgeoportal_geoportal package holding the
# cookiecutter scaffold templates.
SCAFFOLDS_DIR = pkg_resources.resource_filename("c2cgeoportal_geoportal", "scaffolds")
def main() -> int:
    """Entry point to run PCreateCommand."""
    cmd = PCreateCommand(sys.argv)
    try:
        return cmd.run()
    except KeyboardInterrupt:  # pragma: no cover
        # Treat a user abort as a plain failure exit code.
        return 1
class PCreateCommand:
    """
    Wrapper around cookiecutter with appropriated context creator for our scaffolds.
    This is a port of Pyramid 1 PCreateCommand using cookiecutter as a backend.
    """

    parser = ArgumentParser(
        prog=sys.argv[0],
        add_help=True,
        description="Wrapper around cookiecutter that create appropriated context.",
    )
    parser.add_argument(
        "-s",
        "--scaffold",
        dest="scaffold_names",
        action="append",
        help=("Add a scaffold to the create process " "(multiple -s args accepted)"),
    )
    parser.add_argument(
        "-l",
        "--list",
        dest="list",
        action="store_true",
        help="List all available scaffold names",
    )
    parser.add_argument(
        "--package-name",
        dest="package_name",
        action="store",
        help="Package name to use. The name provided is "
        "assumed to be a valid Python package name, and "
        "will not be validated. By default the package "
        "name is derived from the value of "
        "output_directory.",
    )
    parser.add_argument(
        "--overwrite",
        dest="overwrite",
        action="store_true",
        help="Always overwrite",
    )
    parser.add_argument(
        "output_directory",
        nargs="?",
        default=None,
        help="The directory where the project will be " "created.",
    )

    def __init__(self, argv: List[str], quiet: bool = False) -> None:
        """Parse command-line arguments and discover available scaffolds."""
        self.quiet = quiet
        self.args = self.parser.parse_args(argv[1:])
        self.scaffolds = self.all_scaffolds()

    def run(self) -> int:
        """Run the command and return a process exit code (0 = success)."""
        if self.args.list:
            return self.show_scaffolds()
        if not self.args.scaffold_names and not self.args.output_directory:
            if not self.quiet:  # pragma: no cover
                self.parser.print_help()
                self.out("")
                self.show_scaffolds()
            return 2
        return self.render_scaffolds()

    @property
    def output_path(self) -> str:
        """Absolute, normalized path of the output directory argument."""
        return cast(str, os.path.abspath(os.path.normpath(self.args.output_directory)))

    def render_scaffolds(self) -> int:
        """Render every requested scaffold with cookiecutter, sharing one context."""
        verbose = True
        debug_file = None
        configure_logger(stream_level="DEBUG" if verbose else "INFO", debug_file=debug_file)
        context = self.get_context()
        for scaffold_name in self.args.scaffold_names:
            # Needed to be backward compatible for the `test-upgrade init` command
            if scaffold_name.startswith("c2cgeoportal_"):
                scaffold_name = scaffold_name[len("c2cgeoportal_") :]
            self.out(f"Rendering scaffold: {scaffold_name}")
            cookiecutter(
                template=os.path.join(SCAFFOLDS_DIR, scaffold_name),
                extra_context=context,
                no_input=True,
                overwrite_if_exists=self.args.overwrite,
                output_dir=os.path.dirname(self.output_path),
            )
        return 0

    def show_scaffolds(self) -> int:
        """Print the list of available scaffolds; always succeeds."""
        scaffolds = sorted(self.scaffolds)
        if scaffolds:
            self.out("Available scaffolds:")
            for scaffold in scaffolds:
                self.out(f"  {scaffold}")
        else:
            self.out("No scaffolds available")
        return 0

    @staticmethod
    def all_scaffolds() -> List[str]:
        """Return the scaffold names shipped with c2cgeoportal_geoportal."""
        return os.listdir(SCAFFOLDS_DIR)

    def out(self, msg: str) -> None:
        """Print `msg` unless running in quiet mode."""
        if not self.quiet:
            print(msg)

    def get_context(self) -> Dict[str, Union[str, int]]:
        """Build the cookiecutter context from CLI args, project.yaml,
        environment variables and interactive prompts."""
        output_dir = self.output_path
        project_name = os.path.basename(output_dir)
        if self.args.package_name is None:
            # Derive a valid Python package name from the directory name.
            pkg_name = _bad_chars_re.sub("", project_name.lower().replace("-", "_"))
        else:
            pkg_name = self.args.package_name
        context: Dict[str, Union[str, int]] = {
            "project": project_name,
            "package": pkg_name,
            "authtkt_secret": gen_authtkt_secret(),
        }
        context.update(self.read_project_file())
        if os.environ.get("CI") == "true":
            # Deterministic secret so CI runs produce reproducible output.
            context[  # nosec
                "authtkt_secret"
            ] = "io7heoDui8xaikie1rushaeGeiph8Bequei6ohchaequob6viejei0xooWeuvohf"
        self.get_var(context, "srid", "Spatial Reference System Identifier (e.g. 2056): ", int)
        srid = cast(int, context["srid"])
        extent = self.epsg2bbox(srid)
        self.get_var(
            context,
            "extent",
            f"Extent (minx miny maxx maxy): in EPSG: {srid} projection, default is "
            f"[{extent[0]} {extent[1]} {extent[2]} {extent[3]}]: "
            if extent
            else f"Extent (minx miny maxx maxy): in EPSG: {srid} projection: ",
        )
        match = re.match(
            r"([\d.]+)[,; ] *([\d.]+)[,; ] *([\d.]+)[,; ] *([\d.]+)",
            cast(str, context["extent"]),
        )
        if match is not None:
            extent = [match.group(n + 1) for n in range(4)]
        assert extent is not None
        context["extent"] = ",".join(extent)
        context["extent_mapserver"] = " ".join(extent)
        if context["package"] == "site":
            raise ValueError(
                "Sorry, you may not name your package 'site'. "
                "The package name 'site' has a special meaning in "
                "Python. Please name it anything except 'site'."
            )
        package_logger = context["package"]
        if package_logger == "root":
            # Rename the app logger in the rare case a project
            # is named "root"
            package_logger = "app"
        context["package_logger"] = package_logger
        context["geomapfish_version"] = os.environ["VERSION"]
        # Used in the Docker files to shoos the version of the build image
        context["geomapfish_version_tag"] = "GEOMAPFISH_VERSION"
        context["geomapfish_version_tag_env"] = "${GEOMAPFISH_VERSION}"
        geomapfish_major_version_tag = (
            "GEOMAPFISH_VERSION" if context.get("unsafe_long_version", False) else "GEOMAPFISH_MAIN_VERSION"
        )
        # Used in the Docker files to shoos the version of the run image
        context["geomapfish_major_version_tag"] = geomapfish_major_version_tag
        context["geomapfish_major_version_tag_env"] = "${" + geomapfish_major_version_tag + "}"
        context["geomapfish_main_version"] = os.environ["MAJOR_VERSION"]
        context["geomapfish_main_version_dash"] = os.environ["MAJOR_VERSION"].replace(".", "-")
        return context

    def read_project_file(self) -> Dict[str, Union[str, int]]:
        """Load template_vars from <output>/project.yaml if it exists."""
        project_file = os.path.join(self.output_path, "project.yaml")
        if os.path.exists(project_file):
            with open(project_file, encoding="utf8") as f:
                project = yaml.safe_load(f)
            return cast(Dict[str, Union[str, int]], project.get("template_vars", {}))
        else:
            return {}

    @staticmethod
    def get_var(
        context: Dict[str, Any],
        name: str,
        prompt: str,
        type_: Optional[Type[Any]] = None,
    ) -> None:
        """Resolve `context[name]` from (in order) the environment, the existing
        context, or an interactive prompt; coerce it to `type_` if given.

        Exits the process with code 1 when the value cannot be coerced.
        """
        if name.upper() in os.environ and os.environ[name.upper()] != "":
            value = os.environ.get(name.upper())
        else:
            value = context.get(name)
        if value is None:
            value = input(prompt).strip()
        if type_ is not None and not isinstance(value, type_):
            try:
                value = type_(value)
            except ValueError:
                print(f"The attribute {name}={value} is not a {type_}")
                sys.exit(1)
        context[name] = value

    @staticmethod
    def epsg2bbox(srid: int) -> Optional[List[str]]:
        """Query epsg.io for the bounding box of `srid`, reprojected into that
        SRS; return [minx, miny, maxx, maxy] as strings, or None on any failure.

        Fix: the requests calls previously had no timeout and could hang the
        command forever on a stalled connection.
        """
        try:
            r = requests.get(f"https://epsg.io/?format=json&q={srid}", timeout=30)
            bbox = r.json()["results"][0]["bbox"]
            r = requests.get(
                "https://epsg.io/trans?s_srs=4326&t_srs={srid}&data={bbox[1]},{bbox[0]}".format(
                    srid=srid, bbox=bbox
                ),
                timeout=30,
            )
            r1 = r.json()[0]
            r = requests.get(
                "https://epsg.io/trans?s_srs=4326&t_srs={srid}&data={bbox[3]},{bbox[2]}".format(
                    srid=srid, bbox=bbox
                ),
                timeout=30,
            )
            r2 = r.json()[0]
            return [r1["x"], r2["y"], r2["x"], r1["y"]]
        except requests.RequestException:
            print("Failed to establish a connection to epsg.io.")
        except json.JSONDecodeError:
            print("epsg.io doesn't return a correct json.")
        except IndexError:
            print("Unable to get the bbox")
        except Exception as exception:
            # Deliberate best-effort: the caller falls back to prompting.
            print(f"unexpected error: {str(exception)}")
        return None
def gen_authtkt_secret() -> str:
    """Generate a 64-character random authtkt secret.

    Fix: previously shelled out to the external ``pwgen`` binary, which fails
    when pwgen is not installed and is not cryptographically guaranteed; use
    the stdlib CSPRNG instead (same contract: a 64-char alphanumeric string).
    """
    import secrets

    return secrets.token_hex(32)  # 32 random bytes -> 64 hex characters
# Allow direct invocation as a script; main() returns the exit code.
if __name__ == "__main__":  # pragma: no cover
    sys.exit(main() or 0)
|
from math import exp, log, pi, sqrt
from typing import List, Tuple
# Public API of this module.
__all__ = ["Glicko2Entry", "glicko2_update", "glicko2_configure"]
# Convergence threshold for the volatility iteration (glicko2.pdf step 5).
EPSILON = 0.000001
# System constant constraining volatility change; overridable via glicko2_configure().
# TAO = 1.2
TAO = 0.5
# Match outcome scores as used in glicko2_update matches.
LOSS = 0.0
DRAW = 0.5
WIN = 1.0
# Clamp bounds applied to updated deviations, volatilities and ratings.
MAX_RD = 500.0
MIN_RD = 30.0
MIN_VOLATILITY = 0.01
MAX_VOLATILITY = 0.15
MIN_RATING = 100.0
MAX_RATING = 6000.0
# Deviation above which a rating is considered provisional.
PROVISIONAL_RATING_CUTOFF = 160.0
# Conversion factor between the Glicko scale and the internal Glicko-2 scale.
GLICKO2_SCALE = 173.7178
class Glicko2Entry:
    """A player's Glicko-2 state: rating, rating deviation (RD) and volatility,
    plus the internal-scale equivalents ``mu`` and ``phi``."""

    rating: float
    deviation: float
    volatility: float
    mu: float
    phi: float

    def __init__(self, rating: float = 1500, deviation: float = 350, volatility: float = 0.06) -> None:
        self.rating = rating
        self.deviation = deviation
        self.volatility = volatility
        # Internal Glicko-2 scale (glicko2.pdf step 2).
        self.mu = (self.rating - 1500) / GLICKO2_SCALE
        self.phi = self.deviation / GLICKO2_SCALE

    def __str__(self) -> str:
        return "%7.2f +- %6.2f (%.6f [%.4f])" % (
            self.rating,
            self.deviation,
            self.volatility,
            self.volatility * GLICKO2_SCALE,
        )

    def copy(self, rating_adjustment: float = 0.0, rd_adjustment: float = 0.0) -> "Glicko2Entry":
        """Return a new entry, optionally with adjusted rating and deviation."""
        return Glicko2Entry(
            self.rating + rating_adjustment,
            self.deviation + rd_adjustment,
            self.volatility,
        )

    def expand_deviation_because_no_games_played(self, n_periods: int = 1) -> "Glicko2Entry":
        """Inflate the deviation for rating periods with no games.

        Implementation as defined by:
        http://www.glicko.net/glicko/glicko2.pdf (note after step 8)
        """
        for _i in range(n_periods):
            phi_prime = sqrt(self.phi ** 2 + self.volatility ** 2)
            # Clamp RD into [MIN_RD, MAX_RD] and keep phi consistent with it.
            self.deviation = min(MAX_RD, max(MIN_RD, GLICKO2_SCALE * phi_prime))
            self.phi = self.deviation / GLICKO2_SCALE
        return self

    def expected_win_probability(self, white: "Glicko2Entry", handicap_adjustment: float, ignore_g: bool = False) -> float:
        """Expected score against `white` after applying `handicap_adjustment`
        to our rating, per http://www.glicko.net/glicko/glicko.pdf.

        Bug fix: the ``ignore_g`` branches were inverted (g() was a constant 1
        unless ignore_g was requested), and the non-trivial g() discarded its
        ``rd`` argument, always using ``self.deviation`` instead.
        """
        q = 0.0057565
        if ignore_g:
            def g(rd: float) -> float:
                return 1
        else:
            def g(rd: float) -> float:
                return 1 / sqrt(1 + 3 * q ** 2 * (rd ** 2) / pi ** 2)
        E = 1 / (
            1
            + (
                10
                ** (
                    -g(sqrt(self.deviation ** 2 + white.deviation ** 2))
                    * (self.rating + handicap_adjustment - white.rating)
                    / 400
                )
            )
        )
        return E
def glicko2_update(player: Glicko2Entry, matches: List[Tuple[Glicko2Entry, int]]) -> Glicko2Entry:
    """Return a new Glicko2Entry for `player` after a rating period containing
    `matches`, a list of (opponent, outcome) pairs with outcome one of
    LOSS/DRAW/WIN (0.0 / 0.5 / 1.0).

    The input `player` is not modified.
    """
    # Implementation as defined by: http://www.glicko.net/glicko/glicko2.pdf
    if len(matches) == 0:
        # No games played: rating unchanged (deviation expansion is the
        # caller's job via expand_deviation_because_no_games_played).
        return player.copy()
    # step 1/2 implicitly done during Glicko2Entry construction
    # step 3 / 4, compute 'v' and delta
    v_sum = 0.0
    delta_sum = 0.0
    for m in matches:
        p = m[0]
        outcome = m[1]
        # g(phi_j) dampens the opponent's influence by their uncertainty.
        g_phi_j = 1 / sqrt(1 + (3 * p.phi ** 2) / (pi ** 2))
        # E = expected score against this opponent on the internal scale.
        E = 1 / (1 + exp(-g_phi_j * (player.mu - p.mu)))
        v_sum += g_phi_j ** 2 * E * (1 - E)
        delta_sum += g_phi_j * (outcome - E)
    # Guard against a zero variance sum (degenerate input): fall back to a
    # huge v so the update is effectively a no-op.
    v = 1.0 / v_sum if v_sum else 9999
    delta = v * delta_sum
    # step 5
    a = log(player.volatility ** 2)
    def f(x: float) -> float:
        # Volatility objective function from glicko2.pdf step 5.1.
        ex = exp(x)
        return (ex * (delta ** 2 - player.phi ** 2 - v - ex) / (2 * ((player.phi ** 2 + v + ex) ** 2))) - (
            (x - a) / (TAO ** 2)
        )
    A = a
    if delta ** 2 > player.phi ** 2 + v:
        B = log(delta ** 2 - player.phi ** 2 - v)
    else:
        # Walk B left until f(B) >= 0; `safety` bounds the iteration count.
        k = 1
        safety = 100
        while f(a - k * TAO) < 0 and safety > 0:  # pragma: no cover
            safety -= 1
            k += 1
        B = a - k * TAO
    fA = f(A)
    fB = f(B)
    # Illinois-algorithm root finding for the new volatility (step 5.4),
    # again bounded by `safety` iterations.
    safety = 100
    while abs(B - A) > EPSILON and safety > 0:
        C = A + (A - B) * fA / (fB - fA)
        fC = f(C)
        if fC * fB < 0:
            A = B
            fA = fB
        else:
            # Illinois modification: halve fA to guarantee convergence.
            fA = fA / 2
        B = C
        fB = fC
        safety -= 1
    new_volatility = exp(A / 2)
    # step 6
    phi_star = sqrt(player.phi ** 2 + new_volatility ** 2)
    # step 7
    phi_prime = 1 / sqrt(1 / phi_star ** 2 + 1 / v)
    mu_prime = player.mu + (phi_prime ** 2) * delta_sum
    # step 8: convert back to the Glicko scale, clamping to module bounds.
    ret = Glicko2Entry(
        rating=min(MAX_RATING, max(MIN_RATING, GLICKO2_SCALE * mu_prime + 1500)),
        deviation=min(MAX_RD, max(MIN_RD, GLICKO2_SCALE * phi_prime)),
        volatility=min(0.15, max(0.01, new_volatility)),
    )
    return ret
def glicko2_configure(tao: float, min_rd: float, max_rd: float) -> None:
    """Override the module-wide Glicko-2 tuning constants.

    Sets TAO (volatility-change system constant) and the deviation clamp
    bounds used by subsequent rating updates.
    """
    global TAO, MIN_RD, MAX_RD
    TAO = tao
    MIN_RD = min_rd
    MAX_RD = max_rd
|
"""
Credits:
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
Copyright (c) 2017-2019 <NAME>, <NAME>, <NAME>, <NAME> (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import unittest
import os
import logging
import tempfile
import datetime
from eolearn.core import EOTask, EOWorkflow, Dependency, EOExecutor, WorkflowResults
logging.basicConfig(level=logging.DEBUG)
class ExampleTask(EOTask):
    """Toy task that logs at three levels and raises when ``arg1`` is None."""

    def execute(self, *_, **kwargs):
        logger = logging.getLogger(__file__)
        logger.info('Info statement of Example task with kwargs: %s', kwargs)
        logger.warning('Warning statement of Example task with kwargs: %s', kwargs)
        logger.debug('Debug statement of Example task with kwargs: %s', kwargs)
        # An explicit arg1=None is the trigger for a simulated failure.
        arg1_is_none = 'arg1' in kwargs and kwargs['arg1'] is None
        if arg1_is_none:
            raise Exception
class FooTask(EOTask):
    """Trivial task that ignores all inputs and returns a constant."""

    @staticmethod
    def execute(*_args, **_kwargs):
        return 42
class TestEOExecutor(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # A minimal workflow: ExampleTask output is fed twice into FooTask.
        cls.task = ExampleTask()
        cls.final_task = FooTask()
        cls.workflow = EOWorkflow([(cls.task, []),
                                   Dependency(task=cls.final_task, inputs=[cls.task, cls.task])])
        # Four executions; the last one (arg1=None) makes ExampleTask raise,
        # so tests also cover the failure path.
        cls.execution_args = [
            {cls.task: {'arg1': 1}},
            {},
            {cls.task: {'arg1': 3, 'arg3': 10}},
            {cls.task: {'arg1': None}}
        ]
def test_execution_logs(self):
for execution_names in [None, [4, 'x', 'y', 'z']]:
with tempfile.TemporaryDirectory() as tmp_dir_name:
executor = EOExecutor(self.workflow, self.execution_args, save_logs=True, logs_folder=tmp_dir_name,
execution_names=execution_names)
executor.run()
self.assertEqual(len(executor.execution_logs), 4)
for log in executor.execution_logs:
self.assertTrue(len(log.split()) >= 3)
log_filenames = sorted(os.listdir(executor.report_folder))
self.assertEqual(len(log_filenames), 4)
if execution_names:
for name, log_filename in zip(execution_names, log_filenames):
self.assertTrue(log_filename == 'eoexecution-{}.log'.format(name))
def test_execution_logs_multiprocess(self):
for execution_names in [None, [4, 'x', 'y', 'z']]:
with tempfile.TemporaryDirectory() as tmp_dir_name:
executor = EOExecutor(self.workflow, self.execution_args, save_logs=True,
logs_folder=tmp_dir_name,
execution_names=execution_names)
executor.run(workers=3, multiprocess=True)
self.assertEqual(len(executor.execution_logs), 4)
for log in executor.execution_logs:
self.assertTrue(len(log.split()) >= 3)
log_filenames = sorted(os.listdir(executor.report_folder))
self.assertEqual(len(log_filenames), 4)
if execution_names:
for name, log_filename in zip(execution_names, log_filenames):
self.assertTrue(log_filename == 'eoexecution-{}.log'.format(name))
def test_execution_logs_multithread(self):
for execution_names in [None, [4, 'x', 'y', 'z']]:
with tempfile.TemporaryDirectory() as tmp_dir_name:
executor = EOExecutor(self.workflow, self.execution_args, save_logs=True,
logs_folder=tmp_dir_name,
execution_names=execution_names)
executor.run(workers=3, multiprocess=False)
self.assertEqual(len(executor.execution_logs), 4)
for log in executor.execution_logs:
self.assertTrue(len(log.split()) >= 3)
log_filenames = sorted(os.listdir(executor.report_folder))
self.assertEqual(len(log_filenames), 4)
if execution_names:
for name, log_filename in zip(execution_names, log_filenames):
self.assertTrue(log_filename == 'eoexecution-{}.log'.format(name))
def test_execution_stats(self):
with tempfile.TemporaryDirectory() as tmp_dir_name:
executor = EOExecutor(self.workflow, self.execution_args, logs_folder=tmp_dir_name)
executor.run(workers=2)
self.assertEqual(len(executor.execution_stats), 4)
for stats in executor.execution_stats:
for time_stat in ['start_time', 'end_time']:
self.assertTrue(time_stat in stats and isinstance(stats[time_stat], datetime.datetime))
def test_execution_errors(self):
for multiprocess in [True, False]:
with tempfile.TemporaryDirectory() as tmp_dir_name:
executor = EOExecutor(self.workflow, self.execution_args, logs_folder=tmp_dir_name)
executor.run(workers=5, multiprocess=multiprocess)
for idx, stats in enumerate(executor.execution_stats):
if idx != 3:
self.assertFalse('error' in stats, 'Workflow {} should be executed without errors'.format(idx))
else:
self.assertTrue('error' in stats and stats['error'],
'This workflow should be executed with an error')
self.assertEqual(executor.get_successful_executions(), [0, 1, 2])
self.assertEqual(executor.get_failed_executions(), [3])
def test_execution_results(self):
for return_results in [True, False]:
executor = EOExecutor(self.workflow, self.execution_args)
results = executor.run(workers=2, multiprocess=True, return_results=return_results)
if return_results:
self.assertTrue(isinstance(results, list))
for idx, workflow_results in enumerate(results):
if idx == 3:
self.assertEqual(workflow_results, None)
else:
self.assertTrue(isinstance(workflow_results, WorkflowResults))
self.assertEqual(workflow_results[self.final_task], 42)
self.assertTrue(self.task not in workflow_results)
else:
self.assertEqual(results, None)
def test_exceptions(self):
with self.assertRaises(ValueError):
EOExecutor(self.workflow, {})
with self.assertRaises(ValueError):
EOExecutor(self.workflow, self.execution_args, execution_names={1, 2, 3, 4})
with self.assertRaises(ValueError):
EOExecutor(self.workflow, self.execution_args, execution_names=['a', 'b'])
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
<filename>etc/fixtures/generate_data.py
# Generate a fake database for the django application
import datetime
import glob
import lipsum
import json
import osmapi
import sys
import argparse
from nominatim import Nominatim
import os
import random
def basename(x):
    """Return the final path component of *x* (e.g. 'a/b/c.txt' -> 'c.txt').

    Uses os.path.basename, which is the idiomatic equivalent of
    ``os.path.split(x)[1]``.
    """
    return os.path.basename(x)
def generate_product_data(n, d):
    """Generate product registration and price fixtures from crawled CSV data.

    Args:
        n: maximum number of products to take per category.
        d: crawl directory containing one sub-directory per category, each
           holding a ``data.csv`` file.

    Returns:
        A flat list of Django fixture dicts (registrationprice + registration
        pairs sharing a pk).
    """
    output = []
    categories = glob.glob(os.path.join(os.path.abspath(d), '*/'))
    pk = 1
    for i, c in enumerate(categories):
        with open(os.path.join(c, 'data.csv')) as f:
            products = f.read().splitlines()
        category = os.path.split(c[:-1])[-1]
        print(category)
        for j, p in enumerate(products[:n]):
            q = p.split(', ')
            img_url = 'media/supermarket_crawlers/{}/images/{}.jpg'.format(category, j)
            # Joined description computed once; the name field is capped at
            # the model's 1000-character limit.
            description = ', '.join(q[2:-1])
            prname = description[:1000]
            try:
                pr = float(q[1])
            except (ValueError, IndexError):
                # Malformed/missing price column: fall back to a random price
                # instead of aborting fixture generation. (Was a bare except,
                # which also swallowed KeyboardInterrupt/SystemExit.)
                pr = 10 * random.random()
            user = random.randint(1, 5)
            registration = {
                'model' : 'cheapiesgr.registration',
                'pk' : pk,
                'fields' : {
                    'tags' : '[]',
                    'product_description' : description,
                    'name' : prname,
                    'withdrawn' : False,
                    'volunteer' : user,
                    'category' : i + 1,
                    'date_of_registration' : '2018-11-27',
                    'image_url' : img_url
                }
            }
            price = {
                'model' : 'cheapiesgr.registrationprice',
                'pk' : pk,
                'fields' : {
                    'price' : pr,
                    'date_from' : '2019-01-01',
                    'date_to' : '2020-01-01',
                    'shop' : random.randint(1, 5),
                    'registration' : pk,
                    'volunteer' : user
                }
            }
            pk += 1
            output.extend([price, registration])
    return output
def generate_categories_data(n, d):
    """Generate category fixtures from the first *n* crawl sub-directories.

    Each sub-directory name of *d* becomes one ``cheapiesgr.category`` fixture;
    the directory name is used for both the description and the name.
    (The previous version also globbed each category's image files into an
    unused local — removed.)
    """
    output = []
    categories = glob.glob(os.path.join(os.path.abspath(d), '*/'))
    for i, c in enumerate(categories[:n]):
        name = os.path.split(c[:-1])[1]
        category = {
            'model' : 'cheapiesgr.category',
            'pk' : i + 1,
            'fields' : {
                'category_description' : name,
                'category_name' : name,
            }
        }
        output.append(category)
    return output
def generate_shop_data(n, d):
    """Generate *n* shop fixtures by geocoding known store names via Nominatim.

    Relies on the module-level ``nom`` Nominatim client created in __main__.
    The ``d`` parameter is unused; it is kept for the common generator
    signature.
    """
    shops = ['Vasilopoulos', 'Sklavenitis', 'Lidl', 'Elomas']
    results = []
    i = 0
    # Keep querying until we have enough results, but never index past the
    # shop list. (The previous "or" condition raised IndexError whenever the
    # queries produced fewer than n results in total.)
    while len(results) <= n and i < len(shops):
        results.extend(nom.query(shops[i]))
        i += 1
    results = results[:n]
    output = []
    for i, r in enumerate(results):
        x = {
            'model' : 'cheapiesgr.shop',
            'pk' : i + 1,
            'fields' : {
                'address' : r['display_name'][:500],
                'name' : r['display_name'][:500],
                'city' : r['display_name'],
                'location' : 'POINT({} {})'.format(r['lon'], r['lat'])
            }
        }
        output.append(x)
    return output
def generate_user_data(n, d):
    # Generate fake user data: n auth.user fixtures plus a matching
    # cheapiesgr.volunteer row per user. Names come from a bundled text file,
    # one "First Last [Middle...]" name per line.
    # NOTE(review): indexing starts at names[1] (skips the first name) and
    # raises IndexError when n >= len(names) -- confirm the name file is
    # always large enough. The `d` parameter is unused; kept for the common
    # generator signature.
    output = []
    with open('etc/fixtures/FunnyNames.txt') as f:
        names = f.read().splitlines()
    for i in range(1, n+1):
        uname = 'user' + str(i)
        # First token is the first name; everything else joins into last_name.
        temp = names[i].split(' ')
        first_name = temp[0]
        last_name = ' '.join(temp[1:])
        user = {
            'model': 'auth.user',
            'pk' : i,
            'fields' : {
                'username' : uname,
                'email' : uname + '@example.com',
                'password' : '<PASSWORD>',
                'first_name' : first_name,
                'last_name' : last_name
            }
        }
        # Companion volunteer row referencing the user by pk.
        volunteer = {
            'model' : 'cheapiesgr.volunteer',
            'pk' : i,
            'fields' : {
                'user' : i,
                'confirmed_email' : False
            }
        }
        output.extend([user, volunteer])
    return output
def generate_qar_data(n, d):
    """Generate linked question/answer/rating fixtures, one triple per pk.

    The ``d`` parameter is unused; kept for the common generator signature.
    """
    output = []
    for pk in range(1, n + 1):
        output.append({
            'model' : 'cheapiesgr.question',
            'pk' : pk,
            'fields' : {
                'question_text' : lipsum.generate_words(20),
                'registration' : pk
            }
        })
        output.append({
            'model' : 'cheapiesgr.answer',
            'pk' : pk,
            'fields' : {
                'answer_text' : lipsum.generate_words(20),
                'question' : pk
            }
        })
        output.append({
            'model' : 'cheapiesgr.rating',
            'pk' : pk,
            'fields' : {
                'rate_explanation' : lipsum.generate_words(20),
                'registration' : pk,
                'volunteer' : pk,
                'stars' : random.randint(1, 5),
                'validity_of_this_rate' : random.randint(1, 5)
            }
        })
    return output
def apply_fixtures(pipeline):
    # Load each generated "<name>.json" fixture file into the Django database.
    # NOTE(review): os.system interpolates the fixture name into a shell
    # command; safe here because pipeline values are internal literals, but
    # subprocess.run with an argument list would be more robust.
    for p in pipeline:
        os.system('python3 manage.py loaddata {}.json'.format(p))
if __name__ == '__main__':
    # CLI entry point: generate the requested fixture files and optionally
    # load them into the database.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-n', type=int, default=10, help='Number of data')
    argparser.add_argument('-t', type=str, default='')
    argparser.add_argument('-d', type=str, help='Crawled data directory')
    argparser.add_argument('--apply', action='store_true', help='Apply fixtures')
    # Maps fixture type name -> generator; each generator takes (n, d).
    options = {
        'shop' : generate_shop_data,
        'categories': generate_categories_data,
        'products' : generate_product_data,
        'user' : generate_user_data,
        'qar' : generate_qar_data,
    }
    # NOTE(review): the Nominatim client is created unconditionally, even when
    # the 'shop' generator is not in the pipeline.
    nom = Nominatim()
    args = argparser.parse_args()
    # Generate desired data
    if args.t == '':
        # Use default pipeline
        pipeline = ['shop', 'user', 'categories', 'products', 'qar']
    else:
        pipeline = [args.t]
    for p in pipeline:
        output = options[p](args.n, args.d)
        # Write to file
        with open(p + '.json', 'w+') as f:
            f.write(json.dumps(output, ensure_ascii=False))
    if args.apply:
        apply_fixtures(pipeline)
|
<reponame>GC-HBOC/HerediVar<gh_stars>0
from os import path
import sys
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import argparse
import common.functions as functions
import json
# Command-line setup: reads a cancerhotspots MAF file and writes a VCF to
# stdout (or to --output).
parser = argparse.ArgumentParser(description="")
parser.add_argument("-i", "--input", default="", help="path to input file")
parser.add_argument("-o", "--output", default="", help="output file path. If not given will default to stdout")
# type=int so a user-supplied value compares correctly against the integer
# line counter below; previously the value stayed a string and never matched.
parser.add_argument("--header", type=int, default=1, help="The line number which has the header (Zero centered, default: 1)")
parser.add_argument("--samples", help="The total number of samples in the maf file")
parser.add_argument("--oncotree", help="Path to Oncotree json file: http://oncotree.mskcc.org/api/tumorTypes")
args = parser.parse_args()

# Use != rather than "is not": identity comparison against a str literal is
# implementation-dependent and raises SyntaxWarning on modern CPython.
if args.output != "":
    sys.stdout = open(args.output, 'w')
if args.input != "":
    input_file = open(args.input, 'r', encoding='utf-8', errors='ignore')
else:
    input_file = sys.stdin

i_header_line = args.header
tot_samples = int(args.samples)
oncotree_path = args.oncotree
## extract 1:1 mapping of oncotree identifiers and names
# Use a context manager so the file handle is closed even if parsing fails
# (the original open/close pair leaked the handle on error).
with open(oncotree_path, 'r') as oncotree_file:
    oncotree = json.loads(oncotree_file.read())
oncotree_dict = {}
for tumor_type in oncotree:
    current_code = str(tumor_type['code']).strip().upper()
    current_name = str(tumor_type['name']).strip()
    oncotree_dict[current_code] = current_name
# add some legacy codes, see: https://github.com/cBioPortal/cancerhotspots/issues/23
# more abbreviations: https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/tcga-study-abbreviations (1)
oncotree_dict['MMYL'] = "Multiple Myeloma" # from oncotree_2018_01_01
oncotree_dict['HGG'] = "Malignant High-Grade Glioma" # taken from internet
oncotree_dict['LGG'] = "Low-Grade Glioma" # taken from internet
oncotree_dict['KIRP'] = "Papillary Renal Cell Carcinoma"
oncotree_dict['LAML'] = "Acute Myeloid Leukemia"
oncotree_dict['OV'] = "High-Grade Serous Ovarian Cancer"
oncotree_dict['LIHC'] = "Hepatocellular Carcinoma"
oncotree_dict['KIRC'] = "Renal Clear Cell Carcinoma"
oncotree_dict['CLL'] = "Chronic Lymphocytic Leukemia" # taken from internet
oncotree_dict['KICH'] = "Chromophobe Renal Cell Carcinoma"
oncotree_dict['LUSM'] = "Small Cell Lung Cancer"
oncotree_dict['PIAS'] = "Pilocytic Astrocytoma"
oncotree_dict['DLBC'] = "Diffuse Large B-Cell Lymphoma"
oncotree_dict['LYMBC'] = "Burkitt Lymphoma"
oncotree_dict['ALL'] = "Acute Lymphoid Leukemia" # from oncotree_2018_01_01
oncotree_dict['PANNET'] = "Pancreatic Neuroendocrine Tumor"
oncotree_dict['SARC'] = "Sarcoma" # from (1)
oncotree_dict['HNSC'] = "Head and Neck Squamous Cell Carcinoma" # from oncotree_2018_01_01
## parse cancerhotspots
def print_variant(chr, pos, ref, alt, info):
    """Write one VCF data line to stdout.

    Variants whose alleles contain non-DNA characters, or whose chromosome is
    not supported, are silently dropped.
    """
    if not (functions.is_dna(ref) and functions.is_dna(alt)):
        # non-DNA nucleotides -> drop the variant
        return
    # MAF uses '-' for an empty allele; VCF expects an empty string.
    if ref == '-':
        ref = ''
    if alt == '-':
        alt = ''
    if not functions.validate_chr(chr):
        # unsupported chromosome -> drop the variant
        return
    fields = ['chr' + chr, pos, '.', ref, alt, '.', '.', info]
    print('\t'.join(fields))
def convert_oncotree_symbol(oncotree_symbol):
    """Map an oncotree code to its human-readable name.

    The symbol is normalised (stripped, upper-cased) first. Empty or unknown
    symbols are returned as the normalised symbol itself.
    """
    symbol = str(oncotree_symbol).strip().upper()
    if symbol and symbol in oncotree_dict:
        return oncotree_dict[symbol]
    return symbol
# cancertype is oncotreecancertype:oncotreetissue
def prepare_cancertype(cancertype):
    """Convert 'CODE:tissue' into 'Readable_Name:tissue'.

    The code part is resolved via the oncotree mapping and spaces in the
    resolved name are replaced with underscores.
    """
    parts = cancertype.split(':')
    readable = convert_oncotree_symbol(parts[0]).replace(' ', '_')
    return readable + ':' + parts[1]
# contig header lines are required for crossmap lifting of genomic positions
# (one ##contig line per supported chromosome, plus the INFO field
# definitions used by the variant printer below).
info_header = [
    "##contig=<ID=chr1>",
    "##contig=<ID=chr2>",
    "##contig=<ID=chr3>",
    "##contig=<ID=chr4>",
    "##contig=<ID=chr5>",
    "##contig=<ID=chr6>",
    "##contig=<ID=chr7>",
    "##contig=<ID=chr8>",
    "##contig=<ID=chr9>",
    "##contig=<ID=chr10>",
    "##contig=<ID=chr11>",
    "##contig=<ID=chr12>",
    "##contig=<ID=chr13>",
    "##contig=<ID=chr14>",
    "##contig=<ID=chr15>",
    "##contig=<ID=chr16>",
    "##contig=<ID=chr17>",
    "##contig=<ID=chr18>",
    "##contig=<ID=chr19>",
    "##contig=<ID=chr20>",
    "##contig=<ID=chr21>",
    "##contig=<ID=chr22>",
    "##contig=<ID=chrX>",
    "##contig=<ID=chrY>",
    "##contig=<ID=chrMT>",
    "##INFO=<ID=cancertypes,Number=.,Type=String,Description=\"A | delimited list of all cancertypes associated to this variant according to cancerhotspots. FORMAT: tumortype:tissue\">",
    "##INFO=<ID=AC,Number=1,Type=Integer,Description=\"Number of samples showing the variant from cancerhotspots\">",
    "##INFO=<ID=AF,Number=1,Type=Float,Description=\"Allele Frequency of the variant (AC / num samples cancerhotspots)\">",
    "##INFO=<ID=tissue,Number=1,Type=String,Description=\"Oncotree tissue type according to the cancertype. \">"]
# Emit the VCF header (plus the lines above) before any data rows.
functions.write_vcf_header(info_header)
# Stream through the MAF file, aggregating consecutive rows that describe the
# same variant (the input is assumed to be sorted by variant) into one VCF
# line annotated with cancertypes / AC / AF INFO fields.
i_current_line = -1
is_first_variant = True
for line in input_file:
    line = line.strip()
    i_current_line += 1
    if line.startswith('#') or line == '':
        continue
    if i_current_line == i_header_line:
        # Locate the column indices we need from the header row.
        # NOTE(review): these index variables are only bound if the matching
        # column exists; a malformed header surfaces later as a NameError.
        parts = line.split('\t')
        for i in range(len(parts)):
            part = parts[i]
            if part == 'Chromosome':
                i_chr = i
            elif part == 'Start_Position':
                i_start = i
            elif part == 'Reference_Allele':
                i_ref = i
            elif part == 'Tumor_Seq_Allele2': # this is the alt allele
                i_alt = i
            elif part == 'Tumor_Seq_Allele1': # detected but unused below
                i_alt_1 = i
            elif part == 'TUMORTYPE':
                i_cancertype = i
            elif part == 'Tumor_Sample_Barcode':
                i_barcode = i
            elif part == "oncotree_organtype":
                i_tissue = i
        header_len = len(parts)
        continue
    parts = line.split('\t')
    if len(parts) != header_len:
        #functions.eprint("WARNING: skipping variant because it did not have the correct number of fields. line number in input file: " + str(i_current_line))
        continue
    chr = parts[i_chr]
    pos = parts[i_start]
    ref = parts[i_ref]
    alt = parts[i_alt]
    barcode = parts[i_barcode]
    if is_first_variant:
        # Initialise the aggregation state; the first row's cancertype is
        # collected by the else-branch below (the comparison is equal).
        previous_chr = chr
        previous_pos = pos
        previous_ref = ref
        previous_alt = alt
        previous_barcode = barcode
        all_cancer_types = []
        ac = 1
        is_first_variant = False
    # test if previous variant is equal to the current one
    if chr != previous_chr or pos != previous_pos or ref != previous_ref or alt != previous_alt:
        # New variant: flush the aggregated previous one.
        info = ''
        info = functions.collect_info(info, 'cancertypes=', '|'.join([prepare_cancertype(x) for x in set(all_cancer_types)]))
        info = functions.collect_info(info, 'AC=', ac)
        info = functions.collect_info(info, 'AF=', ac/tot_samples)
        print_variant(previous_chr, previous_pos, previous_ref, previous_alt, info)

        previous_chr = chr
        previous_pos = pos
        previous_ref = ref
        previous_alt = alt
        previous_barcode = barcode
        if parts[i_cancertype] != '' or parts[i_tissue] != '': # collect cancertypes only if there is at least some information available
            all_cancer_types = [parts[i_cancertype] + ':' + parts[i_tissue]]
        else:
            all_cancer_types = []
        ac = 1
    else:
        if parts[i_cancertype] != '' or parts[i_tissue] != '':
            all_cancer_types.append(parts[i_cancertype] + ':' + parts[i_tissue])
        # Count each sample (barcode) only once per variant.
        if previous_barcode != barcode:
            ac += 1
# dont forget to print the last variant which is not captured in the loop
info = ''
# Use prepare_cancertype here for consistency with the in-loop flush above
# (previously this called convert_oncotree_symbol directly, so the final
# record's cancertypes were formatted differently: no underscore replacement
# and the raw "code:tissue" string passed as the symbol).
info = functions.collect_info(info, 'cancertypes=', '|'.join([prepare_cancertype(x) for x in set(all_cancer_types)]))
info = functions.collect_info(info, 'AC=', ac)
info = functions.collect_info(info, 'AF=', ac/tot_samples)
print_variant(previous_chr, previous_pos, previous_ref, previous_alt, info)
|
<reponame>djmattyg007/dreg-client<filename>dreg_client/client.py
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable, Dict, Optional, Sequence, TypedDict, cast
from requests import HTTPError, RequestException, Response
from requests_toolbelt.sessions import BaseUrlSession
from .manifest import (
ImageConfig,
ManifestParseOutput,
parse_image_config_blob_response,
parse_manifest_response,
)
from .schemas import schema_2, schema_2_list
if TYPE_CHECKING:
from ._types import RequestsAuth
from .auth_service import AuthService
logger = logging.getLogger(__name__)

# Type alias for the header mappings passed to the request helpers below.
HEADERS = Dict[str, str]

# Token scope granting access to the registry catalog endpoint.
scope_catalog = "registry:catalog:*"


def scope_repo(repo: str) -> str:
    """Return the token scope granting full access to repository *repo*.

    Defined as a proper function rather than a lambda assignment (PEP 8 E731).
    """
    return f"repository:{repo}:*"
class CatalogResponse(TypedDict):
    """Shape of the registry ``/v2/_catalog`` JSON response."""

    repositories: Sequence[str]
class TagsResponse(TypedDict):
    """Shape of the registry ``/v2/<name>/tags/list`` JSON response.

    ``tags`` is None when the repository exists but has no tags.
    """

    name: str
    tags: Optional[Sequence[str]]
class Client:
    """Low-level Docker Registry HTTP API (v2) client.

    All requests go through a BaseUrlSession. When an AuthService is
    configured, a bearer token scoped to the operation is attached to every
    request; session-level auth and an auth service are mutually exclusive.
    """

    def __init__(
        self, session: BaseUrlSession, /, *, auth_service: Optional[AuthService] = None
    ) -> None:
        if session.auth and auth_service:
            raise ValueError("Cannot supply session.auth and auth_service together.")
        self._session = session
        self._auth_service = auth_service

    @classmethod
    def build_with_session(
        cls,
        base_url: str,
        /,
        *,
        auth: RequestsAuth = None,
        auth_service: Optional[AuthService] = None,
    ) -> Client:
        """Construct a Client with a fresh session rooted at *base_url*."""
        if auth and auth_service:
            raise ValueError("Cannot supply auth and auth_service together.")
        session = BaseUrlSession(base_url)
        if auth:
            session.auth = auth
        return Client(session, auth_service=auth_service)

    def _auth_headers(self, scope: str, headers: Optional[HEADERS]) -> HEADERS:
        """Return request headers with a bearer token for *scope* added when an
        auth service is configured.

        Shared by the _head/_get/_delete helpers (previously triplicated).
        Mutates and returns *headers* when a non-empty dict is given, matching
        the original behavior.
        """
        if not headers:
            headers = {}
        if self._auth_service:
            token = self._auth_service.request_token(scope)
            headers["Authorization"] = f"Bearer {token}"
        return headers

    def _head(self, url_path: str, scope: str, headers: Optional[HEADERS] = None) -> Response:
        headers = self._auth_headers(scope, headers)
        response = cast(Response, self._session.head(url_path, headers=headers))
        response.raise_for_status()
        return response

    def _get(self, url_path: str, scope: str, headers: Optional[HEADERS] = None) -> Response:
        headers = self._auth_headers(scope, headers)
        response = cast(Response, self._session.get(url_path, headers=headers))
        response.raise_for_status()
        return response

    def _delete(self, url_path: str, scope: str, headers: Optional[HEADERS] = None) -> Response:
        headers = self._auth_headers(scope, headers)
        response = cast(Response, self._session.delete(url_path, headers=headers))
        response.raise_for_status()
        return response

    def check_status(self) -> bool:
        """Return True when the registry root responds with valid JSON."""
        try:
            response = self._get("", scope_catalog)
            response.json()
        except (ValueError, RequestException):
            return False
        else:
            return True

    def catalog(self) -> CatalogResponse:
        """List the repositories known to the registry."""
        response = self._get("_catalog", scope_catalog)
        return cast(CatalogResponse, response.json())

    def get_repository_tags(self, name: str) -> TagsResponse:
        """List the tags of repository *name*."""
        response = self._get(f"{name}/tags/list", scope_repo(name))
        return cast(TagsResponse, response.json())

    def check_manifest(self, name: str, reference: str) -> Optional[str]:
        """Return the manifest digest for *reference*, or None when absent."""
        headers: HEADERS = {
            "Accept": ",".join((schema_2, schema_2_list)),
        }
        try:
            response = self._head(
                f"{name}/manifests/{reference}", scope_repo(name), headers=headers
            )
        except HTTPError as exc:
            if exc.response.status_code == 404:
                return None
            raise
        return response.headers.get("Docker-Content-Digest", None)

    def get_manifest(self, name: str, reference: str) -> ManifestParseOutput:
        """Fetch and parse the manifest for *reference* in repository *name*."""
        headers: HEADERS = {
            "Accept": ",".join((schema_2, schema_2_list)),
        }
        response = self._get(f"{name}/manifests/{reference}", scope_repo(name), headers=headers)
        return parse_manifest_response(response)

    def delete_manifest(self, name: str, digest: str) -> Response:
        """Delete the manifest identified by *digest*."""
        return self._delete(f"{name}/manifests/{digest}", scope_repo(name))

    def get_image_config_blob(self, name: str, digest: str) -> ImageConfig:
        """Fetch and parse the image configuration blob for *digest*."""
        response = self.get_blob(name, digest)
        return parse_image_config_blob_response(response)

    def get_blob(self, name: str, digest: str) -> Response:
        """Fetch a raw blob by *digest*."""
        return self._get(f"{name}/blobs/{digest}", scope_repo(name))

    def delete_blob(self, name: str, digest: str) -> Response:
        """Delete a blob by *digest*."""
        return self._delete(f"{name}/blobs/{digest}", scope_repo(name))
__all__ = ("Client",)
|
"""Process DynamoRIO's instr_create.h file and generate a simplied
instruction creation API for use by Granary.
Author: <NAME> (<EMAIL>)
Copyright: Copyright 2012-2013 <NAME>, all rights reserved.
"""
import re
# Generated source files. The handles stay open for the lifetime of this
# generator script and are written via the C()/H() helpers below.
code = open('granary/gen/instruction.cc', 'w')
header = open('granary/gen/instruction.h', 'w')

# Locate the argument used to pass a pointer to a dcontext_t
# within the parameter list of a function-like macro.
DC = re.compile(r"([ (,])dc([ ),])")
# Output code to the `gen/instruction.cc` source code file.
def C(*args):
    # Stringify and concatenate the arguments, then append them to the
    # generated C++ source file as a single line.
    global code
    code.write("".join(map(str, args)) + "\n")
# Output code to the `gen/instruction.h` header file.
def H(*args):
    # Stringify and concatenate the arguments, then append them to the
    # generated C++ header file as a single line.
    global header
    header.write("".join(map(str, args)) + "\n")
# Locate an instruction/operand-creation macro definition and capture its
# kind (INSTR/OPND), name, and parameter list.
CREATE_MACRO = re.compile(
    r"#\s*define\s+(INSTR|OPND)_CREATE_([a-zA-Z0-9_]+)\((.*?)\)")
# Split a macro line into (text up to the closing paren, remainder).
AFTER_CREATE_MACRO = re.compile(r"(.*?)\)(.*)")
# Uses of instruction-creation macros inside other macro bodies.
MACRO_USE = re.compile(r"INSTR_CREATE_([a-zA-Z0-9_]+)")
MACRO_DEF = re.compile(r"^#\s*define")
# Opcode references inside already-namespaced generated code.
OPCODE_USE = re.compile(r"::OP_([a-zA-Z0-9_]+)", re.MULTILINE)
# Opcodes already seen; pre-seeded with aliases that must be skipped.
SEEN_OPCODES = set(["jmpe", "jmpe_abs"])
# Format a line and make it safe for inclusion into a Granary
# C++ file.
def format_line(line):
    # Strip newline / indentation / backslash-continuation characters before
    # namespace-qualifying the DynamoRIO identifiers.
    return format_line_ns(line.strip("\r\n\t \\"))
# Convert various DynamoRIO-specific names into namespaced
# versions of themselves.
def format_line_ns(line):
    # Replacement order matters: "DR_REG_X" must be rewritten before the
    # generic "DR_" prefix, and the RMM -> XMM fixup must follow it.
    replacements = (
        ("dcontext_t", "dynamorio::dcontext_t"),
        ("reg_id_t", "dynamorio::reg_id_t"),
        ("instr_t", "dynamorio::instr_t"),
        ("OP_", "dynamorio::OP_"),
        ("opnd_", "dynamorio::opnd_"),
        ("DR_REG_X", "DR_REG_R"),
        ("DR_REG_RMM", "DR_REG_XMM"),
        ("DR_", "dynamorio::DR_"),
        ("OPSZ", "dynamorio::OPSZ"),
        ("ptr_int_t", "dynamorio::ptr_int_t"),
        ("instr_create_", "dynamorio::instr_create_"),
    )
    for old, new in replacements:
        line = line.replace(old, new)
    return line
# Collect and format the lines of the macro. Returns those lines as
# a tuple of the list of strings, and the next line number to be
# parsed.
def collect_macro_lines(lines, i):
    # First entry: whatever follows the macro's closing parenthesis.
    sub_lines = [format_line(AFTER_CREATE_MACRO.match(lines[i]).group(2)), ]
    while i < len(lines):
        line = lines[i].rstrip("\r\n\t ")
        # A trailing backslash means the macro body continues on the next line.
        if line and line[-1] == "\\":
            sub_lines.append(format_line(lines[i + 1].strip("\n\r\t \\")))
            i = i + 1
            continue
        break
    # Drop empty lines. NOTE(review): relies on Python 2 semantics -- filter()
    # returns a list here, and callers later append to it. Under Python 3 this
    # would need list(filter(...)).
    sub_lines = filter(None, sub_lines)
    return sub_lines, i + 1
# Given a function applied to the macro parameter names, return a string
# representing a valid type parameter list, or `void` if there are no
# parameters.
def build_typed_arg_list(typing_func, args):
    # Empty parameter lists become C-style "void".
    if not len(args):
        return "void"
    return ", ".join(map(typing_func, enumerate(args)))
# Set of instructions whose DynamoRIO macro form must be emitted.
MUST_EMIT_INSTR_MACRO = set([
    "nop", "mov_st", "lea", "RAW_nop", "jcc"
])

# Emit the a Granary-form of instruction creation, which is a C++
# function.
def emit_instr_function(lines, i, instr, args):
    # Translate one INSTR_CREATE_* macro (starting at lines[i]) into a C++
    # wrapper function written to the generated header/source via H()/C().
    # Returns the index of the next line to parse.
    emit_to_code = instr in MUST_EMIT_INSTR_MACRO
    sub_lines, i = collect_macro_lines(lines, i)
    if emit_to_code:
        # A few macros are also re-emitted verbatim for direct use by Granary.
        C("#define INSTR_CREATE_", instr, "(", ",".join(args), ") \\")
        C(" ", "\\\n ".join(sub_lines))
    sub_lines.append(";")

    # Map a (position, name) macro parameter to its C++ typed declaration.
    def typed_arg(p):
        i, a = p
        if "op" == a:
            return "int " + a
        elif "n" == a:
            return "unsigned " + a
        else:
            return "dynamorio::opnd_t " + a

    # Skip the leading dcontext parameter when building the C++ signature.
    arg_list = build_typed_arg_list(typed_arg, args[1:])
    copied_code = "\n ".join(sub_lines)

    # filter out floating point instructions.
    if "float" in copied_code or "double" in copied_code:
        return i

    func_name = instr + "_"
    if "jecxz" == instr:
        # jecxz gets two declarations: the original name and jrcxz_.
        H(" instruction ", func_name, "(", arg_list, ");")
        func_name = "jrcxz_"

    # emit the new function
    H(" instruction ", func_name, "(", arg_list, ");")
    C("instruction ", func_name, "(", arg_list, ") {")

    # this is a hack: the way be build base/disp operand types from
    # registers is such that the register size can propagate through.
    # this is an attempt to fix it in some cases.
    if "lea" == instr:
        C(" %s.size = dynamorio::OPSZ_lea;" % args[2])

    # Suppress unused-parameter warnings in the generated C++.
    for arg in args[1:]:
        C(" (void) ", arg, ";")

    # Rewrite the dcontext argument to Granary's DCONTEXT singleton.
    if len(args) and "dc" == args[0]:
        copied_code = DC.sub(r"\1(instruction::DCONTEXT)\2", copied_code)

    ret = "return "
    if "return" in copied_code:
        ret = ""
    C(" ", ret, copied_code)
    C("}")

    # NOTE(review): this records control-flow opcodes in SEEN_OPCODES but the
    # set is never consumed after seeding -- confirm whether this tracking is
    # still needed.
    if "dynamorio::OP_" in copied_code:
        m = OPCODE_USE.search(copied_code)
        if m:
            opcode = m.group(1)
            if "call" == opcode or "j" in opcode or "loop" in opcode:
                if opcode not in SEEN_OPCODES:
                    SEEN_OPCODES.add(opcode)
    return i
# Set of operands whose DynamoRIO macro form must be emitted.
MUST_EMIT_OPND_MACRO = set([
    "INT8", "INT32", "INT16", "MEM_lea"
])

# Emit the a Granary-form of operand creation, which is a C++
# function.
def emit_opnd_function(lines, i, opnd, args):
    # Translate one OPND_CREATE_* macro (starting at lines[i]) into a C++
    # wrapper function written via H()/C(). Returns the next line index.
    emit_to_code = opnd in MUST_EMIT_OPND_MACRO
    sub_lines, i = collect_macro_lines(lines, i)
    if emit_to_code:
        # Some operand macros are also re-emitted verbatim for direct use.
        C("#define OPND_CREATE_", opnd, "(", ",".join(args), ") \\")
        C(" ", "\\\n ".join(sub_lines))
    sub_lines.append(";")

    # Map a (position, name) macro parameter to its C++ typed declaration,
    # guessing the type from the parameter's name.
    def typed_arg(p):
        i, a = p
        if "reg" in a:
            return "dynamorio::reg_id_t " + a
        elif "addr" in a:
            return "void *" + a
        else:
            return "uint64_t " + a

    arg_list = build_typed_arg_list(typed_arg, args)
    H(" operand ", opnd.lower(), "_(", arg_list, ");")
    C(" operand ", opnd.lower(), "_(", arg_list, ") {")
    content = "\n ".join(sub_lines)
    ret = "return "
    if "return" in content:
        ret = ""
    C(" ", ret, content)
    C(" }")
    return i
# Drive the generation: walk DynamoRIO's instr_create.h, and for every
# INSTR_CREATE_* / OPND_CREATE_* macro emit a C++ wrapper into the generated
# source/header files opened above.
with open("deps/dr/x86/instr_create.h") as lines_:
    # Preamble of the generated .cc file.
    C('#include <limits.h>')
    C('#include <stdint.h>')
    C('#include "granary/dynamorio.h"')
    C('#include "granary/instruction.h"')
    C('#ifndef GRANARY')
    C('# define GRANARY')
    C('#endif')
    C('#ifndef X64')
    C("# define X64")
    C('#endif')
    C('#define IF_X64_ELSE(t,f) (t)')
    C('namespace granary {')
    # Preamble of the generated .h file, including hand-written wrappers.
    H('#ifndef GRANARY')
    H('# define GRANARY')
    H('#endif')
    H('#ifndef GRANARY_GEN_INSTRUCTION_H_')
    H('#define GRANARY_GEN_INSTRUCTION_H_')
    H("namespace granary {")
    H(' inline operand pc_(app_pc pc) { return dynamorio::opnd_create_pc(pc); }')
    H(' inline operand far_pc_(uint16_t sel, app_pc pc) { return dynamorio::opnd_create_far_pc(sel, pc); }')
    H(' inline operand instr_(dynamorio::instr_t *instr) { return dynamorio::opnd_create_instr(instr); }')
    H(' inline operand far_instr_(uint16_t sel, dynamorio::instr_t *instr) { return dynamorio::opnd_create_far_instr(sel, instr); }')
    H(' operand mem_pc_(app_pc *);')
    H(' operand mem_instr_(dynamorio::instr_t *);')
    H(' instruction persistent_label_(dynamorio::instr_t *);')

    lines = list(lines_)
    i = 0
    while i < len(lines):
        line = lines[i]
        m = CREATE_MACRO.match(line)

        # didn't match a specific macro def or has a special macro
        # used to signal that this line should be ignored.
        if not m or "GRANARY_IGNORE" in line:
            i += 1
            continue

        emitter = None
        func_name = m.group(2)
        # NOTE(review): Python 2 semantics -- map returns a list, which the
        # emitters slice and join; Python 3 would need list(map(...)).
        args = map(str.strip, m.group(3).split(","))

        # instruction
        if "INSTR" == m.group(1):
            emitter = emit_instr_function
        else:
            emitter = emit_opnd_function
        i = emitter(lines, i, func_name, args)

# Close the namespaces and header guard of the generated files.
C('}')
C()
H(' inline instruction cmpxchg16b_(operand d) { d.size = dynamorio::OPSZ_8_rex16; return cmpxchg8b_(d); }')
H('}')
H('#endif /* GRANARY_GEN_INSTRUCTION_H_ */')
H()
|
import os
import csv
import json
import logging
import types
import subprocess
import datetime
import shlex
import traceback
import time
from os.path import join
from django.conf import settings
from collections import OrderedDict
from django.db import transaction
from django.db.utils import IntegrityError
from tworaven_apps.data_prep_utils.duplicate_column_remover import (
DuplicateColumnRemover,
)
from tworaven_apps.utils.view_helper import get_json_error
from tworaven_apps.utils.mongo_util import infer_type, encode_variable
from tworaven_apps.utils.basic_response import ok_resp, err_resp
from tworaven_apps.eventdata_queries import static_vals as evt_static
from tworaven_apps.eventdata_queries.models import (
EventDataSavedQuery,
ArchiveQueryJob,
UserNotification,
SEARCH_PARAMETERS,
SEARCH_KEY_NAME,
SEARCH_KEY_DESCRIPTION,
IN_PROCESS,
ERROR,
COMPLETE,
DATA_PARTITIONS,
MongoDataset,
)
from tworaven_apps.eventdata_queries.dataverse.temporary_file_maker import (
TemporaryFileMaker,
)
from tworaven_apps.eventdata_queries.dataverse.dataverse_publish_dataset import (
DataversePublishDataset,
)
from tworaven_apps.eventdata_queries.dataverse.dataverse_list_files_dataset import (
ListFilesInDataset,
)
from tworaven_apps.eventdata_queries.dataverse.get_dataset_file_info import (
GetDataSetFileInfo,
)
from tworaven_apps.eventdata_queries.mongo_retrieve_util import MongoRetrieveUtil
from tworaven_apps.eventdata_queries.generate_readme import GenerateReadMe
from tworaven_apps.eventdata_queries.dataverse.routine_dataverse_check import (
RoutineDataverseCheck,
)
from tworaven_apps.ta2_interfaces.basic_problem_writer import BasicProblemWriter
from tworaven_apps.raven_auth.models import User
from tworaven_apps.user_workspaces.models import UserWorkspace
from tworaven_apps.utils.url_helper import format_file_uri_to_path
LOGGER = logging.getLogger(__name__)
class EventJobUtil(object):
    """Convenience class for the eventdata queries """

    # Dataverse connection settings shared by the static helpers below.
    dataverse_server = settings.DATAVERSE_SERVER # no trailing slash
    api_key = settings.DATAVERSE_API_KEY # generated from kripanshu's account
    persistentId = settings.DATASET_PERSISTENT_ID # doi or hdl of the dataset
@staticmethod
def get_by_id_and_user(query_id, user):
"""get object by id"""
if not isinstance(user, User):
user_msg = "A user was not specified"
return err_resp(user_msg)
try:
saved_query = EventDataSavedQuery.objects.get(pk=query_id, user=user)
except EventDataSavedQuery.DoesNotExist:
user_msg = "A query was not found for the" " given query id and user"
return err_resp(user_msg)
return ok_resp(saved_query)
    @staticmethod
    def search_objects(user, json_search_info):
        """Search for EventDataSavedQuery objects saved by the given user.

        json_search_info maps search parameter names (see SEARCH_PARAMETERS)
        to non-empty values. Returns ok_resp(results-with-search_params) or
        err_resp(message) on validation/search failure.
        """
        if not isinstance(json_search_info, dict):
            user_msg = (
                "Expected a the search info to be a python dict" " (unusual error)"
            )
            return err_resp(user_msg)
        if not json_search_info:
            user_msg = "Please enter at least 1 search term."
            return err_resp(user_msg)
        if not isinstance(user, User):
            user_msg = "A user was not specified"
            return err_resp(user_msg)
        # Make sure the search parameters are valid
        #
        for key, val in json_search_info.items():
            if key not in SEARCH_PARAMETERS:
                user_msg = (
                    '"%s" is not a valid search parameter.' " Valid parameters: %s"
                ) % (key, ", ".join(SEARCH_PARAMETERS))
                return err_resp(user_msg)
            if not val:
                user_msg = ("A value is needed for the search" ' parameter "%s"') % (
                    key,
                )
                return err_resp(user_msg)
        # Translate the validated parameters into ORM icontains filters.
        filters = dict()
        if SEARCH_KEY_DESCRIPTION in json_search_info:
            filters["description__icontains"] = json_search_info[SEARCH_KEY_DESCRIPTION]
        if SEARCH_KEY_NAME in json_search_info:
            filters["name__icontains"] = json_search_info[SEARCH_KEY_NAME]
        if not filters:  # shouldn't happen b/c just checked
            user_msg = "Please enter at least 1 search term."
            return err_resp(user_msg)
        query_results = EventDataSavedQuery.get_query_list_for_user(user, **filters)
        if not query_results.success:
            return err_resp(query_results.err_msg)
        # Echo the search parameters back at the front of the ordered result.
        final_results = query_results.result_obj
        final_results["search_params"] = json_search_info
        final_results.move_to_end("search_params", last=False)
        return ok_resp(final_results)
@staticmethod
def get_query_from_object(query_id, user):
    """ return query obj"""
    # NOTE(review): this early return disables the whole routine
    # ("temp disabled!!"); everything below it is currently dead code,
    # apparently kept for when the Dataverse upload flow is re-enabled.
    return get_json_error("temp disabled!!")
    print("-" * 40)
    print("getting object for query_id %s" % query_id)
    print("-" * 40)
    success, event_obj = EventJobUtil.get_by_id_and_user(query_id, user)
    if not success:
        return get_json_error(event_obj)
    # print("event data obj ", event_obj.as_dict()['query'])
    # check if the entry is allowed to be saved on dataverse
    if not event_obj.as_dict()["save_to_dataverse"]:
        return err_resp("save to dataverse is set to False")
    print("-" * 40)
    print("Routine Dataverse Check")
    print("-" * 40)
    # run dataverse check :
    success_dataverse_check, check_obj = EventJobUtil.run_dataverse_check()
    if not success_dataverse_check:
        # add to user notification model
        # NOTE(review): hard-coded "test_user" — confirm this placeholder.
        EventJobUtil.add_to_user_model("test_user", query_id, check_obj)
        return err_resp(check_obj)
    print("-" * 40)
    print("Uploading query file")
    print("-" * 40)
    # send query_file to dataverse:
    success_query, query_obj = EventJobUtil.upload_query_result(event_obj)
    if not success_query:
        return get_json_error(query_obj)
    # make readme file and upload
    print("-" * 40)
    print("Uploading query result")
    print("-" * 40)
    success_readme, readme_ob = EventJobUtil.upload_query_readme(
        event_obj.as_dict()
    )
    if not success_readme:
        return get_json_error(readme_ob)
    print("Generated read me uploaded to dataverse", readme_ob)
    # publish dataset
    print("-" * 40)
    print("publishing dataset")
    print("-" * 40)
    success_publish_dataset, published_dataset_obj = EventJobUtil.publish_dataset()
    if not success_publish_dataset:
        # add to user notification model
        EventJobUtil.add_to_user_model("test_user", query_id, published_dataset_obj)
        return err_resp(published_dataset_obj)
    print("-" * 40)
    print("Adding Query Object")
    print("-" * 40)
    # add to query obj
    # first get the dataset file info of latest version
    job_file_info = GetDataSetFileInfo()
    success_file_info, res_info = job_file_info.return_status()
    if not success_file_info:
        return err_resp(res_info)
    latest_version = res_info["data"]["latestVersion"]["versionNumber"]
    has_error = False
    error_list = []
    ok_response = []
    # Register one ArchiveQueryJob per file in the latest dataset version.
    for d in res_info["data"]["latestVersion"]["files"]:
        file_id = d["dataFile"]["id"]
        file_url = (
            EventJobUtil.dataverse_server
            + "/file.xhtml?fileId="
            + str(file_id)
            + "&version="
            + str(latest_version)
        )
        try:
            saved_query = EventDataSavedQuery.objects.get(id=query_id)
        except ValueError:
            return err_resp("Could not retrieve query for id %s" % query_id)
        try:
            search_obj = ArchiveQueryJob.objects.get(datafile_id=file_id)
        except ArchiveQueryJob.DoesNotExist:
            search_obj = None
        if search_obj is None:
            succ, add_archive = EventJobUtil.add_archive_query_job(
                datafile_id=file_id,
                saved_query=saved_query,
                status=COMPLETE,
                is_finished=True,
                is_success=True,
                message="query result successfully created",
                dataverse_response=d,
                archive_url=file_url,
            )
            if not succ:
                has_error = True
                error_list.append("Could not add the object with file id %s" % file_id)
        else:
            # An archive entry already exists for this datafile.
            has_error = True
            error_list.append("Object with file ID %s already exists" % file_id)
    if has_error:
        # save to user notification
        return err_resp(error_list)
    else:
        return ok_resp(ok_response)
@staticmethod
def add_archive_query_job(**kwargs):
    """ add to the database of archive jobs"""
    # NOTE(review): a near-identical add_archive_query_job is re-defined
    # later in this class; in Python the later definition wins, so this
    # copy is shadowed and never called. Consider deleting one of them.
    job = ArchiveQueryJob(**kwargs)
    job.save()
    # return True,"All good"
    # print("job :", job.as_dict())
    if job.id:
        # save succeeded: the database assigned a primary key
        """no error"""
        usr_dict = dict(
            success=True, message="query job archived", data=job.as_dict()
        )
        return ok_resp(usr_dict)
    else:
        """error"""
        usr_dict = dict(success=False, message="failed to archive query", id=job.id)
        return err_resp(usr_dict)
@staticmethod
def get_all_archive_query_objects():
    """ get list of all objects"""
    ok_flag, payload = ArchiveQueryJob.get_all_objects()
    return ok_resp(payload) if ok_flag else err_resp(payload)
@staticmethod
def publish_dataset():
    """ publish dataset
    might be using dataset_id later according to actual API request
    """
    # NOTE(review): a second publish_dataset(dataset_id) is defined later
    # in this class; that later definition shadows this zero-argument
    # variant, so this copy is unreachable as written.
    job = DataversePublishDataset()
    # job2 = GetDataSetFileInfo()
    succ, res = job.return_status()
    if not succ:
        print("message from dataverse publish fail ", res)
        return err_resp(res)
    else:
        print("message from dataverse publish success ", res)
        return ok_resp(res)
@staticmethod
def get_dataverse_files(version_id):
    """ get list"""
    ok_flag, payload = ListFilesInDataset(version_id).return_status()
    if not ok_flag:
        return err_resp(payload)
    return ok_resp(payload)
@staticmethod
def add_archive_query_job(**kwargs):
    """Create and persist an ArchiveQueryJob built from the given fields."""
    archive_job = ArchiveQueryJob(**kwargs)
    archive_job.save()
    # return True,"All good"
    print("job :", archive_job.as_dict())
    # A missing primary key after save() means the row was not written.
    if not archive_job.id:
        return err_resp(
            dict(success=False, message="failed to archive query", id=archive_job.id))
    return ok_resp(
        dict(success=True, message="query job archived", data=archive_job.as_dict()))
@staticmethod
def get_archive_query_object(datafile_id):
    """ get the data for datafile_id object"""
    ok_flag, payload = ArchiveQueryJob.get_objects_by_id(datafile_id)
    print("event util obj", payload)
    return ok_resp(payload) if ok_flag else err_resp(payload)
@staticmethod
def get_all_archive_query_objects():
    """Return every ArchiveQueryJob as an ok/err response."""
    ok_flag, payload = ArchiveQueryJob.get_all_objects()
    if not ok_flag:
        return err_resp(payload)
    return ok_resp(payload)
@staticmethod
def publish_dataset(dataset_id):
    """ publish dataset
    might be using dataset_id later according to actual API request
    """
    # NOTE(review): this definition shadows the earlier zero-argument
    # publish_dataset in this class, and `dataset_id` is not used yet.
    job = DataversePublishDataset()
    job2 = GetDataSetFileInfo()
    succ, res = job.return_status()
    if succ:
        success, res_info = job2.return_status()
        print("Res : ********* : ", res_info)
        if success:
            # Record the published file URL on each matching archive job.
            for d in res_info["data"]["latestVersion"]["files"]:
                print("*******")
                file_id = d["dataFile"]["id"]
                file_url = d["dataFile"]["pidURL"]
                success, archive_job = ArchiveQueryJob.get_objects_by_id(file_id)
                if success:
                    archive_job.archive_url = file_url
                    archive_job.save()
                    # NOTE(review): returns after the FIRST file; the
                    # remaining files are never updated — confirm intended.
                    return ok_resp(res)
                else:
                    return err_resp(archive_job)
        else:
            return err_resp(res_info)
    else:
        return err_resp(res)
@staticmethod
def get_dataverse_files(version_id):
    """List the files in the given dataset version on Dataverse."""
    lister = ListFilesInDataset(version_id)
    ok_flag, payload = lister.return_status()
    return ok_resp(payload) if ok_flag else err_resp(payload)
@staticmethod
def upload_query_result(event_obj):
    """upload query result to dataverse

    Runs the saved query against Mongo, serializes the result to a temp
    file named "<id>_<collection>.txt", and hands it to TemporaryFileMaker
    for upload.  Returns ok_resp/err_resp with the file-maker's status.
    """
    event_info = event_obj.as_dict()
    collection_name = event_info["collection_name"]
    query_obj = event_info["query"]
    query_id = event_info["id"]
    filename = "%s_%s.txt" % (str(query_id), str(collection_name))
    obj = MongoRetrieveUtil(settings.EVENTDATA_DB_NAME, collection_name)
    success, mongo_obj = obj.run_query(query_obj, "aggregate")
    # Bug fix: check the success flag rather than the result's truthiness.
    # Previously a failed query (whose error payload is a non-empty string)
    # slipped past `if not mongo_obj` and the error text was uploaded as
    # if it were query output.
    if not success:
        return err_resp(mongo_obj)
    json_dump = json.dumps(mongo_obj)
    temp_file_obj = TemporaryFileMaker(filename, json_dump)
    succ, res_obj = temp_file_obj.return_status()
    print("query result upload : ", res_obj)
    if succ:
        return ok_resp(res_obj)
    else:
        return err_resp(res_obj)
@staticmethod
def upload_query_readme(kwargs):
    """upload query_readme result to dataverse """
    readme_maker = GenerateReadMe(kwargs)
    ok_flag, readme_obj = readme_maker.generate_readme()
    return ok_resp(readme_obj) if ok_flag else err_resp(readme_obj)
@staticmethod
def run_dataverse_check():
    """Run the routine Dataverse health check and wrap its status."""
    checker = RoutineDataverseCheck()
    ok_flag, check_result = checker.check_result_status()
    return ok_resp(check_result) if ok_flag else err_resp(check_result)
@staticmethod
def add_to_user_model(user_name, query_id, message):
    """Record *message* as a UserNotification for the named user.

    Looks up the user and the saved query, then persists an unread
    UserNotification carrying the query and the message.
    Returns ok_resp/err_resp dicts describing the outcome.
    """
    # Bug fix: User.objects.get raises User.DoesNotExist instead of
    # returning a falsy value, so the old `if not user_object` check was
    # unreachable and a missing user crashed the caller.
    try:
        user_object = User.objects.get(username=user_name)
    except User.DoesNotExist:
        return err_resp("could not find user with name %s" % user_name)
    # Bug fix: also catch the model's DoesNotExist (raised for a missing
    # row); ValueError is kept for malformed ids.
    try:
        saved_query = EventDataSavedQuery.objects.get(id=query_id)
    except (EventDataSavedQuery.DoesNotExist, ValueError):
        return err_resp("Could not retrieve query for id %s" % query_id)
    query = saved_query.as_dict()["query"]
    user_notify = UserNotification(
        user=user_object, message=message, read=False, archived_query=query
    )
    user_notify.save()
    if user_notify.id:
        # save succeeded: a primary key was assigned
        usr_dict = dict(
            success=True, message="query saved", data=user_notify.as_dict()
        )
        return ok_resp(usr_dict)
    usr_dict = dict(
        success=False, message="failed to save query", id=user_notify.id
    )
    return err_resp(usr_dict)
@staticmethod
def get_metadata(folder, names=None):
    """
    Open one of the folders such as 'collections', 'formats', 'geojson' or 'alignments'
    Read through the files and return a dict with:
        {key: file contents}
    Example, for "collections/terrier.json":
    {
      "terrier": {
        "name": "Temporally Extended Regularly Reproducible International Event Records (Terrier)",
        "key": "terrier",
        "description": "Event data records [etc, etc]",
        "interval": "1979 - 2016",
        etc...
       }
    }
    names - optional list of filenames, without the extension.
            e.g. ['acled_africa', 'cline_phoenix_nyt', 'terrier']
    """
    directory = join(os.getcwd(), "tworaven_apps", "eventdata_queries", folder)
    if names:
        # add .json to name, if needed
        names = [x if x.endswith(".json") else x + ".json" for x in names]
        # make sure name is in directory and has file extension
        names = [x for x in names if x in os.listdir(directory)]
    else:
        # Use all names in the directory
        #
        names = sorted(os.listdir(directory))
        # For the current collections, only allow
        # those from settings.EVENTDATA_DATASETS
        #
        if folder == evt_static.FOLDER_COLLECTIONS:
            names = [fname for fname in names if fname in settings.EVENTDATA_DATASETS]
    # Construct dict of collection names and file contents
    #
    collection_info = {}
    for fname in names:
        # Fix: use a context manager so each metadata file is closed
        # promptly instead of leaking the handle (the old code passed
        # a bare open(...) into json.load).
        with open(join(directory, fname), "r") as json_file:
            collection_info[fname.replace(".json", "")] = json.load(
                json_file, object_pairs_hook=OrderedDict
            )
    return collection_info
@staticmethod
def get_data(database, collection, method, query, distinct=None, host=None):
    """Return data from a Mongo query"""
    # "distinct" is the only method that requires the extra keys argument.
    if method == "distinct" and not distinct:
        return err_resp("the distinct method requires a 'keys' argument")
    mongo_util = MongoRetrieveUtil(database, collection, host)
    ok_flag, payload = mongo_util.run_query(query, method, distinct)
    if ok_flag:
        return ok_resp(payload)
    return err_resp(payload)
@staticmethod
def import_dataset(
    database,
    collection,
    data_path,
    reload=False,
    header=True,
    columns=None,
    indexes=None,
    delimiter=None,
):
    """Key method to load a Datafile into Mongo as a new collection

    Uses a MongoDataset row as a loading lock so concurrent callers wait
    for one import instead of racing.  The mongoimport fast path is
    currently disabled (`if False`), so rows are inserted one-by-one.
    """
    # print('--> import_dataset --')
    retrieve_util = MongoRetrieveUtil(database, collection)
    db_info = retrieve_util.get_mongo_db(database)
    if not db_info.success:
        return err_resp(db_info.err_msg)
    db = db_info.result_obj
    collection_name = settings.MONGO_COLLECTION_PREFIX + collection
    # dataset already loaded in mongo
    #
    if collection_name in db.list_collection_names():
        if reload:
            db[collection_name].drop()
            MongoDataset.objects.filter(name=collection_name).delete()
        else:
            # print('--> import_dataset: data in database, no data in django, not reloading')
            # make sure database entry exists
            dataset_records = MongoDataset.objects.filter(name=collection_name)
            if dataset_records:
                dataset_record = dataset_records[0]
                dataset_record.loading = False
                dataset_record.save()
            else:
                MongoDataset.objects.create(name=collection_name, loading=False)
            return ok_resp({"collection": collection_name})
    else:
        # if data is not loaded, make sure record is not in database
        try:
            MongoDataset.objects.filter(name=collection_name).delete()
        except MongoDataset.DoesNotExist:
            pass
    # print('data not loaded, and no data in django')
    # create lockable record
    dataset_record, created = MongoDataset.objects.get_or_create(name=collection_name, defaults={'loading': True})
    # if not MongoDataset.objects.filter(name=collection_name):
    #     MongoDataset.objects.create(name=collection_name, loading=True)
    #
    #     # lock on record
    #     dataset_record = MongoDataset.objects.get(name=collection_name)
    if not created:
        # Another process holds the loading lock: poll (up to ~1000s)
        # until it finishes, then reuse its collection.
        for _ in range(1000):
            if not dataset_record.loading:
                return ok_resp({"collection": collection_name})
            time.sleep(1)
            dataset_record.refresh_from_db()
        return err_resp(f"the mongo import loading process for {collection_name} timed out")
    # print(collection_name + ' does not yet exist. Importing.\n\n\n\n')
    if not data_path:
        return err_resp("The file_uri cannot be None or an empty string.")
    if not os.path.exists(data_path):
        return err_resp(collection + " not found")
    # Convert the file uri to a path
    #
    fpath, err_msg = format_file_uri_to_path(data_path)
    if err_msg:
        return err_resp(err_msg)
    # for mongoimport commands
    #
    import_commands = []
    # -------------------------------------
    # ignore first line of input files
    # -------------------------------------
    if header:
        import_commands.append(f"tail -n +2")
    # -------------------------------------
    # standardize column metadata to dict
    # -------------------------------------
    if not columns:
        columns = DuplicateColumnRemover(data_path, rewrite=False).updated_columns
    if isinstance(columns, list):
        columns = {col: None for col in columns}
    # -------------------------------------
    # standardize dict's tworavens types to mongo,
    # try to be flexible with alternative words
    # -------------------------------------
    def mongofy_type(value):
        # Map python types / loose type names onto mongoimport column
        # types; anything unrecognized falls back to "auto".
        return {
            bool: "boolean",
            "boolean": "boolean",
            str: "string",
            "string": "string",
            int: "int32",
            "int32": "int32",
            "int": "int32",
            float: "double",
            "double": "double",
            "float": "double",
            datetime.datetime: "date",
            "date": "date",
        }.get(value, "auto")
    columns = {col: mongofy_type(columns[col]) for col in columns}
    # -------------------------------------
    # Prepare field names and set delimiter
    # for Mongo import/insert
    # -------------------------------------
    def sanitize(column):
        # Escape double quotes so names survive the quoted --fields arg.
        return encode_variable(column).replace('"', '\\"')
    field_names = ",".join(
        f"{sanitize(col)}.{columns.get(col, 'auto')}()" for col in columns
    )
    # print('field_names', field_names)
    delimiter_type = "csv"
    # NOTE(review): os.path.splitext returns the extension WITH the
    # leading dot (".tsv"), so this comparison never matches — confirm
    # whether ".tsv" was intended here.
    if os.path.splitext(data_path)[1] == "tsv":
        delimiter_type = "tsv"
    if delimiter in [None, ","]:
        pass
    elif delimiter == "\t":
        delimiter_type = "tsv"
    else:
        # Unusual delimiter: translate it to tabs before importing.
        import_commands.append(f'tr "{delimiter}" "\t" <')
        delimiter_type = "tsv"
    delimiter = {"csv": ",", "tsv": "\t"}[delimiter_type]
    # ------------------------------------------
    # TEMP skip this for k8s...
    # ---
    # Prepare and run the mongoimport command
    # ------------------------------------------
    # try:
    if False:  # try:
        import_commands.append(
            f"mongoimport"
            f" --db {database}"
            f" --collection {settings.MONGO_COLLECTION_PREFIX + collection}"
            f" --type {delimiter_type}"
            f" --ignoreBlanks"
            f" --columnsHaveTypes"
            f" --parseGrace autoCast"
            f" --drop"
            f" --numInsertionWorkers=4"
            f' --fields "{field_names}"'
        )
        # the first command takes the data path, which is piped through the other commands
        import_commands[0] = import_commands[0] + " " + data_path
        # print('--> import_dataset: mongoimport command:')
        # print('-->' + ' | '.join(import_commands))
        # pipe each command to the next
        # print('--> start subprocess')
        process = subprocess.Popen(
            shlex.split(import_commands[0]), stdout=subprocess.PIPE
        )
        for command in import_commands[1:]:
            # print('--> command (bracketed): [%s]' % command)
            process = subprocess.Popen(
                shlex.split(command), stdin=process.stdout, stdout=subprocess.PIPE
            )
        process.communicate()
        # Backfill missing fields with None so every document has all columns.
        for column in columns.keys():
            db[collection_name].update(
                {column: {"$exists": False}}, {"$set": {column: None}}, multi=True
            )
    else:  # except Exception as err:
        # slower, secondary import if first fails
        # print('--> mongo err: [%s]' % err)
        # print(traceback.format_exc())
        print(
            "--> import_dataset: mongoimport disabled. Running row-by-row insertion instead."
        )
        db[collection_name].drop()
        with open(data_path, "r") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=delimiter)
            # discard header
            next(csv_reader)
            # use duplicate column name removal headers instead
            columns = [encode_variable(value) for value in columns]
            for observation in csv_reader:
                db[collection_name].insert_one({col: infer_type(val) for col, val in zip(columns, observation)})
    if indexes:
        for index in indexes:
            # print('creating index ', index, ' on ', collection_name)
            db[collection_name].create_index(index)
    # Release the loading lock so any waiting processes can proceed.
    dataset_record.loading = False
    dataset_record.save()
    return ok_resp({"collection": collection_name})
@staticmethod
def delete_dataset(database, collection):
    """Drop *collection* from the Mongo *database* (returns None on success)."""
    db_info = MongoRetrieveUtil(database, collection).get_mongo_db(database)
    if not db_info.success:
        return err_resp(db_info.err_msg)
    db_info.result_obj[collection].drop()
@staticmethod
def export_csv(user_workspace, collection, data):
    """Export the dataset using the 'BasicProblemWriter' """
    if not isinstance(user_workspace, UserWorkspace):
        return err_resp("The user_workspace was not set correctly. (export_dataset)")
    if not isinstance(data, (list, types.GeneratorType)):
        failure_msg = 'export_dataset failed. "data" must be a list or generator'
        LOGGER.error(failure_msg)
        return err_resp(failure_msg)
    # Write under the workspace's manipulation_data tree in D3M layout.
    destination = os.path.join(
        "manipulation_data", collection, "TRAIN", "tables", "learningData.csv"
    )
    writer_options = {
        BasicProblemWriter.IS_CSV_DATA: True,
        BasicProblemWriter.INCREMENT_FILENAME: True,
        BasicProblemWriter.QUOTING: csv.QUOTE_NONNUMERIC,
    }
    writer = BasicProblemWriter(user_workspace, destination, data, **writer_options)
    if writer.has_error():
        return err_resp(writer.get_error_message())
    return ok_resp(writer.new_filepath)
@staticmethod
def export_dataset(user_workspace, data, metadata):
    """Export the problem in a D3M-compatible format

    Writes the rows to tables/learningData.csv and the (column-index
    corrected) metadata to datasetDoc.json under a fresh numbered folder
    in the workspace's temp "manipulations" directory.
    """
    if not isinstance(user_workspace, UserWorkspace):
        user_msg = "The user_workspace was not set correctly." " (export_problem)"
        return err_resp(user_msg)
    if not isinstance(data, list) and not isinstance(data, types.GeneratorType):
        user_msg = 'export_problem failed. "data" must be a list or generator'
        LOGGER.error(user_msg)
        return err_resp(user_msg)
    if not data:
        user_msg = 'export_problem failed. "data" must be non-empty'
        LOGGER.error(user_msg)
        return err_resp(user_msg)
    d3m_config = user_workspace.d3m_config
    manipulations_folderpath = os.path.join(
        d3m_config.get_temp_directory(), "manipulations"
    )
    # Pick the first unused numeric sub-folder name for this export.
    extension = 0
    while os.path.exists(os.path.join(manipulations_folderpath, str(extension))):
        extension += 1
    # directory that contains entire dataset
    temp_dataset_folderpath = os.path.join(manipulations_folderpath, str(extension))
    # paths to datasetDoc and .csv
    temp_metadata_filepath = os.path.join(
        temp_dataset_folderpath, "datasetDoc.json"
    )
    temp_data_filepath = os.path.join(
        temp_dataset_folderpath, "tables", "learningData.csv"
    )
    try:
        os.makedirs(os.path.join(temp_dataset_folderpath, "tables"))
    except OSError:
        pass
    # Normalize list vs generator input into (first_row, full iterable).
    first_row = None
    data_source = None
    if isinstance(data, list):
        first_row = data[0]
        data_source = data
    if isinstance(data, types.GeneratorType):
        # Peek at the first row to learn the columns, then chain it back
        # in front of the remaining rows.
        first_row = next(data)
        def file_data():
            yield first_row
            yield from data
        data_source = file_data()
    # the BasicProblemWriter doesn't write to write_directory, and this doesn't seem trivial to change
    columns = list(first_row.keys())
    with open(temp_data_filepath, "w", newline="") as output_file:
        dict_writer = csv.DictWriter(
            output_file,
            quoting=csv.QUOTE_NONNUMERIC,
            fieldnames=columns,
            extrasaction="ignore",
        )
        dict_writer.writeheader()
        dict_writer.writerows(data_source)
    # find first table
    resource = next(
        (res for res in metadata["dataResources"] if res["resType"] == "table"),
        None,
    )
    if not resource:
        return err_resp("Dataset must contain at least one tabular resource.")
    # rewrite colIndex of passed datasetDoc to match actual column order
    # (note: this mutates the caller's metadata dict in place)
    column_lookup = {struct["colName"]: struct for struct in resource["columns"]}
    resource["columns"] = [
        {**column_lookup[name], "colIndex": i}
        for i, name in enumerate(columns)
        if name in column_lookup
    ]
    with open(temp_metadata_filepath, "w") as metadata_file:
        json.dump(metadata, metadata_file)
    return ok_resp(
        {"data_path": temp_data_filepath, "metadata_path": temp_metadata_filepath, "metadata": metadata}
    )
#
# EventJobUtil.import_dataset(
# 'tworavens', 'test',
# '/home/shoe/ravens_volume/solvers/produce/1ee20684-ca0d-4847-b7f0-2595f3594dc1.csv',
# reload=True, columns={'d3mIndex': 'int', 'p_0': 'float', 'p_1': 'float'})
|
import cv2
import numpy as np
from PIL import Image
import os
# CALCULA A IOU MÉDIA DA MASCARA DO COLOR_CLASSIFIER COM O GROUND TRUTH
# img = cv2.imread('images/mask.jpg')
# gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# output = gray_img
#
# img = cv2.imread('../segmentation_dataset/ground_truth/fire000_gt.png')
# gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# label = gray_img
# # print(type(gray_img))
# # cv2.imwrite('test.png', gray_img)
#
# SMOOTH = 1e-6
# # print(output.shape)
# # output = output.squeeze(1)
#
# intersection = (output & label).sum()
# # print(intersection)
# union = (output | label).sum()
# # print(union)
#
# iou = (intersection + SMOOTH) / (union + SMOOTH)
#
# inverse_output = 1 - output
# inverse_label = 1 - label
#
# intersection = (output & label).sum()
# # print(intersection)
# union = (output | label).sum()
# # print(union)
#
# iou += (intersection + SMOOTH) / (union + SMOOTH)
# iou /= 2
#
# # thresholded = np.ceil(np.clip(20 * (iou - 0.5), 0, 10)) / 10
#
# print(iou)
# TRANSFORMA IMAGEM EM BINARIA
# img = np.array(Image.open('images/mask.jpg'))
# # print(img.shape)
# _, bin_img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
# # print(type(bin_img))
#
# # output = Image.fromarray(bin_img)
# # im.show()
# output = bin_img
# # print(output.shape)
#
# img = np.array(Image.open('../segmentation_dataset/ground_truth/fire000_gt.png'))
# _, bin_img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
# # binarr = np.where(img>128, 255, 0)
# # Covert numpy array back to image
# # binimg = Image.fromarray(binarr)
#
# label = Image.fromarray(bin_img)
# label.show()
# # label = bin_img
# label = bin_img
# # print(label)
# # print(label.shape)
input_path = 'output/color_classifier/v0/'
input_path2 = 'output/model/coatnet_4/'

# Collect the image paths to process: every file in the directory, or the
# single file itself when a file path was given.
if os.path.isdir(input_path):
    lst_img = [os.path.join(input_path, entry) for entry in os.listdir(input_path)]
elif os.path.isfile(input_path):
    lst_img = [input_path]
else:
    raise Exception("Invalid path")

for im in lst_img:
    print('\t|____Image processing: ', im)
    basename = im[im.rfind("/") + 1:]
    img1 = cv2.imread(im)
    img2 = cv2.imread(input_path2 + basename)
    # Pixel-wise AND: keep only regions present in BOTH masks.
    dest_and = cv2.bitwise_and(img2, img1, mask=None)
    cv2.imwrite('output/intersection/coatnet_4_and_v0/' + basename, dest_and)
|
<reponame>FernandoZhuang/Emotion-recognition-of-netizens-during-the-epidemic
'''
Learn from
https://mccormickml.com/2019/07/22/BERT-fine-tuning/#1-setup
https://towardsdatascience.com/bert-classifier-just-another-pytorch-model-881b3cf05784
Depreciated
先验知识 bayes相关函数
动态batchsize
时间层面特征
'''
import torch
import torch.utils.data as tud
import transformers
import sklearn.model_selection as sms
import numpy as np
import pandas as pd
import os
import time
import datetime
import random
import tqdm
import multiprocessing
import functools
import my_setting.utils as utils
import DataPreprocessing as dp
import Hashtag as ht
import SentimentTime as st
import itertools
# HACK 若要实现传统的variable batch size,TensorDataset, SequentialSampler等待修改,暂时弃用
# class TensorDataset(tud.Dataset):
# r'''
# 根据每天的微博数,分别获取动态batch_size
# 由于每天的微博数两在1K以上,若把1K量级数作为Batch_size,不合适
# 于是再把每天的微博数细分到mini-batch
# '''
#
# def __init__(self, *args, **kwargs):
# assert all(args[0].size(0) == tensor.size(0) for tensor in args)
# self.tensors = args
# self.day = (kwargs['preprocessed_data']['datetime'].dt.month - 1) * 31 + kwargs['preprocessed_data'][
# 'datetime'].dt.day
# self.cluster_indices = [i for i in range(len(self.day))]
#
# self.batch_size = []
# cnt, former = 0, 1
# for i in self.day:
# if i != former or cnt + 1 > int(utils.cfg.get('HYPER_PARAMETER', 'batch_size')):
# self.batch_size.append(cnt)
# former, cnt = i, 0
# cnt += 1
# self.batch_size.append(cnt) # 把最后一天的也加入
#
# def __getitem__(self, index):
# return tuple(tensor[index] for tensor in self.tensors)
#
# def __len__(self):
# return self.tensors[0].size(0)
#
#
# class SequentialSampler(tud.Sampler):
# r'''
# 依据动态batch_size,调整每个batch内的序号数
# '''
#
# def __init__(self, data_source, batch_size=None, shuffle=False):
# self.data_source = data_source
# self.batch_sizes = self.data_source.batch_size
# self.shuffle = shuffle
#
# def flatten_list(self, lst):
# return [item for sublist in lst for item in sublist]
#
# def __iter__(self):
# cluster_indices = self.data_source.cluster_indices
# batches, cnt = [], 0
#
# for len_ in self.batch_sizes:
# batches += [cluster_indices[cnt:cnt + len_]]
# cnt = cnt + len_
#
# if self.shuffle: random.shuffle(batches)
#
# return iter(batches)
#
# def __len__(self):
# return len(self.data_source)
class Dataset():
    """Tokenize preprocessed text and build PyTorch dataloaders from it."""

    def __init__(self, preprocessed_data=None, tokenizer=None, use_variable_batch=0):
        super(Dataset, self).__init__()
        self.preprocessed_data = preprocessed_data
        self.tokenizer = tokenizer
        self.use_variable_batch = use_variable_batch

    def get_dataloader(self, is_super=True):
        """Tokenize the cleaned text; return a DataLoader only for the
        unlabeled case (subclasses build their own iterators)."""
        cleaned_data = self.preprocessed_data.cleaned_data
        sentences = cleaned_data.content.values
        # NOTE(review): both branches below are identical, so the
        # `tokenizer` passed to __init__ is effectively ignored — confirm
        # whether the else-branch was meant to use self.tokenizer.
        if self.tokenizer is None:
            tokenizer = transformers.BertTokenizer.from_pretrained(
                utils.cfg.get('PRETRAIN_MODEL', 'original_bert_path'))
        else:
            tokenizer = transformers.BertTokenizer.from_pretrained(
                utils.cfg.get('PRETRAIN_MODEL', 'original_bert_path'))
        # TODO: should input_ids / attention_mask be private attributes,
        # accessed via getters from LabeledDataset?
        self.input_ids = self.token_encode_multiprocess(tokenizer=tokenizer, sentences=sentences)
        self.attention_masks = self.attention_mask(self.input_ids)
        # Only the unlabeled dataset returns here; LabeledDataset builds
        # its train/validation iterators in the subclass override.
        if not is_super:
            return self.create_itrator_for_dataset(self.input_ids, self.attention_masks)

    def token_encode(self, partial, sentences):
        '''
        Single-process tokenization helper. Kept as a named method (rather
        than a lambda nested in token_encode_multiprocess) so it can also
        be used for plain single-process tokenizing.
        '''
        return [partial(str(sent)) for sent in sentences]

    def token_encode_multiprocess(self, tokenizer, sentences):
        """Tokenize the sentences in parallel across n_cores workers."""
        n_cores = 10
        start_time = time.time()
        with multiprocessing.Pool(n_cores) as p:
            token_encode_partial = functools.partial(tokenizer.encode, add_special_tokens=True,
                                                     max_length=int(
                                                         utils.cfg.get('HYPER_PARAMETER', 'max_sequence_length')),
                                                     pad_to_max_length=True)
            token_encode_multiprocess_partial = functools.partial(self.token_encode, token_encode_partial)
            res = p.map(token_encode_multiprocess_partial, dp.Batch(n_cores, sentences.tolist()))
            res = functools.reduce(lambda x, y: x + y, res)
        print(f'已获取Token后的ID, 用时:{round(time.time() - start_time, 2)}s')
        return res

    def attention_mask(self, input_ids):
        '''
        Record which tokens are real (1) vs padding (0).
        :param input_ids: Token ID
        :return: list
        '''
        attention_masks = []
        for sent in input_ids:
            att_mask = [int(token_id > 0) for token_id in sent]
            attention_masks.append(att_mask)
        return attention_masks

    def create_itrator_for_dataset(self, input_ids=None, attention_masks=None, label_arg=None):
        '''
        Convert input ids, attention masks and optional labels into a
        DataLoader. Inherited by LabeledDataset, which supplies label_arg.
        :return: dataloader
        '''
        assert input_ids and attention_masks, f'input_ids,attention_masks必须被赋值!'
        inputs, masks = torch.tensor(input_ids), torch.tensor(attention_masks)
        if self.use_variable_batch:
            # Variable batch size keyed to the posting day.
            # NOTE(review): the TensorDataset / SequentialSampler referenced
            # here are the custom classes commented out above, so this
            # branch raises NameError as written — confirm before enabling
            # use_variable_batch.
            if not label_arg:
                input_data = TensorDataset(inputs, masks, preprocessed_data=self.preprocessed_data.cleaned_data)
            else:
                labels = torch.tensor(label_arg)
                input_data = TensorDataset(inputs, masks, labels, preprocessed_data=self.preprocessed_data.cleaned_data)
            input_sampler = SequentialSampler(input_data)
            return tud.DataLoader(input_data, batch_sampler=input_sampler, shuffle=False, num_workers=4)
        else:
            # Standard fixed-size batching.
            # NOTE(review): prefer `label_arg is None`; `== None` works but
            # is non-idiomatic and fragile for tensor-like arguments.
            if label_arg == None:
                input_data = tud.TensorDataset(inputs, masks)
            else:
                labels = torch.tensor(label_arg)
                input_data = tud.TensorDataset(inputs, masks, labels)
            input_sampler = tud.SequentialSampler(input_data)
            return tud.DataLoader(input_data, sampler=input_sampler, shuffle=False,
                                  batch_size=int(utils.cfg.get('HYPER_PARAMETER', 'batch_size')), num_workers=4)
class LabeledDataset(Dataset):
    """Dataset variant for sentiment-labeled data: adds a train/validation split."""

    def __init__(self, preprocessed_data=None, tokenizer=None, use_variable_batch=False):
        super(LabeledDataset, self).__init__(preprocessed_data, tokenizer, use_variable_batch)

    def get_dataloader(self):
        """Tokenize, split train/validation, and return the two loaders plus row indices."""
        # Populate self.input_ids / self.attention_masks; the parent does
        # not build an iterator when called from a subclass.
        super().get_dataloader()
        labels = self.preprocessed_data.cleaned_data.sentiment.values.tolist()
        indices = list(range(len(labels)))
        (train_inputs, validation_inputs,
         train_masks, validation_masks,
         train_labels, validation_labels,
         train_index, validation_index) = sms.train_test_split(
            self.input_ids, self.attention_masks, labels, indices, random_state=2018,
            test_size=float(utils.cfg.get('HYPER_PARAMETER', 'test_size')))
        train_loader = self.create_itrator_for_dataset(train_inputs, train_masks, train_labels)
        validation_loader = self.create_itrator_for_dataset(
            validation_inputs, validation_masks, validation_labels)
        return train_loader, validation_loader, train_index, validation_index
class BertForSeqClassification(torch.nn.Module):
    """BERT sequence classifier with an optional custom head that
    concatenates the [CLS] vectors of the last N hidden layers (and,
    optionally, the stock output) before a final linear layer."""

    def __init__(self, hidden_layers=None, pool_out=None, labels=3, train_label=None):
        '''
        :param hidden_layers: number of final hidden layers whose [CLS]
            vectors feed the custom head (None = use the stock head only)
        :param pool_out: when not None, also concatenate the stock output
        :param labels: number of sentiment classes
        :param train_label: optional preprocessed training-data holder
        '''
        super(BertForSeqClassification, self).__init__()
        # Bug fix: attribute was misspelled `cleaneD_data`, which raised
        # AttributeError whenever train_label was supplied (other code in
        # this module consistently uses `cleaned_data`).
        if train_label: self.train_label = train_label.cleaned_data
        self._hidden_size = 768
        self.hidden_layers, self.pool_out, self.labels = hidden_layers, pool_out, labels
        self._config = transformers.BertConfig.from_pretrained(
            utils.cfg.get('PRETRAIN_MODEL', 'original_bert_path'), num_labels=self.labels,
            output_attentions=False,
            output_hidden_states=True)
        self.bert = transformers.BertForSequenceClassification.from_pretrained(
            utils.cfg.get('PRETRAIN_MODEL', 'original_bert_path'), config=self._config)
        self.dropout = torch.nn.Dropout(float(utils.cfg.get('HYPER_PARAMETER', 'hidden_dropout_prob')))
        self.loss = torch.nn.CrossEntropyLoss()
        # Bug fix: the old `classifier` property constructed a brand-new
        # torch.nn.Linear on EVERY access, so the head was re-randomized on
        # each forward pass and its weights were never registered with the
        # module (invisible to the optimizer). Build it once here instead;
        # assigning it as an attribute registers it as a submodule.
        self._classifier = None
        if self.hidden_layers is not None:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            in_features = self._hidden_size * self.hidden_layers
            if self.pool_out is not None:
                in_features += self.labels
            self._classifier = torch.nn.Linear(in_features, self.labels).to(device)
            torch.nn.init.xavier_normal_(self._classifier.weight)

    @property
    def classifier(self):
        """The cached classification head, or None when hidden_layers is None."""
        return self._classifier

    def forward(self, b_input_ids, attention_mask, label_vec=None, window_record=None, day_index=[]):
        """Run BERT; with a custom head, replace the stock logits by the
        concatenated-hidden-layer head's softmax (and loss when labels given)."""
        outputs = self.bert(b_input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_vec)
        if self.hidden_layers is not None:
            outputs = self._concatenate_hidden_layer_pool_out(outputs, label_vec)
        # The commented code caused GPU out-of-memory; that step now lives
        # in the SentimentTime class as a temporary workaround.
        # if window_record is not None:
        #     outputs = (self._bayes_time(outputs[1], label_vec, window_record, day_index),) + outputs[1:]
        return outputs

    def _concatenate_hidden_layer_pool_out(self, original_output, label_vec):
        """Build logits from the concatenated [CLS] vectors of the last
        hidden_layers layers (plus optionally the stock output)."""
        cat_seq = (original_output[-2],) if self.pool_out is not None else ()
        # Negative indexing copes with original_output containing a loss
        # entry during training but not during evaluation.
        cat_seq = cat_seq + tuple(original_output[-1][-(i + 1)][:, 0] for i in range(self.hidden_layers))
        last_cat = torch.cat(cat_seq, 1)
        logits = self.classifier(last_cat)
        if label_vec is not None:
            loss = self.loss(logits.view(-1, self.labels), label_vec.view(-1))
            outputs = (loss,)
            outputs = outputs + tuple(torch.nn.functional.softmax(logits, -1))
        else:
            outputs = tuple(torch.nn.functional.softmax(logits, -1))
        return outputs

    def _bayes_time(self, logits, labels, window_record, day_index: list):
        r'''
        During training, weight the network outputs by the time-windowed
        sentiment priors before computing the loss.
        :return: loss
        '''
        res = torch.tensor([], requires_grad=True).double()
        probability = torch.from_numpy(window_record.loc[day_index].astype(float).values)
        probability.requires_grad = False
        for index, logit in enumerate(logits):
            tmp = torch.tensor([probability[index][i] * logit[i] for i in range(3)])
            res = torch.cat((res, tmp), 0)
        res = res.to('cuda')
        loss = self.loss(res.view(-1, self.labels), labels.view(-1))
        return loss
def train(use_variable_batch=0, train_bayes=0):
    """Fine-tune the BERT sentiment classifier, save it, and run prediction.

    By default fine-tuning is followed immediately by ``test``; comment that
    call out to instead load a previously saved model and only predict.

    Args:
        use_variable_batch: kept for interface compatibility; the dataloader
            currently reads this setting from the config file directly.
        train_bayes: when nonzero, correct the network output with the
            time-based sentiment prior before computing the loss.
    """
    seed()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    preprocessed_data = dp.LabeledDataset()
    sentiment_bayes = st.SentimentTime(train_label=preprocessed_data)
    window_record = sentiment_bayes.window_time_sentiment(window=1)
    ld = LabeledDataset(preprocessed_data=preprocessed_data, tokenizer=None,
                        use_variable_batch=int(utils.cfg.get('HYPER_PARAMETER', 'use_variable_batch')))
    # To concatenate hidden layers with the pooled output, pass the extra
    # arguments here, e.g.:
    # model = BertForSeqClassification(hidden_layers=2, pool_out=True, labels=3).to(device)
    model = BertForSeqClassification(labels=3).to(device)
    loss_values = []
    train_dataloader, validation_dataloader, train_index, validation_index = ld.get_dataloader()
    epochs = int(utils.cfg.get('HYPER_PARAMETER', 'epochs'))
    train_steps = len(train_dataloader) * epochs
    # region Optimizer and learning-rate scheduler
    param_optimizer = list(model.named_parameters())
    # Bias and LayerNorm parameters are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': float(utils.cfg.get('HYPER_PARAMETER', 'weight_decay'))},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = transformers.AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)
    scheduler = transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(
        utils.cfg.get('HYPER_PARAMETER', 'warmup_steps')), num_training_steps=train_steps)
    # endregion
    for epoch_i in range(0, epochs):
        print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
        print('Training...')
        t0 = time.time()
        total_loss, batch_cnt = 0, 0
        model.train()
        for step, batch in enumerate(train_dataloader):
            if step % 40 == 0 and not step == 0:
                elapsed = format_time(time.time() - t0)
                print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)
            # Map this batch's samples to day buckets for the time prior.
            day_index = cal_day_index(train_index, batch_cnt, b_input_ids.shape[0], preprocessed_data.cleaned_data)
            batch_cnt += b_input_ids.shape[0]
            model.zero_grad()
            outputs = model(b_input_ids, attention_mask=b_input_mask, label_vec=b_labels, window_record=window_record,
                            day_index=day_index)
            loss = outputs[0] if train_bayes == 0 else sentiment_bayes.bayes_train(outputs[1], b_labels, day_index, 1)
            total_loss += loss.item()
            loss.backward()
            # Fix: clip_grad_norm (no underscore) was deprecated in PyTorch 0.4
            # and removed later; clip_grad_norm_ is the in-place replacement.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
        avg_train_loss = total_loss / len(train_dataloader)
        loss_values.append(avg_train_loss)
        print(" Average training loss: {0:.2f}".format(avg_train_loss))
        print(" Training epoch took: {:}".format(format_time(time.time() - t0)))
        # region Validation
        print("Running Validation...")
        t0 = time.time()
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples, batch_cnt = 0, 0, 0
        for batch in validation_dataloader:
            batch = tuple(t.to(device) for t in batch)
            b_input_ids, b_input_mask, b_labels = batch
            day_index = cal_day_index(validation_index, batch_cnt, b_input_ids.shape[0], preprocessed_data.cleaned_data)
            batch_cnt += b_input_ids.shape[0]
            with torch.no_grad():
                outputs = model(b_input_ids, attention_mask=b_input_mask)
            logits = outputs[0] if train_bayes == 0 else sentiment_bayes.bayes_train(outputs[0], labels=None,
                                                                                    day_index=day_index, window=1)
            logits = logits.detach().cpu().numpy()
            label_ids = b_labels.to('cpu').numpy()
            tmp_eval_accuracy = flat_accuracy(logits, label_ids)
            eval_accuracy += tmp_eval_accuracy
            nb_eval_steps += 1
        print(" Accuracy: {0:.2f}".format(eval_accuracy / nb_eval_steps))
        print(" Validation took: {:}".format(format_time(time.time() - t0)))
        # endregion
    print("Training complete!")
    # region Save Model
    output_dir = '../Output/Bert_base_Chinese/'
    os.makedirs(output_dir, exist_ok=True)
    model_to_save = model.module if hasattr(model, 'module') else model
    # The custom model has no save_pretrained method, so save the state dict.
    # model_to_save.save_pretrained(output_dir)
    output_file = os.path.join(output_dir, 'pytorch_model.bin')
    torch.save(model_to_save.state_dict(), output_file)
    print("Saving model to %s" % output_dir)
    # endregion
    # Predict on the test set with the freshly fine-tuned model.
    test(model)
def test(model=None):
    """Predict sentiment labels for the test set and write the submission file.

    Args:
        model: a fine-tuned BertForSeqClassification instance; when None the
            model weights are loaded from the fine_tuned_bert_path configured
            in utils.cfg.
    """
    print('Predicting labels in test sentences...')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if model is None:
        model = BertForSeqClassification()
        model.load_state_dict(
            torch.load((utils.cfg.get('PRETRAIN_MODEL', 'fine_tuned_bert_path') + '/pytorch_model.bin')))
    model.to(device)
    # Print the parameter shapes for a quick sanity check of the loaded model.
    for param_tensor in model.state_dict():
        print(param_tensor, "\t", model.state_dict()[param_tensor].size())
    tokenizer = transformers.BertTokenizer.from_pretrained(
        utils.cfg.get('PRETRAIN_MODEL', 'fine_tuned_bert_path'))
    model.eval()
    test_set = dp.TestDataset()
    ul = Dataset(test_set, tokenizer)
    predict_dataloader = ul.get_dataloader(is_super=False)
    predictions = []
    for batch in tqdm.tqdm(predict_dataloader):
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask = batch
        with torch.no_grad(): outputs = model(b_input_ids, attention_mask=b_input_mask)
        logits = outputs[0]
        logits = logits.detach().cpu().numpy()
        predictions.append(logits)
    # Optional bayes-style corrections (disabled):
    # train_label = dp.LabeledDataset()
    # hashtag = ht.Hashtag(train_label=True, test=test_set)
    # sentiment_bayes = st.SentimentTime(test=test_set)
    # predictions = hashtag.bayes(predictions)
    # predictions = sentiment_bayes.bayes(predictions, 1)
    predict_labels = []
    for i in range(len(predictions)): predict_labels.append(np.argmax(predictions[i], axis=1).flatten().tolist())
    test_set.fill_result(list(itertools.chain(*predict_labels)))  # merge the per-batch lists into one list
    test_set.submit()
    print(' DONE.')
def cal_day_index(index, start, offset, weibo_data):
    """Map a slice of sample indices to day-bucket numbers.

    The bucket for a timestamp is ``(month - 1) * 31 + day``, i.e. every
    month is given a fixed width of 31 slots.

    Args:
        index: sequence of positional row indices into weibo_data.
        start, offset: select the slice ``index[start:start + offset]``.
        weibo_data: DataFrame with a datetime64 ``'datetime'`` column.

    Returns:
        A plain list of integer day buckets, one per selected row.
    """
    rows = weibo_data.iloc[index[start:start + offset]]
    stamps = rows['datetime'].dt
    return ((stamps.month - 1) * 31 + stamps.day).to_list()
def seed():
    """Seed the Python, NumPy and PyTorch (CPU and all GPUs) RNGs from the
    HYPER_PARAMETER.seed config value so training runs are reproducible."""
    random.seed(int(utils.cfg.get('HYPER_PARAMETER', 'seed')))
    np.random.seed(int(utils.cfg.get('HYPER_PARAMETER', 'seed')))
    torch.manual_seed(int(utils.cfg.get('HYPER_PARAMETER', 'seed')))
    torch.cuda.manual_seed_all(int(utils.cfg.get('HYPER_PARAMETER', 'seed')))
def flat_accuracy(preds, labels):
    """Fraction of samples whose argmax class matches the flattened labels.

    Args:
        preds: 2-D array of per-class scores, one row per sample.
        labels: array of integer gold labels.

    Returns:
        Accuracy in [0, 1] as a float.
    """
    predicted = np.argmax(preds, axis=1).flatten()
    truth = labels.flatten()
    return (predicted == truth).sum() / len(truth)
def format_time(elapsed):
    """Render a duration in seconds as h:mm:ss, rounded to the nearest second."""
    return str(datetime.timedelta(seconds=int(round(elapsed))))
if __name__ == '__main__':
    # Flags could also be read from the config instead of using the defaults:
    # use_variable_batch = int(utils.cfg.get('HYPER_PARAMETER', 'use_variable_batch'))
    # train_bayes = int(utils.cfg.get('HYPER_PARAMETER', 'train_bayes'))
    # Fine-tune (train() also predicts with the in-memory model), then predict
    # once more with the model reloaded from disk.
    train()
    test()
|
"""NSGA-II related functions"""
import functools
from nsga2.population import Population
import random
from examples.interfaceTriclusteringNSGAII import InterfaceTriclusteringNSGAII as InterfaceTrNSGA
import examples.triclusteringPlusAffiramationScore as tr
from examples.triclusteringPlusAffiramationScore import Tricluster
class NSGA2Utils(object):
    """Core NSGA-II operators (sorting, crowding, selection, variation) for
    the triclustering problem.

    The genetic operators act on ``individual.features`` (a 0/1 gene list)
    and the objective machinery on ``individual.objectives``.
    """

    def __init__(self, problem, num_of_individuals, mutation_strength=0.2, num_of_genes_to_mutate=5, num_of_tour_particips=2):
        """Store the problem definition and GA hyper-parameters.

        Args:
            problem: problem object providing generateIndividual(),
                calculate_objectives() and min/max objective bounds.
            num_of_individuals: population size.
            mutation_strength: legacy real-valued mutation step (unused by the
                current bit-flip mutation, kept for compatibility).
            num_of_genes_to_mutate: initial gene-mutation count (recomputed
                per child in __mutate).
            num_of_tour_particips: tournament size for parent selection.
        """
        self.problem = problem
        self.num_of_individuals = num_of_individuals
        self.mutation_strength = mutation_strength
        self.number_of_genes_to_mutate = num_of_genes_to_mutate
        self.num_of_tour_particips = num_of_tour_particips
        self.data = problem.zdt_definitions.data

    def fast_nondominated_sort(self, population):
        """Deb's fast non-dominated sort.

        Fills ``population.fronts`` with lists of individuals per Pareto
        front and assigns ``individual.rank`` (0 = best front). The final
        front appended is always empty (loop sentinel), matching the
        original behaviour.
        """
        population.fronts = []
        population.fronts.append([])
        for individual in population:
            individual.domination_count = 0
            individual.dominated_solutions = set()
            for other_individual in population:
                if individual.dominates(other_individual):
                    individual.dominated_solutions.add(other_individual)
                elif other_individual.dominates(individual):
                    individual.domination_count += 1
            if individual.domination_count == 0:
                population.fronts[0].append(individual)
                individual.rank = 0
        i = 0
        while len(population.fronts[i]) > 0:
            temp = []
            for individual in population.fronts[i]:
                for other_individual in individual.dominated_solutions:
                    other_individual.domination_count -= 1
                    if other_individual.domination_count == 0:
                        other_individual.rank = i + 1
                        temp.append(other_individual)
            i = i + 1
            population.fronts.append(temp)

    def cmp(self, a, b):
        """Python-2-style three-way comparison: -1, 0 or 1."""
        return (a > b) - (a < b)

    def cmp_to_key(self, mycmp):
        """Wrap an old-style cmp function into a sort key.

        Delegates to functools.cmp_to_key, which implements exactly the
        comparison-forwarding class this method previously hand-rolled;
        kept as a method for backward compatibility with existing callers.
        """
        return functools.cmp_to_key(mycmp)

    def __sort_objective(self, val1, val2, m):
        """cmp-style comparison of two individuals on objective index m."""
        return self.cmp(val1.objectives[m], val2.objectives[m])

    def calculate_crowding_distance(self, front):
        """Assign NSGA-II crowding distances to every individual in *front*.

        Bug fix vs. the previous revision: interior distances are now
        accumulated from the *objective values* of each individual's sorted
        neighbours (per Deb et al., 2002) instead of from neighbouring
        crowding distances, and the neighbour indices are aligned correctly
        (the old code also overwrote the boundary points). Boundary points
        keep the original convention of receiving max_objectives[m].
        """
        if len(front) > 0:
            solutions_num = len(front)
            for individual in front:
                individual.crowding_distance = 0
            for m in range(len(front[0].objectives)):
                # sorted() leaves the caller's front ordering untouched.
                ordered = sorted(front, key=lambda individual: individual.objectives[m])
                ordered[0].crowding_distance = self.problem.max_objectives[m]
                ordered[solutions_num - 1].crowding_distance = self.problem.max_objectives[m]
                scale = self.problem.max_objectives[m] - self.problem.min_objectives[m]
                if scale == 0:
                    # Degenerate objective: every individual has the same value.
                    continue
                for k in range(1, solutions_num - 1):
                    ordered[k].crowding_distance += (
                        ordered[k + 1].objectives[m] - ordered[k - 1].objectives[m]
                    ) / scale

    def crowding_operator(self, individual, other_individual):
        """Crowded-comparison operator: 1 if *individual* wins, else -1.

        Returns None (after printing diagnostics) when rank/crowding data is
        missing; callers compare the result to 1, so None behaves like a
        loss — this preserves the original (implicit) behaviour.
        """
        if (individual.rank is None or individual.crowding_distance is None or other_individual.crowding_distance is None):
            print("rank ----- ", individual.rank)
            print("indi --- ", individual.crowding_distance)
            print("other indi --- ", other_individual.crowding_distance)
            print("None objective function value")
            return None
        else:
            if (individual.rank < other_individual.rank) or \
                    ((individual.rank == other_individual.rank) and (individual.crowding_distance > other_individual.crowding_distance)):
                return 1
            else:
                return -1

    def nonEmptyTriclusterBackUp(self, individual):
        """Best-effort repair ensuring at least one row gene and one column
        gene are set.

        NOTE(review): after each scan the loop variable holds the *last*
        index, so an empty block is repaired by setting the last row/column
        bit rather than the first — confirm this is intended before reuse.
        """
        isRowsEmpty = True
        isColsEmpty = True
        j = 0
        for i in range(self.data.shape[0]):
            if individual.features[i] == 1:
                isRowsEmpty = False
        if isRowsEmpty:
            individual.features[i] = 1
        for j in range(self.data.shape[1]):
            if individual.features[i + j] == 1:
                isColsEmpty = False
        if isColsEmpty:
            individual.features[i + j] = 1

    def create_initial_population(self):
        """Generate ``num_of_individuals`` random individuals via the problem
        factory. (Earlier Trimax-seeded variants were removed as dead code.)"""
        population = Population()
        for i in range(self.num_of_individuals):
            print("Individual-", i)
            individual = self.problem.generateIndividual()
            population.population.append(individual)
        return population

    def create_children(self, population):
        """Produce an offspring list at least as large as *population* via
        tournament selection, crossover and mutation.

        NOTE(review): the inner while-loop assumes two distinct feature
        vectors exist in the population; a fully converged population would
        spin forever.
        """
        children = []
        while len(children) < len(population):
            parent1 = self.__tournament(population)
            parent2 = parent1
            # Re-draw until the second parent differs genetically.
            while parent1.features == parent2.features:
                parent2 = self.__tournament(population)
            child1, child2 = self.__crossover(parent1, parent2)
            self.__mutate(child1)
            self.__mutate(child2)
            self.problem.calculate_objectives(child1)
            self.problem.calculate_objectives(child2)
            children.append(child1)
            children.append(child2)
        return children

    def __crossover(self, individual1, individual2):
        """Exchange one randomly chosen gene between the parents (all other
        genes are copied straight through) and return two children."""
        child1 = self.problem.generateIndividual()
        child2 = self.problem.generateIndividual()
        min_gene_length = min(len(child1.features), len(child2.features))
        min_individual_length = min(len(individual1.features), len(individual2.features))
        min_choromosome = min(min_individual_length, min_gene_length)
        genes_indexes = range(min_choromosome)
        # A single swapped index: effectively a one-gene uniform crossover.
        half_genes_indexes = random.sample(genes_indexes, 1)
        for i in genes_indexes:
            if i in half_genes_indexes:
                child1.features[i] = individual2.features[i]
                child2.features[i] = individual1.features[i]
            else:
                child1.features[i] = individual1.features[i]
                child2.features[i] = individual2.features[i]
        return child1, child2

    def __mutate(self, child):
        """Bit-flip mutation: with probability 0.3, flip a random quarter of
        the child's genes. (The sample is drawn before the probability test
        to keep the RNG stream identical to the original implementation.)"""
        mutation_parameter = random.random()
        self.number_of_genes_to_mutate = int(len(child.features) / 4)
        genes_to_mutate = random.sample(range(0, len(child.features)), self.number_of_genes_to_mutate)
        if mutation_parameter > 0.7:
            for gene in genes_to_mutate:
                child.features[gene] = 1 if child.features[gene] == 0 else 0

    def __tournament(self, population):
        """Pick the crowded-comparison winner among ``num_of_tour_particips``
        randomly sampled individuals."""
        participants = random.sample(list(population), self.num_of_tour_particips)
        best = None
        for participant in participants:
            if best is None or self.crowding_operator(participant, best) == 1:
                best = participant
        return best
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 5 18:11:33 2016
@author: johnlewisiii
"""
import math
import os
import statistics
import sys
from importlib import reload
import emcee
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy import constants as constants
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import Table
from astropy.wcs import WCS
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
from scipy import integrate, interpolate, ndimage, signal, special, stats
from weighted import quantile
# Short alias for scipy.ndimage used throughout this module.
nd = ndimage

# Absolute directory containing this file (handy for locating data files).
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def nice_pandas(format="{:3.3g}"):
    """Set pandas' float display to *format* (default: 3 significant figures)."""
    fmt = format
    pd.set_option("display.float_format", lambda x: fmt.format(x))
#############################
#############################
#### Plotting commands ####
#############################
#############################
# Set uniform plot options
# some constants
# Gaussian sigma-to-FWHM conversion factor: FWHM = fwhm * sigma.
fwhm = 2 * np.sqrt(2 * np.log(2))
def set_plot_opts(serif_fonts=True):
    """Apply the module's default matplotlib rcParams (STIX math, serif, 14 pt).

    Pass serif_fonts=False to leave rcParams untouched.
    """
    if not serif_fonts:
        return None
    mpl.rcParams.update({
        "mathtext.fontset": "stix",
        "font.family": "serif",
        "font.size": 14,
    })
    return None
def check_iterable(arr):
    """Return True when *arr* exposes ``__iter__`` (strings count as iterable)."""
    return getattr(arr, "__iter__", None) is not None
def color_array(arr, alpha=1):
    """Convert a 2-D array of matplotlib color specs into an RGBA image.

    Each cell of *arr* is interpreted with mpl.colors.to_rgb; the given
    *alpha* fills the fourth channel. The result can be shown with imshow.
    """
    rgba = np.zeros(arr.shape + (4,))
    n_rows, n_cols = arr.shape[0], arr.shape[1]
    for r in range(n_rows):
        for c in range(n_cols):
            rgba[r, c, 0:3] = mpl.colors.to_rgb(arr[r, c])
            rgba[r, c, 3] = alpha
    return rgba
def arr_to_rgb(arr, rgb=(0, 0, 0), alpha=1, invert=False, ax=None):
    """Turn a scalar array into a single-color RGBA image whose opacity
    follows the (min-max normalised) array values.

    Args:
        arr: scalar array; normalised to [0, 1] with nanmin/nanmax.
        rgb: color as an (r, g, b) float triple or a matplotlib color string.
        alpha: maximum opacity.
        invert: use 1 - normalised values for the alpha channel.
        ax: unused; kept for interface compatibility.
    Returns the (rows, cols, 4) RGBA image.
    """
    scaled = np.asarray(arr, dtype=np.float64)
    scaled = scaled - np.nanmin(scaled)
    scaled = scaled / np.nanmax(scaled)
    if isinstance(rgb, str):
        rgb = mpl.colors.to_rgb(rgb)
    if invert:
        scaled = 1 - scaled
    out = np.zeros(scaled.shape + (4,))
    out[..., 3] = scaled * alpha
    out[..., 0], out[..., 1], out[..., 2] = rgb
    return out
def invert_color(ml, *args, **kwargs):
    """Return the hex color whose HSV components are each 1 minus the input's."""
    h, s, v = mpl.colors.rgb_to_hsv(mpl.colors.to_rgb(ml))
    inverted_hsv = (1 - h, 1 - s, 1 - v)
    return mpl.colors.to_hex(mpl.colors.hsv_to_rgb(inverted_hsv))
def icol(*args, **kwargs):
    """Shorthand alias for invert_color()."""
    return invert_color(*args, **kwargs)
def get_xylim(ax=None):
    """Return the (xlim, ylim) tuples of *ax* (current axes when None)."""
    if ax is None:
        ax = plt.gca()
    return ax.get_xlim(), ax.get_ylim()
def set_xylim(xlim=None, ylim=None, ax=None, origin=None):
    """set xylims with tuples
    xlim: tuple of x axis limits
    ylim: tuple of y axis limits
    ax: axes to modify (defaults to the current axes)
    origin: sometimes you just want to change the origin
            so you can keep the axis limits the same
            but just change origin.
            True pins the origin of each *linear* axis at 0;
            an (x0, y0) pair sets the lower limits explicitly.
    Returns the final (xlim, ylim) as tuples.
    """
    if ax is None:
        ax = plt.gca()
    if xlim is None:
        xlim = ax.get_xlim()
    if ylim is None:
        ylim = ax.get_ylim()
    # Convert to lists so the origin handling below can assign in place.
    if isinstance(xlim, tuple):
        xlim = list(xlim)
    if isinstance(ylim, tuple):
        ylim = list(ylim)
    if origin is not None:
        if origin is True:
            # Only pin 0 on linear axes; log axes cannot include 0.
            if ax.get_xaxis().get_scale()[:3] != "log":
                xlim[0] = 0
            if ax.get_yaxis().get_scale()[:3] != "log":
                ylim[0] = 0
        else:
            xlim[0] = origin[0]
            ylim[0] = origin[1]
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    return tuple(xlim), tuple(ylim)
def get_cax(ax=None, position="right", frac=0.03, pad=0.05):
    """Append a colorbar axes matching the height (or width) of *ax*.

    position: "left"/"right" for a vertical bar, "top"/"bottom" for a
    horizontal one. frac is the bar thickness as a fraction of the axes,
    pad the gap between axes and bar. Returns the new cax and restores *ax*
    as the current axes.
    """
    if ax is None:
        ax = plt.gca()
    divider = make_axes_locatable(ax)
    cax = divider.append_axes(position, size=f"{frac * 100}%", pad=pad)
    plt.sca(ax)
    return cax
def colorbar(mappable=None, cax=None, ax=None, size=0.03, pad=0.05, **kw):
    """Wrapper for pyplot.colorbar that builds a height-matched cax by default.

    Extra keyword arguments are forwarded to plt.colorbar; returns the
    Colorbar instance.
    """
    if ax is None:
        ax = plt.gca()
    if cax is None:
        cax = get_cax(ax=ax, frac=size, pad=pad)
    return plt.colorbar(mappable, cax=cax, ax=ax, **kw)
# Plot the KDE for a set of x,y values. No weighting code modified from
# http://stackoverflow.com/questions/30145957/plotting-2d-kernel-density-estimation-with-python
def kdeplot(xp, yp, filled=False, ax=None, grid=None, bw=None, *args, **kwargs):
    """Contour plot of the Gaussian KDE of the points (xp, yp).

    Args:
        xp, yp: 1-D sample arrays of equal length.
        filled: use contourf instead of contour.
        ax: target axes (defaults to the current axes).
        grid: optional (nx, ny) evaluation grid; the default spans the data
            range with 256 points per axis.
        bw: bandwidth forwarded to gaussian_kde.set_bandwidth (None keeps
            the default Scott rule).
    Extra *args/**kwargs go to contour/contourf. Returns the contour set.
    """
    if ax is None:
        ax = plt.gca()
    rvs = np.append(xp.reshape((xp.shape[0], 1)), yp.reshape((yp.shape[0], 1)), axis=1)
    # Fix: scipy.stats.kde was a private module (deprecated and later
    # removed); stats.gaussian_kde is the identical public name.
    kde = stats.gaussian_kde(rvs.T)
    # kde.covariance_factor = lambda: 0.3
    # kde._compute_covariance()
    kde.set_bandwidth(bw)
    # Regular grid to evaluate kde upon
    if grid is None:
        x_flat = np.r_[rvs[:, 0].min() : rvs[:, 0].max() : 256j]
        y_flat = np.r_[rvs[:, 1].min() : rvs[:, 1].max() : 256j]
    else:
        x_flat = np.r_[0 : grid[0] : complex(0, grid[0])]
        y_flat = np.r_[0 : grid[1] : complex(0, grid[1])]
    x, y = np.meshgrid(x_flat, y_flat)
    grid_coords = np.append(x.reshape(-1, 1), y.reshape(-1, 1), axis=1)
    z = kde(grid_coords.T)
    z = z.reshape(x.shape[0], x.shape[1])
    if filled:
        cont = ax.contourf
    else:
        cont = ax.contour
    cs = cont(x_flat, y_flat, z, *args, **kwargs)
    return cs
def wcsaxis(header, N=6, ax=None, fmt="%0.2f", use_axes=False,label=True):
    """Relabel pixel-coordinate axes with world coordinates from a FITS header.

    Args:
        header: FITS header with WCS keywords (NAXIS, CTYPE1, ...).
        N: number of ticks per axis when use_axes is False.
        ax: target axes (defaults to the current axes).
        fmt: %-style format for the tick labels.
        use_axes: reuse the axes' existing tick positions instead of
            placing N evenly spaced ticks.
        label: add Galactic or equatorial axis labels based on CTYPE1.
    Returns the axes (restores the previously current axes on exit).
    """
    oldax = plt.gca()
    if ax is None:
        ax = plt.gca()
    plt.sca(ax)
    xlim = ax.axes.get_xlim()
    ylim = ax.axes.get_ylim()
    wcs = WCS(header)
    naxis = header["NAXIS"]  # naxis
    naxis1 = header["NAXIS1"]  # naxis1
    naxis2 = header["NAXIS2"]  # naxis2
    # crpix1 = hdr['CRPIX1']
    # crpix2 = hdr['CRPIX2']
    # crval1 = hdr['CRVAL1']
    # crval2 = hdr['CRVAL2']
    # try:
    #    cdelt1 = wcs['CDELT1']
    #    cdelt2 = wcs['CDELT2']
    # except BaseException:
    #    cdelt1 = wcs['CD1_1']
    #    cdelt2 = wcs['CD2_2']
    if not use_axes:
        # Evenly spaced ticks, inset by 1/5 of the tick spacing at each end.
        xoffset = ((xlim[1] - xlim[0]) / N) / 5
        x = np.linspace(xlim[0] + xoffset, xlim[1] - xoffset, N)
        if naxis >= 2:
            yoffset = ((ylim[1] - ylim[0]) / N) / 5
            y = np.linspace(ylim[0] + yoffset, ylim[1] - yoffset, N)
    else:
        x = ax.get_xticks()
        if naxis >= 2:
            y = ax.get_yticks()
    if naxis == 1:
        x_tick = wcs.all_pix2world(x, 0)
    elif naxis == 2:
        coord = list(zip(x, y))
        x_tick, y_tick = wcs.all_pix2world(coord, 0).T
    elif naxis > 2:
        # Pad extra WCS axes with zeros so pix2world gets full coordinates.
        c = [x, y]
        for i in range(naxis - 2):
            c.append([0] * N)
        coord = list(zip(*c))
        ticks = wcs.all_pix2world(coord, 0)
        x_tick, y_tick = np.asarray(ticks)[:, :2].T
    plt.xticks(x, [fmt % i for i in x_tick])
    plt.yticks(y, [fmt % i for i in y_tick])
    if label:
        # CTYPE1 starting with 'G' means Galactic coordinates.
        if header["CTYPE1"][0].lower() == "g":
            plt.xlabel("Galactic Longitude (l)")
            plt.ylabel("Galactic Latitude (b)")
        else:
            plt.xlabel("Right Ascension (J2000)")
            plt.ylabel("Declination (J2000)")
    ax.axes.set_xlim(xlim[0], xlim[1])
    ax.axes.set_ylim(ylim[0], ylim[1])
    plt.sca(oldax)
    return ax
def rectangle(c, w, h, angle=0, center=True):
    """
    create rotated rectangle
    for input into PIL ImageDraw.polygon
    to make a rectangle polygon mask

    Rectagle is created and rotated with center
    at zero, and then translated to center position

    accepts centers
    Default : center
    options for center: tl, tr, bl, br

    Returns the four (x, y) corner tuples.
    NOTE(review): relies on a module-level ``rot_matrix`` helper defined
    elsewhere in this file; the ``.A`` below suggests it returns np.matrix —
    confirm before porting.
    """
    cx, cy = c
    # define initial polygon irrespective of center
    x = -w / 2.0, +w / 2.0, +w / 2.0, -w / 2.0
    y = +h / 2.0, +h / 2.0, -h / 2.0, -h / 2.0
    # correct the center if starting from corner
    if center is not True:
        if center[0] == "b":
            # y = tuple([i + h/2. for i in y])
            cy = cy + h / 2.0
        else:
            # y = tuple([i - h/2. for i in y])
            cy = cy - h / 2.0
        if center[1] == "l":
            # x = tuple([i + w/2 for i in x])
            cx = cx + w / 2.0
        else:
            # x = tuple([i - w/2 for i in x])
            cx = cx - w / 2.0
    R = rot_matrix(angle * np.pi / 180.0)
    c = []
    for i in range(4):
        xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
        # coord switch to match ordering of FITs dimensions
        c.append((cx + xr, cy + yr))
    # print (cx,cy)
    return c
def rectangle2(c, w, h, angle=0, center=True):
    """
    create rotated rectangle
    for input into PIL ImageDraw.polygon
    to make a rectangle polygon mask

    Rectagle is created and rotated with center
    at zero, and then translated to center position

    accepts centers
    Default : center
    options for center: tl, tr, bl, br

    Same construction as rectangle(), but returns a closed outline as a
    2 x 5 array of (x, y) coordinates (first corner repeated at the end),
    ready for ax.plot.
    """
    cx, cy = c
    # define initial polygon irrespective of center
    x = -w / 2.0, +w / 2.0, +w / 2.0, -w / 2.0
    y = +h / 2.0, +h / 2.0, -h / 2.0, -h / 2.0
    # correct center if starting from corner
    if center is not True:
        if center[0] == "b":
            # y = tuple([i + h/2. for i in y])
            cy = cy + h / 2.0
        else:
            # y = tuple([i - h/2. for i in y])
            cy = cy - h / 2.0
        if center[1] == "l":
            # x = tuple([i + w/2 for i in x])
            cx = cx + w / 2.0
        else:
            # x = tuple([i - w/2 for i in x])
            cx = cx - w / 2.0
    R = rot_matrix(angle * np.pi / 180.0)
    c = []
    for i in range(4):
        xr, yr = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
        # coord switch to match ordering of FITs dimensions
        c.append((cx + xr, cy + yr))
    # print (cx,cy)
    return np.array([c[0], c[1], c[2], c[3], c[0]]).T
def plot_rectangle(c, w, h, angle=0, center=True, ax=None, n=10, m="-", **plot_kwargs):
    """Plot a (possibly rotated) rectangle outline on *ax*.

    For simple rectilinear axes, matplotlib's built-in
    ``ax.add_patch(plt.Rectangle(...))`` also works; this helper additionally
    interpolates each edge with *n* points so the outline follows curved or
    projected coordinate systems.

    Args:
        c, w, h, angle, center: forwarded to rectangle2().
        ax: target axes (defaults to the current axes).
        n: number of interpolation points per edge.
        m: line style for the interpolated edges.
    Returns the axes.
    """
    if ax is None:
        ax = plt.gca()
    x, y = rectangle2(c, w, h, angle=angle, center=center)
    ax.plot(x, y, **plot_kwargs)
    n = n * 1j  # complex step -> sample count for np.r_
    # interpolate each linear segment
    leg1 = np.r_[x[0] : x[1] : n], np.r_[y[0] : y[1] : n]
    leg2 = np.r_[x[1] : x[2] : n], np.r_[y[1] : y[2] : n]
    leg3 = np.r_[x[2] : x[3] : n], np.r_[y[2] : y[3] : n]
    leg4 = np.r_[x[3] : x[4] : n], np.r_[y[3] : y[4] : n]
    ax.plot(*leg1, m, *leg2, m, *leg3, m, *leg4, m, **plot_kwargs)
    return ax
def color_hue_shift(c, shift=1):
    """Return *c* with its hue rotated by *shift* (in units of the hue circle).

    Fix: the previous ``h + shift % 1`` bound ``%`` to ``shift`` alone, so the
    default shift of 1 was a no-op and other shifts could push the hue out of
    [0, 1). The sum is now wrapped: ``(h + shift) % 1``.
    """
    c = mpl.colors.to_rgb(c)
    h, s, v = mpl.colors.rgb_to_hsv(c)
    h = (h + shift) % 1
    return mpl.colors.to_hex(mpl.colors.hsv_to_rgb((h, s, v)))
def plot_covariances(p, cov, names=None, figsize=(12, 12), nsamps=5000, smooth=1):
    """Corner plot of a covariance matrix by drawing nsamps samples from a
    multivariate normal centred on *p*.

    NOTE(review): relies on a module-level ``corner`` helper defined
    elsewhere in this file.
    """
    p = np.random.multivariate_normal(p, cov, nsamps)
    fig, axs = corner(p, smooth=smooth, names=names, figsize=figsize)
    return fig, axs
def plot_astropy_fit_covariances(fit, fitter):
    """Corner plot of an astropy fit's parameter covariances.

    *fit* supplies the best-fit parameters and names; *fitter* must expose
    fit_info["param_cov"] (e.g. LevMarLSQFitter with calc_uncertainties).
    """
    p = fit.parameters
    cov = fitter.fit_info["param_cov"]
    ax = plot_covariances(p, cov, names=fit.param_names)
    return ax
def plot_walkers(sampler, limits=None, bad=None):
    """
    sampler : emcee Sampler class (or a raw (walkers, steps, ndim) chain array)
    limits : discard the first `limits` steps of every walker (chain[:, limits:, :])
    bad : optional per-walker boolean mask; flagged walkers are drawn in red
    Returns the figure with one subplot per parameter.
    """
    # Accept either a plain array or an emcee sampler object.
    if hasattr(sampler, "__getitem__"):
        chain = sampler
        ndim = chain.shape[-1]
    else:
        chain = sampler.chain
        ndim = sampler.ndim
    fig = plt.figure(figsize=(8 * ndim, 4 * ndim))
    for w, walk in enumerate(chain[:, limits:, :]):
        if bad is None:
            color = "k"
        elif bad[w]:
            color = "r"
        else:
            color = "k"
        for p, param in enumerate(walk.T):
            ax = plt.subplot(ndim, 1, p + 1)
            ax.plot(param, color, alpha=0.75, lw=0.75)
            # ax.set_ylim(param.min()*0.5,param.max()*1.5)
            # ax.semilogy()
    plt.tight_layout()
    return fig
# TODO
# Make it scale properly
# How does matplotlib
# scaling work
def combine_cmap(cmaps, lower, upper, name="custom", N=None, register=True):
    """Stitch several colormaps into one, each covering its [lower, upper] span.

    cmaps: list of colormaps or colormap names (the list is modified in place
    when names are resolved). lower/upper: per-colormap value ranges; N:
    per-colormap sample counts (default 256 each). When *name* is not
    "custom" and *register* is True, the result is registered with matplotlib.
    NOTE(review): mpl.cm.get_cmap / register_cmap are deprecated in
    Matplotlib >= 3.7 (use mpl.colormaps) — confirm the pinned version
    before modernising.
    """
    n = len(cmaps)
    for ic, c in enumerate(cmaps):
        if isinstance(c, str):
            cmaps[ic] = mpl.cm.get_cmap(c)
    if N is None:
        N = [256] * n
    values = np.array([])
    colors = np.empty((0, 4))
    for i in range(n):
        step = (upper[i] - lower[i]) / N[i]
        xcols = np.arange(lower[i], upper[i], step)
        values = np.append(values, xcols)
        # Normalise this segment to [0, 1] before sampling its colormap.
        xcols -= xcols.min()
        xcols /= xcols.max()
        cols = cmaps[i](xcols)
        colors = np.vstack([colors, cols])
    # Normalise the combined value axis to [0, 1].
    values -= values.min()
    values /= values.max()
    arr = list(zip(values, colors))
    cmap = mpl.colors.LinearSegmentedColormap.from_list(name, arr)
    if (name != "custom") & register:
        mpl.cm.register_cmap(name=name, cmap=cmap)
    return cmap
def custom_cmap(colormaps, lower, upper, log=(0, 0)):
    """
    colormaps : a list of N matplotlib colormap classes
    lower : the lower limits for each colormap: array or tuple
    upper : the upper limits for each colormap: array or tuple
    log   : Do you want to plot logscale. This will create
            a color map that is usable with LogNorm()
    Returns a LinearSegmentedColormap spanning all the given ranges.
    """
    if isinstance(log, tuple):
        for lg in log:
            if lg:
                # Work in log10 units relative to the first lower limit.
                upper = [np.log10(i / lower[0]) for i in upper]
                lower = [np.log10(i / lower[0]) for i in lower]
                norm = upper[-1:][0]
            else:
                lower = lower
                upper = upper
                norm = upper[-1:][0]
    elif log:
        upper = [np.log10(i / lower[0]) for i in upper]
        lower = [np.log10(i / lower[0]) for i in lower]
        norm = upper[-1:][0]
    else:
        lower = lower
        upper = upper
        norm = upper[-1:][0]
    for ic, c in enumerate(colormaps):
        if isinstance(c, str):
            colormaps[ic] = mpl.cm.get_cmap(c)
    cdict = {"red": [], "green": [], "blue": []}
    for color in ["red", "green", "blue"]:
        for j, col in enumerate(colormaps):
            # print j,col.name,color
            # Pull this channel's breakpoints from the source colormap...
            x = [i[0] for i in col._segmentdata[color]]
            y1 = [i[1] for i in col._segmentdata[color]]
            y0 = [i[2] for i in col._segmentdata[color]]
            # ...rescale them into this colormap's [lower, upper] slot.
            x = [(i - min(x)) / (max(x) - min(x)) for i in x]
            x = [((i * (upper[j] - lower[j])) + lower[j]) / norm for i in x]
            if (j == 0) & (x[0] != 0):
                x[:0], y1[:0], y0[:0] = [0], [y1[0]], [y0[0]]
            for i in range(len(x)):  # first x needs to be zero
                cdict[color].append((x[i], y1[i], y0[i]))
    return colors.LinearSegmentedColormap("my_cmap", cdict)
def cmap_split(*args, **kwargs):
    """Alias for split_cmap (name kept for backward compatibility)."""
    return split_cmap(*args, **kwargs)
def split_cmap(cmapn='viridis',split=0.5,vmin=0, vmaxs=(.5,1),vstep=None,
               vsplit=None,log=False):
    """
    split a colormap at a certain location

    split - where along the colormap will be our split point
            by default this split point is put in the middle
            of the values
    vmin    value for colorbar to start at: should max vim in
            plotting command
    vmaxs   (splitvalue,vmax) - where to start the second segment
            of the color map. cmap(split) will be located
            at valeu=splitvalue
    vsplit  instead of giving vmin,vmax you can split it at a
            value between 0,1.
    vstep   sampling step along the value axis (default: range/1024)
    log     doesn't do what anyone would think, don't recommend using

    Returns a LinearSegmentedColormap with the two pieces joined.
    """
    if vsplit is not None:
        vmin=0
        vmaxs=(vsplit,1)
    vmin1 = vmin
    vmax1 = vmaxs[0]
    vmin2 = vmax1
    vmax2 = vmaxs[1]
    if vstep is None:
        vstep= (vmax2 - vmin1)/1024
    # Value levels for each segment; the counts set how many colors to sample.
    levels1 = np.arange(vmin1, vmax1+vstep, vstep)
    levels2 = np.arange(vmin2, vmax2+vstep, vstep)
    ncols1 = len(levels1)-1
    #ncols1 = int((vmax1-vmin1)//vstep)
    ncols2 = len(levels2)-1
    # ncols1 = int((vmax1-vmin1)//vstep)+1
    # ncols2 = int((vmax2-vmin2)//vstep)+1
    # ncols = ncols1 + ncols2
    split = split
    # Sample the right number of colours
    # from the right bits (between 0 & 1) of the colormaps we want.
    cmap2 = mpl.cm.get_cmap(cmapn)
    if log:
        cmap1 = mpl.cm.get_cmap(cmapn+'_r')
        cols1 = cmap1(np.logspace(np.log10(1-split),0, ncols1))[::-1]
        cols2 = cmap2(np.logspace(np.log10(split), 0, ncols2))
    else:
        cols1 = cmap2(np.linspace(0.0, split, ncols1))
        cols2 = cmap2(np.linspace(split, 1, ncols2))
        #cols2 = cmap2(np.logspace(np.log10(split), 0, ncols2))
    # Combine them and build a new colormap:
    allcols2 = np.vstack( (cols1,cols2) )
    return mpl.colors.LinearSegmentedColormap.from_list('piecewise2', allcols2)
def plot_2dhist(
    X,
    Y,
    xlog=True,
    ylog=True,
    cmap=None,
    norm=mpl.colors.LogNorm(),
    vmin=None,
    vmax=None,
    bins=50,
    statistic=np.nanmean,
    statstd=np.nanstd,
    histbins=None,
    histrange=None,
    cmin=1,
    binbins=None,
    weighted_fit=True,
    ax=None,
    plot_bins=True,
    plot_fit=True,
):
    """[plot the 2d hist and x-binned version]
    Arguments:
        X {array} -- array of x-values
        Y {array} -- array of y-values
    Keyword Arguments:
        xlog {bool} -- use log of X (default: {True})
        ylog {bool} -- use log of Y (default: {True})
        cmap {[type]} -- cmap for histogram (default: {None})
        norm {[type]} -- normalization for histogram cmap (default: {mpl.colors.LogNorm()})
        vmin {number} -- min val for cmap (default: {None})
        vmax {number} -- max val for cmap (default: {None})
        bins {int} -- number of bins for hist2d (default: {50})
        statistic {function} -- statistic function (default: {np.nanmean})
        statstd {function} -- error stat function (default: {np.nanstd})
        histbins {[type]} -- bins for hisogram (default: {None})
        histrange {(xmin,xmax),(ymin,ymax)} -- range for histogram (default: {None})
        cmin {int} -- minimum count for a hist2d cell to be drawn (default: {1})
        binbins {[type]} -- explicit bin edges for the x-binned statistics (default: {None})
        weighted_fit {bool} -- weight the linear fit by 1/err**2 (default: {True})
        ax {[type]} -- target axes; defaults to current axes (default: {None})
        plot_bins {bool} -- overplot the binned statistics (default: {True})
        plot_fit {bool} -- overplot the linear best fit (default: {True})
    Returns:
        [tuple] -- [x, y, p, ax]

    Notes:
    this uses mavg from this file. if it is not available, please change
    """
    if ax is None:
        ax = plt.gca()
    if xlog:
        x = np.log10(X)
    else:
        x = np.asarray(X)
    if ylog:
        y = np.log10(Y)
    else:
        y = np.asarray(Y)
    _ = ax.hist2d(
        x,
        y,
        range=histrange,
        bins=histbins,
        cmap=cmap,
        cmin=cmin,
        norm=norm,
        vmin=vmin,
        vmax=vmax,
        zorder=1,
    )
    # bin the data
    if binbins is None:
        binbins = np.linspace(np.nanmin(x), np.nanmax(x), 10)
    st, be, _ = stats.binned_statistic(x, y, statistic=statistic, bins=binbins)
    est, be, _ = stats.binned_statistic(x, y, statistic=statstd, bins=binbins)
    # Keep only bins where both the statistic and its error are finite.
    cl = np.isfinite(st) & np.isfinite(est)
    if plot_bins:
        ax.errorbar(
            mavg(be)[cl],
            st[cl],
            yerr=est[cl],
            fmt="s",
            color="r",
            label="binned data",
            lw=1.5,
            zorder=2,
        )
    # NOTE(review): the fit drops the first finite bin ([1:]) — presumably to
    # skip a low-count edge bin; confirm before changing.
    if weighted_fit:
        p = np.polyfit(mavg(be)[cl][1:], st[cl][1:], 1, w=1 / est[cl][1:] ** 2)
    else:
        p = np.polyfit(mavg(be)[cl][1:], st[cl][1:], 1)
    funcname = "Best fit: {m:0.5G}*x + {b:0.5G}".format(m=p[0], b=p[1])
    if plot_fit:
        ax.plot([0, 64], np.polyval(p, [0, 64]), "dodgerblue", lw=1.5, label=funcname)
    ax.legend()
    return x, y, p, ax
def hist2d(
    x,
    y,
    range=None,
    bins=20,
    smooth=False,
    clip=False,
    pad=True,
    normed=True,
    weights=None,
):
    """Compute a (optionally smoothed, padded, normalized) 2-D histogram.

    Arguments:
        x, y {array-like} -- data coordinates; non-finite pairs are dropped
        range {None or ((xmin,xmax),(ymin,ymax)) or (min,max)} -- histogram range
        bins {int, "auto", or (xbins, ybins)} -- bin specification passed to
            np.histogram_bin_edges
        smooth {number or False} -- sigma for gaussian smoothing (0/False = off)
        clip {bool} -- see NOTE below; currently has no visible effect
        pad {bool} -- pad the histogram edges (via extend_hist) so contours close
        normed {bool} -- normalize counts via data2norm

    Returns:
        (H.T, X1, Y1) -- transposed histogram and bin-center coordinate arrays,
        ready for contour/contourf(X1, Y1, H.T)

    NOTE(review): if `bins` is an iterable whose length is not 2 (e.g. a single
    array of edges) and `range` is None, neither sub-branch assigns
    xedges/yedges and `bins = [xedges, yedges]` raises NameError -- confirm
    intended input types.
    NOTE(review): `oldH` is computed when clip+smooth but never used --
    presumably the smoothed H was meant to be re-zeroed where oldH is True;
    confirm before relying on `clip`.
    """
    # keep only points where both coordinates are finite
    g = np.isfinite(x + y)
    x = np.array(x)[g]
    y = np.array(y)[g]
    if bins is not None:
        if range is None:
            if isinstance(bins, int) or (bins == "auto"):
                # same bin count in both dimensions
                xedges = np.histogram_bin_edges(x, bins=bins)
                yedges = np.histogram_bin_edges(y, bins=bins)
            elif check_iterable(bins) & (len(bins) == 2):
                # per-axis bin specifications
                xedges = np.histogram_bin_edges(x, bins=bins[0])
                yedges = np.histogram_bin_edges(y, bins=bins[1])
            bins = [xedges, yedges]
        else:
            if (len(range)==2) & (len(range[0])==2):
                # range given per axis: ((xmin,xmax),(ymin,ymax))
                xedges = np.histogram_bin_edges(x, bins=bins, range=range[0])
                yedges = np.histogram_bin_edges(y, bins=bins, range=range[1])
            else:
                # single (min,max) range applied to both axes
                xedges = np.histogram_bin_edges(x, bins=bins, range=range)
                yedges = np.histogram_bin_edges(y, bins=bins, range=range)
            bins = [xedges, yedges]
    elif range is None:
        # bins is None and range is None: let numpy pick default edges
        xedges = np.histogram_bin_edges(x, bins=bins)
        yedges = np.histogram_bin_edges(y, bins=bins)
        bins = [xedges, yedges]
        range = None
    else:
        # bins is None, explicit range: ensure each (min,max) pair is ordered
        range = list(map(np.sort, range))
    H, X, Y = np.histogram2d(x, y, bins=bins, range=range, weights=weights)
    # bin centers from bin edges
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
    if pad:
        # pad proportionally to the smoothing width (at least 2 cells)
        padn = np.max([2, int(smooth * 2 // 1)])
        H, X1, Y1 = extend_hist(H, X1, Y1, fill=0, padn=padn)
    if smooth:
        if clip:
            oldH = H == 0
        H = nd.gaussian_filter(H, smooth)
    if normed:
        sm = data2norm(H)
    else:
        sm = H
    # transpose so the result matches contour's (rows=y, cols=x) convention
    return sm.T, X1, Y1
def clean_color(color, reverse=False):
    """Normalize a color/colormap spec into ``(name, reversed_flag)``.

    A string ending in ``"_r"`` has the suffix stripped and is flagged as
    reversed; any other string honors the explicit ``reverse`` flag (strictly
    ``reverse is True``, matching matplotlib-style booleans).  Non-string
    colors are passed through unchanged together with the given flag.
    """
    if not isinstance(color, str):
        return color, reverse
    if color.endswith("_r"):
        return color[:-2], True
    return color, reverse is True
def color_cmap(c, alpha=1, to_white=True, reverse=False):
    """Build a two-point linear colormap running from color ``c`` to
    white (or black when ``to_white`` is False).

    ``c`` may carry a trailing ``"_r"`` (handled by clean_color), in which
    case -- or when ``reverse`` is True -- the resulting map is reversed.
    ``alpha`` sets the opacity of the white/black endpoint.
    """
    endpoint = (1, 1, 1, alpha) if to_white else (0, 0, 0, alpha)
    base, flip = clean_color(c, reverse=reverse)
    cmap = mpl.colors.LinearSegmentedColormap.from_list("density_cmap", [base, endpoint])
    return cmap.reversed() if flip else cmap
def contour_level_colors(cmap, levels, vmin=None, vmax=None, center=True):
    """Return the colors that ``contourf`` would assign to ``levels``.

    Colors are sampled at the midpoints of the intervals of ``[0, *levels]``,
    matching contourf's convention of coloring the region *between* levels.

    Keyword Arguments:
        vmin {number} -- min of the normalization range (default: {0})
        vmax {number} -- max of the normalization range (default: {max(levels)})
        center {bool} -- currently unused in the body; kept for interface
            compatibility

    Returns:
        [ndarray] -- array of RGBA colors, one per level
    """
    if not vmin:
        vmin = 0
    if not vmax:
        vmax = max(levels)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    # prepend 0 so each level gets the midpoint of the interval below it
    padded = np.r_[0, levels]
    midpoints = 0.5 * (padded[1:] + padded[:-1])
    return mpl.cm.get_cmap(cmap)(norm(midpoints))
def stat_plot1d(x, ax=None, bins="auto", histtype="step", lw=2, **plot_kwargs):
    """Plot a 1-D histogram of the finite values of ``x``.

    Really just a fallback for stat_plot2d when one of the parameters has
    no variance.

    Arguments:
        x {array-like} -- data; non-finite entries are dropped

    Keyword Arguments:
        ax {Axes} -- target axis (default: current axis)
        bins -- passed to ``Axes.hist`` (default: {"auto"})
        histtype {str} -- passed to ``Axes.hist`` (default: {"step"})
        lw {number} -- line width passed to ``Axes.hist`` (default: {2})
        **plot_kwargs -- forwarded to ``Axes.hist``

    Returns:
        Axes -- the axis plotted on
    """
    if ax is None:
        ax = plt.gca()
    # BUG FIX: previously the literals "auto"/"step"/2 were passed instead of
    # the bins/histtype/lw parameters, so caller-supplied values were ignored.
    ax.hist(x[np.isfinite(x)], bins=bins, histtype=histtype, lw=lw, **plot_kwargs)
    return ax
def stat_plot2d(
    x,
    y,
    marker="k.",
    bins=20,
    range=None,
    smooth=0,
    xscale=None,
    yscale=None,
    plot_data=False,
    plot_contourf=False,
    plot_contour=False,
    plot_imshow=False,
    plot_binned=True,
    color=None,
    cmap=None,
    levels=None,
    mfc=None,
    mec=None,
    mew=None,
    ms=None,
    vmin=None,
    vmax=None,
    alpha=1,
    rasterized=True,
    linewidths=None,
    data_kwargs=None,
    contourf_kwargs=None,
    contour_kwargs=None,
    data_color=None,
    contour_color=None,
    default_color=None,
    binned_color=None,
    contourf_levels=None,
    contour_levels=None,
    lw=None,
    debug=False,
    zorder=0,
    ax=None,
    plot_datapoints=None,
):
    """2-D density plot: scattered data points, filled/line contours and/or
    an image of the binned density.

    Based on hist2d from dfm's corner.py, with many more styling options.
    The ``*_kwargs`` dicts give the most direct control and take precedence
    over the other keywords.

    color precedence:
        color
        marker color (for data only)
        data_color (for data only, overrides marker)
        contour_color (contour only, overrides color)
        match (contour only, overrides both)

    If none of plot_data/plot_contour/plot_contourf is requested, a default
    of data + smoothed contours is drawn.  If either variable has zero
    variance the function falls back to a scatter plot or stat_plot1d and
    returns 0.

    NOTE(review): ``plot_binned``, ``binned_color``, ``contourf_levels`` and
    ``contour_levels`` are accepted but never read in the body -- confirm
    whether they are vestigial.

    Returns:
        ax, plus the contour set / contourf set / data line, depending on
        which of plot_contour / plot_contourf / plot_datapoints were drawn.
    """
    if ax is None:
        ax = plt.gca()
    # work in log space for binning; converted back before plotting
    if xscale == "log":
        x = np.log10(x)
    if yscale == "log":
        y = np.log10(y)
    # plot_datapoints defaults to the plot_data flag
    if plot_datapoints is None:
        plot_datapoints = plot_data
    if not (plot_data or plot_contour or plot_contourf):
        # give the user a decent default plot
        plot_data = True
        plot_contour = True
        smooth = 2
    if smooth is None:
        smooth = 0
    # keep only points where both coordinates are finite
    g = np.isfinite(x + y)
    x, y = np.asarray(x)[g], np.asarray(y)[g]
    # degenerate input: fall back to simpler plots rather than erroring
    if (x.var() == 0) & (y.var() == 0):
        print(
            "Both variables have Variance=0. So no plot can be generated. Here is a plot to help"
        )
        print("First 10 (or less) elements of x", x[:10])
        print("First 10 (or less) elements of y", y[:10])
        ax.scatter(x, y)
        return 0
    elif x.var() == 0:
        print(
            "Variable X has variance=0. Instead of making an ugly plot, here is a histogram of the remaining variable"
        )
        stat_plot1d(y)
        return 0
    elif y.var() == 0:
        print(
            "Variable Y has variance=0. Instead of making an ugly plot, here is a histogram of the remaining variable"
        )
        stat_plot1d(x)
        return 0
    if range is None:
        range = [[x.min(), x.max()], [y.min(), y.max()]]
    # bin (and optionally smooth) the density; see hist2d in this module
    sm_unflat, X1, Y1 = hist2d(x, y, bins=bins, range=range, smooth=smooth)
    # undo the log transform so axes show the original data scale
    if xscale == "log":
        x = np.power(10, x)
        X1 = np.power(10, X1)
        ax.set_xscale("log")
    if yscale == "log":
        y = np.power(10, y)
        Y1 = np.power(10, Y1)
        ax.set_yscale("log")
    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
    # ALL the plotting stuff
    if data_kwargs is None:
        data_kwargs = dict()
    if contour_kwargs is None:
        contour_kwargs = dict()
    if contourf_kwargs is None:
        contourf_kwargs = dict()
    if isinstance(cmap, str):
        cmap = mpl.cm.get_cmap(cmap)
    if default_color is None:
        # next color in the axis color cycle (plots an empty line to get it)
        default_color = ax.plot([], [])[0].get_color()
    # flags describing what the caller asked for
    color_match = color == "match"
    data_match = data_color == "match"
    colors_not_set = (color is None) & (cmap is None)
    color_is_set = (color is not None) & (not color_match)
    cmap_is_set = cmap is not None
    reverse = False
    # strip a trailing "_r" from a string color and remember the reversal
    if isinstance(color, str):
        if color[-2:] == "_r":
            color, reverse = color[:-2], True
        else:
            color, reverse = color, False
    # MAKE SENSIBLE CHOICES WITH THE COLORS
    if debug:
        print("(1)", color, cmap)
    # we only need color to be set
    if colors_not_set:  # color not set and cmap not set
        color = default_color
        cmap = "viridis"
        cmap_is_set = True
        color_is_set = True
        if debug:
            print("(1a)", color, cmap, color_is_set, cmap_is_set)
    elif color_match & (not cmap_is_set):  # color is match and cmap not set
        color = default_color
        cmap = "viridis"
        color_is_set = True
        cmap_is_set = True
        if debug:
            print("(1b)", color, cmap, color_is_set, cmap_is_set)
    elif color_match & cmap_is_set:
        # "match": take the color from the middle of the cmap
        color = mpl.cm.get_cmap(cmap)(0.5)
        color_is_set = True
        if debug:
            print("(1c)", color, cmap, color_is_set, cmap_is_set)
    elif (not color_is_set) & cmap_is_set:
        color = default_color
        color_is_set = True
        if debug:
            print("(1d)", color, cmap, color_is_set, cmap_is_set)
    if debug:
        print("(2)", color, cmap, color_is_set, cmap_is_set)
    # resolve the color used for the raw data points
    if data_match & colors_not_set:
        # warnings.warn("Used data_color='match' w/o setting color or cmap"+
        #              "Setting data_color to default color")
        data_match = False
        data_color = color
        if debug:
            print("2(a)", data_color)
    elif data_match & cmap_is_set:
        data_color = mpl.cm.get_cmap(cmap)(0.5)
        if debug:
            print("2(b)", data_color)
    elif data_match & color_is_set:
        data_color = color
        if debug:
            print("2(c)", data_color)
    elif data_color is None:
        data_color = color
        if debug:
            print("2(d)", data_color)
    if debug:
        print("2(e)", data_color)
    if debug:
        print("(3)", color, cmap, color_is_set, cmap_is_set)
    # only create linear colormap is cmap is not set
    if not cmap_is_set:
        if debug:
            print("making linear cmap")
        cmap = color_cmap(color, reverse=reverse)
        cmap_is_set = True
    if debug:
        print("(3)", color, cmap, color_is_set, cmap_is_set)

    def listornone(thing):
        # wrap a scalar in a list (contour colors must be list-like)
        if thing is None:
            return thing
        elif isinstance(thing, list):
            return thing
        else:
            return [thing]

    # color_match is for contours and data
    no_set_contour_color = contour_color is None
    kwargs_not_set = (contour_kwargs.get("cmap") is None) & (
        contour_kwargs.get("colors") is None
    )
    if kwargs_not_set:
        if (color_match & no_set_contour_color) | (contour_color == "match"):
            # per-level colors matching what contourf would use
            contour_kwargs["colors"] = contour_level_colors(cmap, levels)
        elif contour_kwargs.get("colors") is None:
            contour_kwargs["colors"] = listornone(contour_color) or listornone(color)
    if contour_kwargs.get("levels") is None:
        contour_kwargs["levels"] = np.array(levels)  # levels
    if contour_kwargs.get("linewidths") is None:
        if (linewidths is None) & (lw is None):
            pass
        else:
            lw = linewidths or lw
            contour_kwargs["linewidths"] = [i for i in np.asarray([lw]).flatten()]
    if contour_kwargs.get("alpha") is None:
        contour_kwargs["alpha"] = alpha
    if contourf_kwargs.get("levels") is None:
        # prepend 0 to close the top contour region
        new_levels = np.hstack([[0], levels])
        contourf_kwargs["levels"] = np.unique(new_levels)  # close top contour
    if contourf_kwargs.get("alpha") is None:
        contourf_kwargs["alpha"] = alpha
    if (contourf_kwargs.get("cmap") is None) & (contourf_kwargs.get("colors") is None):
        contourf_kwargs["cmap"] = cmap
    if data_kwargs.get("color") is None:
        # NOTE(review): relies on matplotlib's private format parser --
        # may break across matplotlib versions
        _, dmarker, dcolor = mpl.axes._base._process_plot_format(marker)
        if dcolor is None:
            if color_match | data_match:
                data_kwargs["color"] = data_color or color
                marker = dmarker
        else:
            data_kwargs["color"] = data_color or color
    if data_kwargs.get("mfc") is None:
        data_kwargs["mfc"] = mfc
    if data_kwargs.get("mec") is None:
        data_kwargs["mec"] = mec
    if data_kwargs.get("mew") is None:
        data_kwargs["mew"] = mew
    if data_kwargs.get("ms") is None:
        data_kwargs["ms"] = ms
    if data_kwargs.get("alpha") is None:
        data_kwargs["alpha"] = alpha
    # FINALLY GETTING TO THE PLOTS
    if plot_datapoints:
        p = ax.plot(
            x, y, marker, **data_kwargs, rasterized=rasterized, zorder=zorder + 1
        )
        # remember limits so contours don't expand the view
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
    else:
        p = None
    # if vmin is None:
    #    vmin = 0
    # if vmax is None:
    #    vmax = levels[-1]
    if plot_contourf:
        cntrf = ax.contourf(
            X1,
            Y1,
            sm_unflat,
            **contourf_kwargs,
            vmin=vmin,
            vmax=vmax,
            zorder=zorder + 2,
        )
    else:
        cntrf = None
    if plot_contour:
        cntr = ax.contour(
            X1, Y1, sm_unflat, **contour_kwargs, vmin=vmin, vmax=vmax, zorder=zorder + 3
        )
    else:
        cntr = None
    if plot_imshow:
        ax.imshow(
            sm_unflat,
            origin="lower",
            extent=[X1.min(), X1.max(), Y1.min(), Y1.max()],
            zorder=zorder + 4,
        )
    if plot_datapoints:
        # restore the pre-contour view limits
        ax.set_xlim(*xlim)
        ax.set_ylim(*ylim)
    # return whichever artists were drawn
    if plot_contour & plot_contourf:
        return ax, cntr, cntrf
    elif plot_contour:
        return ax, cntr
    elif plot_contourf:
        return ax, cntrf
    elif plot_datapoints:
        return ax, p
    else:
        return ax
def annotate(
    text,
    x,
    y,
    ax=None,
    horizontalalignment="center",
    verticalalignment="center",
    ha=None,
    va=None,
    transform="axes",
    color="k",
    fontsize=9,
    facecolor="w",
    alpha=0.75,
    bbox=None,
    **kwargs,
):
    """Place text on an axis with a filled background box.

    Arguments:
        text {str} -- text to draw
        x, y {number} -- position (in axes or data coordinates, see transform)

    Keyword Arguments:
        ax {Axes} -- target axis (default: current axis)
        horizontalalignment / ha, verticalalignment / va -- alignment; the
            short forms take precedence when given
        transform {"axes" or "data"} -- coordinate system for (x, y)
        color {str} -- text color (default: {"k"})
        fontsize {number} -- font size (default: {9})
        facecolor {str} -- background box color (default: {"w"})
        alpha {number} -- background box alpha (default: {0.75})
        bbox {dict or None} -- extra bbox properties, merged over the
            facecolor/alpha defaults (default: None)
        **kwargs -- forwarded to ``Axes.text``

    Returns:
        Text -- the created text artist
    """
    if ax is None:
        ax = plt.gca()
    horizontalalignment = ha or horizontalalignment
    verticalalignment = va or verticalalignment
    if transform == "axes":
        transform = ax.transAxes
    elif transform == "data":
        transform = ax.transData
    # FIX: default was the mutable `bbox=dict()`; use None and build fresh.
    bbox1 = dict(facecolor=facecolor, alpha=alpha)
    if bbox is not None:
        bbox1.update(bbox)
    text = ax.text(
        x,
        y,
        text,
        horizontalalignment=horizontalalignment,
        verticalalignment=verticalalignment,
        transform=transform,
        color=color,
        fontsize=fontsize,
        bbox=bbox1,
        **kwargs,
    )
    return text
def jhist2d(*args, **kwargs):
    """Backwards-compatibility alias: forwards everything to stat_plot2d."""
    return stat_plot2d(*args, **kwargs)
def corner(pos, names=None, smooth=1, bins=20, figsize=None, **kwargs):
    """Produce a corner plot: 1-D histograms on the diagonal and 2-D
    density plots below it.

    Parameters
    ----------
    pos : np.array
        each item should be a row. pos.size = MxN, N items, M samples
    names : list of strings, optional
        names of variables to be plotted, must have N elements, by default None
    smooth : int, optional
        how much to smooth the contours/histogram, by default 1
    bins : int, optional
        number of bins for histogram, by default 20
    figsize : tuple, optional
        figure size, by default (2 * pos.shape[1] + 0.5) squared
    **kwargs
        forwarded to stat_plot2d for the off-diagonal panels

    Returns
    -------
    (Figure, ndarray of Axes)
    """
    if figsize is None:
        dim = 2 * pos.shape[1] + 0.5
        figsize = (dim, dim)
    fig, axs = plt.subplots(
        nrows=pos.shape[1],
        ncols=pos.shape[1],
        sharex=False,
        sharey=False,
        figsize=figsize,
    )
    for i in range(pos.shape[-1]):
        for j in range(pos.shape[-1]):
            ax = axs[i, j]
            if i == j:
                # diagonal: 1-D marginal histogram
                stat_plot1d(pos[:, i], ax=ax)
                # BUG FIX: names was dereferenced unconditionally here,
                # crashing when names is None (documented as optional).
                if names is not None:
                    ax.set_xlabel(names[j])
            if j < i:
                # lower triangle: 2-D density of variable j vs variable i
                stat_plot2d(
                    pos[:, j],
                    pos[:, i],
                    ax=ax,
                    bins=bins,
                    smooth=smooth,
                    plot_datapoints=True,
                    plot_contour=True,
                    **kwargs,
                )
            if names is not None and i != j:
                # best-effort labelling; tolerate short name lists
                try:
                    ax.set_xlabel(names[j])
                    ax.set_ylabel(names[i])
                except IndexError:
                    pass
            if j > i:
                # upper triangle is redundant; remove those axes
                plt.delaxes(axs[i, j])
    fig.tight_layout()
    return fig, axs
def plotoneone(
    color="k",
    lw=2,
    scale=1,
    offset=0,
    p=None,
    invert=False,
    n=50,
    start=None,
    end=None,
    ax=None,
    **kwargs,
):
    """Draw a reference line y = scale * x + offset (1:1 by default) across
    the axis without altering the current view limits.

    Keyword Arguments:
        p {(scale, offset) or None} -- overrides scale/offset when given
        invert {bool} -- swap the roles of x and y when plotting
        n {int} -- number of sample points along the line
        start, end {number or None} -- line extent; defaults to the union of
            the current x/y limits
        ax {Axes} -- target axis (default: current axis)
        **kwargs -- forwarded to ``Axes.plot``
    """
    if ax is None:
        ax = plt.gca()
    # remember limits so the reference line doesn't rescale the view
    xlim, ylim = ax.get_xlim(), ax.get_ylim()
    lo = np.min([xlim[0], ylim[0]]) if start is None else start
    hi = np.max([xlim[1], ylim[1]]) if end is None else end
    if ax.get_xscale() == "log":
        xs = np.logspace(np.log10(lo), np.log10(hi), n)
    else:
        xs = np.linspace(lo, hi, n)
    if p is not None:
        scale, offset = p
    ys = scale * xs + offset
    horiz, vert = (ys, xs) if invert else (xs, ys)
    ax.plot(horiz, vert, color=color, lw=lw, **kwargs)
    # restore the original view limits
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
def oplot_hist(
    X,
    bins=None,
    ylim=None,
    scale=0.5,
    ax=None,
    show_mean=False,
    show_median=False,
    show_percentiles=None,
):
    """Overplot a rescaled histogram of X along the bottom of an axis.

    The histogram is normalized so its tallest bar spans ``scale`` of the
    current y-range, sitting on the lower y-limit.  Optional dashed vertical
    lines mark the mean, the median, and any values in ``show_percentiles``.

    Returns the axis plotted on.
    """
    if ax is None:
        ax = plt.gca()
    if ylim is None:
        ylim = ax.get_ylim()
    if bins is None:
        bins = "auto"
    counts, edges = np.histogram(
        X, range=np.nanpercentile(X, [0, 100]), bins=bins, density=True
    )
    # rescale: tallest bar covers `scale` of the y-range, anchored at ylim[0]
    counts = (counts / counts.max()) * (ylim[1] - ylim[0]) * scale + ylim[0]
    ax.step(mavg(edges), counts, where="mid", color="0.25", alpha=1, zorder=10, lw=1.5)
    if show_mean:
        ax.axvline(np.nanmean(X), 0, 1, color="0.45", ls="--")
    if show_median:
        ax.axvline(np.nanmedian(X), 0, 1, color="0.45", ls="--")
    if show_percentiles is not None:
        for value in show_percentiles:
            ax.axvline(value, 0, 1, color="0.45", ls="--", alpha=0.5)
    return ax
def multi_colored_line_plot(
    x, y, z=None, cmap="viridis", norm=None, vmin=None, vmax=None, ax=None, **kwargs
):
    """Plot a line whose color varies along its length.

    Adapted from the matplotlib gallery.  The line is split into segments,
    each colored by the corresponding value of ``z`` (or by ``y`` when ``z``
    is None) through ``cmap``/``norm``.

    Returns the LineCollection added to the axis.
    """
    if ax is None:
        ax = plt.gca()
    # Build an (N, 1, 2) array of points, then pair consecutive points into
    # (N-1, 2, 2) segments as required by LineCollection.
    pts = np.array([x, y]).T.reshape(-1, 1, 2)
    segs = np.concatenate([pts[:-1], pts[1:]], axis=1)
    # color by y unless an explicit value array is supplied
    color_vals = y if z is None else z
    if vmin is None:
        vmin = np.nanmin(color_vals)
    if vmax is None:
        vmax = np.nanmax(color_vals)
    if norm is None:
        norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    lc = mpl.collections.LineCollection(segs, cmap=cmap, norm=norm, **kwargs)
    # Set the values used for colormapping
    lc.set_array(color_vals)
    # fig.colorbar(line, ax=axs[0]) to add a colorbar for the returned artist
    return ax.add_collection(lc)
def errorbar_fill(
    x=None,
    y=None,
    yerr=None,
    *args,
    ax=None,
    mid=True,
    color=None,
    alpha=1,
    lw=1,
    ls="-",
    fmt=None,
    label=None,
    **kwargs,
):
    """Plot y +/- yerr as a filled band, optionally with the central line.

    Keyword Arguments:
        ax {Axes} -- target axis (default: current axis)
        mid {bool} -- also draw the central y(x) line (default: {True})
        color, alpha -- shared by the band and the central line
        lw, ls -- line width/style for the central line only
        label -- legend label, attached to the filled band
        **kwargs -- forwarded to both fill_between and plot

    NOTE(review): ``alpha_fill`` below is computed but never used --
    presumably either the band or the line was meant to use it; confirm
    intent before changing.
    NOTE(review): ``*args`` and ``fmt`` are accepted but unused.

    Returns:
        None
    """
    # remember the current axes so they can be restored afterwards
    oldax = plt.gca()
    if ax is None:
        ax = oldax
    plt.sca(ax)
    if mid:
        alpha_fill = alpha * 2
        if alpha_fill >= 1:
            alpha_fill = 1
    plt.fill_between(x, y - yerr, y + yerr, color=color, alpha=alpha,label=label,**kwargs)
    if mid:
        plt.plot(x, y, "-", color=color, alpha=alpha, lw=lw, ls=ls,**kwargs)
    plt.sca(oldax)
    return None
def plot_to_origin(ax=None):
    """Anchor the lower-left corner of the axis at (0, 0), keeping the
    current upper limits unchanged."""
    if ax is None:
        ax = plt.gca()
    _, xmax = ax.get_xlim()
    _, ymax = ax.get_ylim()
    ax.set_xlim(0, xmax)
    ax.set_ylim(0, ymax)
    return None
def plot_covariance_ellipse(cov, mu, n=1, ax=None, c='b', lw=1, zorder=100):
    """Draw the n-sigma ellipse of a 2-D covariance matrix centered on mu.

    Arguments:
        cov {2x2 array} -- covariance matrix
        mu {(2,) array-like} -- ellipse center (the mean)

    Keyword Arguments:
        n {number} -- number of sigma for the ellipse size (default: {1})
        ax {Axes or None} -- target axis (default: current axis)
        c {str} -- edge color (default: {'b'})
        lw {number} -- line width (default: {1})
        zorder {int} -- draw order (default: {100})

    Returns:
        (a, b, angle) -- semi-major axis, semi-minor axis, and rotation
        angle in degrees
    """
    P, D, T = eigen_decomp(cov, mu, return_slope=False)
    m = P[1] / P[0]  # slope of each eigenvector
    major = np.argmax(D.diagonal())
    angle = np.arctan(m)[major] * 180 / np.pi
    # n-sigma semi-axis lengths from the eigenvalues
    axes = n * np.sqrt(D.diagonal())
    b, a = axes[np.argsort(D.diagonal())]
    # let the width be the length of the major axis
    pat = mpl.patches.Ellipse(
        # BUG FIX: the center was previously xy=b (a semi-axis length);
        # the ellipse must be centered on the mean.
        angle=angle,
        xy=mu,
        width=2*a,
        height=2*b,
        zorder=zorder,
        facecolor="none",
        edgecolor=c,
        lw=lw,
    )
    if ax is None:
        plt.gca().add_artist(pat)
    else:
        ax.add_artist(pat)
    return a, b, angle
def eigenplot(A, b=[0, 0], n=3, plot_data=False, vec_c="r", ell_c="b", ell_lw=2, **kwargs):
    """Plot the scaled eigenvectors and n-sigma ellipse of a 2x2 matrix A
    centered at b; return (slope, y-intercept) of the major axis.

    Reference: https://janakiev.com/blog/covariance-matrix/

    Keyword Arguments:
        b {(2,) array-like} -- center point (default: {[0, 0]}; this mutable
            default is safe here because it is immediately rebound, never
            mutated)
        n {number} -- sigma scaling for vectors and ellipse (default: {3})
        plot_data {bool} -- also scatter 2000 samples from N(b, A)
        vec_c, ell_c -- colors for the eigenvectors and the ellipse
        **kwargs -- forwarded to the eigenvector plt.plot calls
    """
    # https://janakiev.com/blog/covariance-matrix/
    eVa, eVe = np.linalg.eig(A)
    b = np.array(b)
    if plot_data:
        # visualize the distribution the ellipse is meant to describe
        data = np.random.multivariate_normal(b, A, 2000)
        plt.plot(*data.T, "k.")
    P, D = eVe, np.diag(eVa)
    S = D ** 0.5
    T = P @ S  # transform from real to eigenspace
    # Columns of T are scaled eigenvectors
    # for eigenvector in T
    for i in T.T:
        # endpoint of the n-sigma eigenvector drawn from the center b
        i = b + n * i
        plt.plot([b[0], i[0]], [b[1], i[1]], c=vec_c, zorder=100, **kwargs)
    m = P[1] / P[0]  # slope of each eigenvector
    y_int = -m * b[0] + b[1]  # y-intercept of each eigen-line through b
    major = np.argmax(eVa)
    angle = np.arctan(m)[major] * 180 / np.pi
    # print(angle)
    # get the norm of the
    # a1 = 2 * n * np.linalg.norm(T, axis=0)
    a1 = 2 * n * np.sqrt(eVa)  # full (not semi-) axis lengths at n sigma
    h, w = a1[np.argsort(eVa)]
    pat = mpl.patches.Ellipse(
        angle=angle,
        xy=b,
        width=w,
        height=h,
        zorder=100,
        facecolor="none",
        edgecolor=ell_c,
        lw=ell_lw,
    )
    plt.gca().add_artist(pat)
    # print(m[major], y_int[major])
    return m[major], y_int[major]
def eigenplot_from_data(x, y, n=3, data=False, vec_c="r", ell_c="b", ell_lw=2):
    """Eigen-decomposition plot of the covariance of (x, y) samples.

    Computes the covariance and mean of the finite (x, y) pairs and forwards
    them to eigenplot.  When ``data`` is True, also scatters the raw points.

    Returns eigenplot's (slope, y-intercept) of the major axis.
    """
    g = np.isfinite(x + y)
    cov = np.cov(x[g], y[g])
    b = np.mean(x[g]), np.mean(y[g])
    if data:
        plt.plot(x, y, "k.", zorder=0)
    # BUG FIX: eigenplot has no `data` parameter (it is `plot_data`), so the
    # previous `data=False` fell into **kwargs and leaked into plt.plot.
    out = eigenplot(cov, b, plot_data=False, n=n, vec_c=vec_c, ell_c=ell_c, ell_lw=ell_lw)
    return out
def figsize(arr, default=[6, 6]):
    """Return a (width, height) figure size proportional to ``arr``'s shape.

    The array's shape is normalized by its largest dimension, multiplied by
    ``default``, and reversed so that (rows, cols) maps to (width, height).
    ``default`` is never mutated, so the list default is safe.
    """
    shape = np.asarray(np.shape(np.array(arr)), dtype=float)
    scaled = np.array(default) * (shape / shape.max())
    return scaled[::-1]
|
from civicboom.lib.base import *
from cbutils.misc import make_username
from civicboom.model import User, Group
from civicboom.lib.authentication import get_user_from_openid_identifyer, get_user_and_check_password, signin_user, signin_user_and_redirect, signout_user, login_redirector, set_persona
from civicboom.lib.services.janrain import janrain
from civicboom.lib.accounts import verify_email_hash, associate_janrain_account, set_password, has_account_without_password, send_verifiy_email
from civicboom.lib.constants import get_action_objects_for_url
from civicboom.controllers.register import register_new_janrain_user
import time
log = logging.getLogger(__name__)
class AccountController(BaseController):
    """
    @title Accounts
    @doc account
    @desc controller for signing in/out, switching personas, linking Janrain
          accounts, verifying email addresses and resetting passwords
    """

    #---------------------------------------------------------------------------
    # Signout
    #---------------------------------------------------------------------------
    # while not massively dangerous, posting an image with eg <img src="http://civicboom.com/account/signout">
    # is a common prank, so this needs authenticating
    @web
    @auth
    #@authenticate_form
    def signout(self, **kwargs):
        """
        POST /account/signout: End the current session
        This function is also pointed to from the ini config to trigger AuthKit to remove cookies
        Redirect to a new URL so that the browser doesn't cache it (Shouldn't be necessary, but it
        seems that sometimes it is?)
        @api account 1.0 (WIP)
        @return 302 redirect to the front page
        """
        signout_user(c.logged_in_persona)
        # ut=<timestamp> query param busts any browser/proxy cache of the titlepage
        return redirect(url(controller='misc', action='titlepage', ut=str(time.time())))

    #---------------------------------------------------------------------------
    # Janrain Engage - http://www.janrain.com/products/engage
    #---------------------------------------------------------------------------
    @web
    # mobile requires a valid cert, mobile.civicboom.com doesn't have one
    #@https() # redirect to https for transfer of password
    def signin(self, **kwargs):
        """
        POST /account/signin: Create a new session

        Supports two authentication paths: a Janrain Engage token, or a
        plain username/password pair.  A GET simply renders the signin form.

        @api account 1.0 (WIP)
        @param username the user's civicboom.com username
        @param password the user's password
        @return 200 logged in ok
                auth_token the token to be supplied with any data-modifying requests
        """
        # If no POST display signin template
        if request.environ['REQUEST_METHOD'] == 'GET':
            action_objects = get_action_objects_for_url(session_get('login_redirect') or '')
            if action_objects:
                c.action_objects = action_objects
            #return render("/html/web/account/signin_frag.mako")
            #return render("/html/web/account/signin.mako")
            return action_ok()
        # Without this line, a simple blank POST would see no janrain token,
        # and no username / password, so it would leave c.logged_in_user
        # as-is. Then, it would return the CSRF token. This is bad because
        # it means any random third-party site can automatically get the
        # token, generate a form, and submit it.
        #
        # Thus, we need to only give out the token if the user has supplied
        # valid auth credentials with the request.
        if c.logged_in_user and False: # Shish: temp hack, mobile relies on the old behaviour
            raise action_error("user is logged in already", code=400)
        c.auth_info = None
        login_provider = None
        # Authenticate with Janrain
        if 'token' in kwargs:
            if config['test_mode'] and kwargs.get('fake_janrain_return'):
                # test mode: accept a canned Janrain response instead of calling out
                import json
                c.auth_info = json.loads(kwargs.get('fake_janrain_return'))
            else:
                c.auth_info = janrain('auth_info', token=kwargs.get('token'))
            if c.auth_info:
                c.logged_in_user = get_user_from_openid_identifyer(c.auth_info['profile']['identifier']) # Janrain guarantees the identifier to be set
                login_provider = c.auth_info['profile']['providerName']
        # Authenticate with standard username
        if 'username' in kwargs and 'password' in kwargs:
            c.logged_in_user = get_user_and_check_password(kwargs['username'], kwargs['password'])
            login_provider = "password"
        # If user has existing account: Login
        if c.logged_in_user:
            if c.format in ["html", "redirect"]:
                signin_user_and_redirect(c.logged_in_user, login_provider=login_provider)
            else:
                signin_user(c.logged_in_user, "api-password")
                return action_ok("logged in ok", {"auth_token": authentication_token()})
        # If no user found but we have Janrain auth_info - create user and redirect to complete regisration
        if c.auth_info:
            #try : existing_user = get_user(c.auth_info['profile']['displayName'])
            #except: pass
            #if existing_user:
                # TODO
                # If we have a user with the same username they may be the same user
                #   prompt them to link accounts OR continue with new registration.
                # Currently if a username conflict appears then a random new username is created and the user is prompted to enter a new one
                #pass
            c.logged_in_user = register_new_janrain_user(c.auth_info['profile']) # Create new user from Janrain profile data
            signin_user_and_redirect(c.logged_in_user, login_provider=login_provider, redirect_url=url(controller="profile", action="index"))
        # If not authenticated or any janrain info then error
        user_log.warning("Failed to log in as '%s'" % kwargs.get('username', ''))
        err = action_error(_('Unable to authenticate user'), code=403)
        # AllanC - TODO
        # Check if user does exist but simply has no 'password' login record associated with it
        if has_account_without_password(kwargs.get('username')):
            err = action_error(_('%s has not set up a _site_name password yet, please visit your settings on the website') % kwargs.get('username'), code=403)
        if c.format in ["html", "redirect"]:
            set_flash_message(err.original_dict)
            return redirect_to_referer() #AllanC - TODO .. humm .. this will remove the login_action_referer so if they fail a login first time they cant perform the action thats remembered. This need thinking about.
        else:
            raise err

    #---------------------------------------------------------------------------
    # Switch Persona
    #---------------------------------------------------------------------------
    @web
    @auth
    def set_persona(self, id, prompt_aggregate=None, **kwargs):
        """
        POST /account/set_persona/{id}: change the currently active persona
        @api account 1.0 (WIP)
        @return 200 switched ok
        @return 500 switch failed
        """
        # NOTE: this calls the module-level set_persona imported from
        # civicboom.lib.authentication, not this method (no recursion).
        if set_persona(id):
            user_log.info("Switched to persona %s" % id)
            # AllanC - not a sutable solution - I wanted an AJAX working version
            #          I have put a hack in here to force html requests to be redirected
            # GregM: addded prompt aggregate (internal use only) to the redirect
            if c.format=='html':
                if prompt_aggregate:
                    return redirect(url(controller='profile', action='index', prompt_aggregate=prompt_aggregate))
                else:
                    return redirect(url(controller='profile', action='index'))
            return action_ok("switched persona")
        else:
            user_log.info("Failed to switch to persona %s" % id)
            raise action_error("failed to swich persona")

    #---------------------------------------------------------------------------
    # Link Janrain Account
    #---------------------------------------------------------------------------
    @web
    @authorize
    def link_janrain(self, **kwargs):
        """
        A user can have their account linked to multiple external accounts
        The benefit of this is that all external accounts registered with us will
        allow a user to aggregate over those external services.
        Only currently logged in users can add additional janrain accounts
        """
        id = kwargs.get('id')
        username = id
        # 'me' (or no id) means the currently logged-in persona
        if not username or username == 'me':
            username = c.logged_in_persona.username
            id = 'me'
        user_type = 'group'
        user = get_member(username)
        if isinstance(user, User):
            user_type = 'member'
            # only the account owner may link external accounts
            if not user == c.logged_in_user:
                raise action_error(code=403, message="No permission")
        else:
            raise action_error(code=404, message="Not applicable to groups")
        redirect_url = ('/settings/'+id+'/link_janrain').encode('ascii','ignore')
        if request.environ['REQUEST_METHOD'] == 'GET':
            # GETs just bounce to the settings page that hosts the Janrain widget
            redirect(url(redirect_url))
        c.auth_info = None
        if 'token' in kwargs:
            c.auth_info = janrain('auth_info', token=kwargs.get('token'))
        if c.auth_info:
            user_log.info("linked account from %s" % c.auth_info['profile']['providerName'])
            associate_janrain_account(user, c.auth_info['profile']['providerName'], c.auth_info['profile']['identifier'])
            set_flash_message(action_ok("Account successfully linked to _site_name"))
        else:
            set_flash_message(action_error("Error linking accounts").original_dict)
        redirect(url(redirect_url))

    #---------------------------------------------------------------------------
    # Verify Email
    #---------------------------------------------------------------------------
    # AllanC - TODO needs to be updated to use web_params and auto format
    def verify_email(self, id):
        """
        An email is generated for a user and a hash created for them in the URL
        see civicboom_lib for the send_verify_email that generates this if needed
        """
        if 'hash' in request.params :
            if verify_email_hash(id, request.params['hash'], commit=True):
                set_flash_message(action_ok(_('email address has been successfully validated')))
            else:
                set_flash_message(action_error(_('email validation failed, if you have changed any user settings since sending the validation email, please validate again')).original_dict)
        redirect('/')

    #---------------------------------------------------------------------------
    # Forgotten Password
    #---------------------------------------------------------------------------
    @web
    def forgot_password(self, id=None, **kwargs):
        """
        Password reset flow driven by an emailed hash link.

        Step 1 (no hash): email the user a verification link.
        Step 2 (hash, GET): show the new-password form.
        Step 3 (hash, POST): validate and set the new password.
        """
        c.hash = kwargs.get('hash')
        username = id or kwargs.get('username') or kwargs.get('email')
        user = get_member(username, search_email=True)
        if user.__type__ == 'group':
            raise action_error('a _group cannot have a password set, please login as yourself and switch to the _group persona', code=404)
        # Step 1: User request link with hash to be sent via email
        if not c.hash:
            #send_forgot_password_email(user)
            send_verifiy_email(user, controller='account', action='forgot_password', message=_('reset your password'))
            return action_ok(_('Password reminder sent to %s, please check your email' % username))
        if not verify_email_hash(user, c.hash): # abort if unable to verify user
            raise action_error(_('unable to verify user'), code=400)
        # Step 2: User identifed with hash, show form to enter new password
        if request.environ['REQUEST_METHOD'] == 'GET':
            # form to enter new password
            return render("/html/web/account/forgot_password.mako")
        # Step 3: Validate new password and set
        else:
            import civicboom.lib.form_validators.base
            import formencode.validators
            class SetPasswordSchema(civicboom.lib.form_validators.base.DefaultSchema):
                password_new = civicboom.lib.form_validators.base.PasswordValidator(not_empty=True)
                password_new_confirm = civicboom.lib.form_validators.base.PasswordValidator(not_empty=True)
                chained_validators = [formencode.validators.FieldsMatch('password_new', 'password_new_confirm')]
            # Validate new password
            try:
                kwargs = SetPasswordSchema().to_python(kwargs)
            # Validation Failed
            except formencode.Invalid as error:
                dict_validated = error.value
                dict_validated_errors = error.error_dict or {}
                raise action_error(
                    status = 'invalid' ,
                    code = 400 ,
                    message = _('failed validation') ,
                    template = 'account/forgot_password'
                )
            user_log.info('new password set for %s' % user)
            set_password(user, kwargs['password_new'])
            set_flash_message(_('password has been set'))
            redirect(url(controller='account', action='signin'))
|
#! /usr/bin/env python3
# ============================================================================
# Copyright 2021 <NAME>
#
# Licensed under the 3-Clause BSD License.
# (See accompanying file 3_CLAUSE_BSD_LICENSE.txt or
# <https://opensource.org/licenses/BSD-3-Clause>.)
# ____________________________________________________________________________
"""
Checks our include files.
This script checks for mishaps in our include files.
Specifically, that every .hpp file other than
breeze/top_level_namespace.hpp that opens namespace breeze_ns
includes breeze/top_level_namespace.hpp in its first include
directive, that every .hpp file has one include guard (and only
one), and that the names of the guarding macros are all
distinct.
It is tailored to our coding style, and is particularly simple,
given that we don't use conditional compilation for anything
else than include guards.
"""
# ----------------------------------------------------------------------------
import collections
import os
import re
import sys
argc = len( sys.argv )
if argc != 2:
    sys.exit(
        "Wrong number of arguments; usage:"
        " check_include_files.py <root_dir_path>"
    )
root = sys.argv[ 1 ]
if not os.path.isdir( root ):
    sys.exit( "The specified directory was not found" )

#       Raw string: "\w" in a plain literal is an invalid escape sequence
#       (DeprecationWarning, and a SyntaxWarning on newer Pythons).
regex = re.compile( r"^#ifndef (BREEZE_GUARD_\w+)" )

#       macro name -> list of files that define it (detects duplicates)
macro_dict = collections.defaultdict( list )
#       file name -> number of include guards found in it
guard_counts = collections.defaultdict( int )
#       file name -> whether it opens namespace breeze_ns
opens_namespace = collections.defaultdict( bool )
#       file name -> whether its *first* include directive is
#       breeze/top_level_namespace.hpp
has_namespace_include = collections.defaultdict( bool )
exit_code = 0
exit_error = 2
try:
    for dir_path, subdir_names, file_names in os.walk( root ):
        for name in file_names:
            if name.endswith( ".hpp" ):
                full_name = os.path.join( dir_path, name )
                guard_counts[ full_name ] = 0
                first_include_seen = False
                opens_namespace[ full_name ] = False
                has_namespace_include[ full_name ] = False
                #       Close each file deterministically (previously the
                #       handle was left to the garbage collector).
                with open( full_name ) as hpp_file:
                    for line in hpp_file:
                        if line.find( "namespace breeze_ns" ) == 0:
                            opens_namespace[ full_name ] = True
                        #       Only the first include directive may satisfy
                        #       the top_level_namespace.hpp requirement.
                        if not first_include_seen and line.find( "#include" ) == 0:
                            first_include_seen = True
                            if line.find(
                                '#include "breeze/top_level_namespace.hpp"'
                                ) == 0:
                                has_namespace_include[ full_name ] = True
                        m = regex.match( line )
                        if m:
                            guard_counts[ full_name ] += 1
                            macro_name = m.group( 1 )
                            macro_dict[ macro_name ].append( full_name )
except Exception as ex:
    print( "An exception occurred: {}".format( ex ), file = sys.stderr )
    exit_code = exit_error
for file_name in opens_namespace:
    if os.path.basename( file_name ) != "top_level_namespace.hpp" \
            and opens_namespace[ file_name ] \
            and not has_namespace_include[ file_name ]:
        print(
            "Error: {} doesn't include top_level_namespace.hpp,"
            " or includes it after other files".format( file_name ),
            file = sys.stderr
        )
        #       Bug fix: this error previously did not affect the exit
        #       status, unlike the other two checks.
        exit_code = exit_error
for file_name in guard_counts:
    if guard_counts[ file_name ] != 1:
        print(
            "Error: {} Breeze guards in {}".format(
                guard_counts[ file_name ], file_name
            ),
            file = sys.stderr
        )
        exit_code = exit_error
for macro_name in macro_dict:
    if len( macro_dict[ macro_name ] ) != 1:
        print(
            "Error: macro name {} duplicated in:".format( macro_name ),
            file = sys.stderr
        )
        for f in macro_dict[ macro_name ]:
            print( "    {}".format( f ), file = sys.stderr )
        exit_code = exit_error
#       sys.exit instead of the site-provided exit() builtin.
sys.exit( exit_code )
|
<reponame>hadware/pyannote-audio
# MIT License
#
# Copyright (c) 2020-2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import warnings
from pathlib import Path
from typing import Any, Callable, List, Optional, Text, Tuple, Union
import numpy as np
import torch
from einops import rearrange
from pytorch_lightning.utilities.memory import is_oom_error
from pyannote.audio.core.io import AudioFile
from pyannote.audio.core.model import Model
from pyannote.audio.core.task import Resolution
from pyannote.audio.utils.permutation import mae_cost_func, permutate
from pyannote.audio.utils.progress import InferenceProgressHook
from pyannote.core import Segment, SlidingWindow, SlidingWindowFeature
TaskName = Union[Text, None]
class Inference:
    """Inference

    Parameters
    ----------
    model : Model
        Model. Will be automatically set to eval() mode and moved to `device` when provided.
    window : {"sliding", "whole"}, optional
        Use a "sliding" window and aggregate the corresponding outputs (default)
        or just one (potentially long) window covering the "whole" file or chunk.
    skip_aggregation : bool, optional
        Do not aggregate outputs when using "sliding" window. Defaults to False.
    duration : float, optional
        Chunk duration, in seconds. Defaults to duration used for training the model.
        Has no effect when `window` is "whole".
    step : float, optional
        Step between consecutive chunks, in seconds. Defaults to warm-up duration when
        greater than 0s, otherwise 10% of duration. Has no effect when `window` is "whole".
    batch_size : int, optional
        Batch size. Larger values make inference faster. Defaults to 32.
    device : torch.device, optional
        Device used for inference. Defaults to `model.device`.
        In case `device` and `model.device` are different, model is sent to device.
    pre_aggregation_hook : callable, optional
        When a callable is provided, it is applied to the model output, just before aggregation.
        Takes a (num_chunks, num_frames, dimension) numpy array as input and returns a modified
        (num_chunks, num_frames, other_dimension) numpy array passed to overlap-add aggregation.
    progress_hook : {callable, True, str}, optional
        When a callable is provided, it is called everytime a batch is processed
        with two integer arguments:
        - the number of chunks that have been processed so far
        - the total number of chunks
        Set to True (or a descriptive string) to display a tqdm progress bar.
    use_auth_token : str, optional
        When loading a private huggingface.co model, set `use_auth_token`
        to True or to a string containing your huggingface.co authentication
        token that can be obtained by running `huggingface-cli login`
    """

    def __init__(
        self,
        model: Union[Model, Text, Path],
        window: Text = "sliding",
        skip_aggregation: bool = False,
        device: torch.device = None,
        duration: float = None,
        step: float = None,
        batch_size: int = 32,
        pre_aggregation_hook: Callable[[np.ndarray], np.ndarray] = None,
        progress_hook: Union[bool, Text, Callable[[int, int], Any]] = False,
        use_auth_token: Union[Text, None] = None,
    ):
        # Accept either an instantiated Model or a checkpoint path/identifier
        # to be loaded with Model.from_pretrained.
        self.model = (
            model
            if isinstance(model, Model)
            else Model.from_pretrained(
                Path(model),
                map_location=device,
                strict=False,
                use_auth_token=use_auth_token,
            )
        )
        if window not in ["sliding", "whole"]:
            raise ValueError('`window` must be "sliding" or "whole".')
        specifications = self.model.specifications
        if specifications.resolution == Resolution.FRAME and window == "whole":
            warnings.warn(
                'Using "whole" `window` inference with a frame-based model might lead to bad results '
                'and huge memory consumption: it is recommended to set `window` to "sliding".'
            )
        self.window = window
        self.skip_aggregation = skip_aggregation
        if device is None:
            device = self.model.device
        self.device = device
        self.pre_aggregation_hook = pre_aggregation_hook
        self.model.eval()
        self.model.to(self.device)
        # chunk duration used during training
        # (reuse `specifications` fetched above instead of re-reading it)
        training_duration = specifications.duration
        if duration is None:
            duration = training_duration
        elif training_duration != duration:
            warnings.warn(
                f"Model was trained with {training_duration:g}s chunks, and you requested "
                f"{duration:g}s chunks for inference: this might lead to suboptimal results."
            )
        self.duration = duration
        self.warm_up = specifications.warm_up
        # Use that many seconds on the left- and rightmost parts of each chunk
        # to warm up the model. While the model does process those left- and right-most
        # parts, only the remaining central part of each chunk is used for aggregating
        # scores during inference.
        # step between consecutive chunks
        if step is None:
            step = 0.1 * self.duration if self.warm_up[0] == 0.0 else self.warm_up[0]
        if step > self.duration:
            raise ValueError(
                f"Step between consecutive chunks is set to {step:g}s, while chunks are "
                f"only {self.duration:g}s long, leading to gaps between consecutive chunks. "
                f"Either decrease step or increase duration."
            )
        self.step = step
        self.batch_size = batch_size
        # Normalize `progress_hook` into either a callable or None.
        if callable(progress_hook):
            pass
        elif isinstance(progress_hook, Text):
            progress_hook = InferenceProgressHook(desc=progress_hook)
        elif progress_hook:
            progress_hook = InferenceProgressHook()
        else:
            progress_hook = None
        self.progress_hook = progress_hook

    def infer(self, chunks: torch.Tensor) -> np.ndarray:
        """Forward pass

        Takes care of sending chunks to right device and outputs back to CPU

        Parameters
        ----------
        chunks : (batch_size, num_channels, num_samples) torch.Tensor
            Batch of audio chunks.

        Returns
        -------
        outputs : (batch_size, ...) np.ndarray
            Model output.
        """
        with torch.no_grad():
            try:
                outputs = self.model(chunks.to(self.device))
            except RuntimeError as exception:
                # Translate CUDA/CPU out-of-memory errors into an actionable message.
                if is_oom_error(exception):
                    raise MemoryError(
                        f"batch_size ({self.batch_size: d}) is probably too large. "
                        f"Try with a smaller value until memory error disappears."
                    )
                else:
                    raise exception
        return outputs.cpu().numpy()

    def slide(self, waveform: torch.Tensor, sample_rate: int) -> SlidingWindowFeature:
        """Slide model on a waveform

        Parameters
        ----------
        waveform: (num_channels, num_samples) torch.Tensor
            Waveform.
        sample_rate : int
            Sample rate.

        Returns
        -------
        output : SlidingWindowFeature
            Model output. Shape is (num_chunks, dimension) for chunk-level tasks,
            and (num_frames, dimension) for frame-level tasks.
        """
        window_size: int = round(self.duration * sample_rate)
        step_size: int = round(self.step * sample_rate)
        num_channels, num_samples = waveform.shape
        specifications = self.model.specifications
        resolution = specifications.resolution
        introspection = self.model.introspection
        if resolution == Resolution.CHUNK:
            frames = SlidingWindow(start=0.0, duration=self.duration, step=self.step)
        elif resolution == Resolution.FRAME:
            frames = introspection.frames
            num_frames_per_chunk, dimension = introspection(window_size)

        # prepare complete chunks
        if num_samples >= window_size:
            chunks: torch.Tensor = rearrange(
                waveform.unfold(1, window_size, step_size),
                "channel chunk frame -> chunk channel frame",
            )
            num_chunks, _, _ = chunks.shape
        else:
            num_chunks = 0

        # prepare last incomplete chunk
        has_last_chunk = (num_samples < window_size) or (
            num_samples - window_size
        ) % step_size > 0
        if has_last_chunk:
            last_chunk: torch.Tensor = waveform[:, num_chunks * step_size :]

        outputs: Union[List[np.ndarray], np.ndarray] = list()
        if self.progress_hook is not None:
            self.progress_hook(0, num_chunks + has_last_chunk)

        # slide over audio chunks in batch
        for c in np.arange(0, num_chunks, self.batch_size):
            batch: torch.Tensor = chunks[c : c + self.batch_size]
            outputs.append(self.infer(batch))
            if self.progress_hook is not None:
                # report the number of chunks processed so far:
                # `c` is the batch *start* index, so the previous `c + 1`
                # under-counted progress by up to (batch_size - 1) chunks.
                self.progress_hook(
                    min(c + self.batch_size, num_chunks),
                    num_chunks + has_last_chunk,
                )

        # process orphan last chunk
        if has_last_chunk:
            last_output = self.infer(last_chunk[None])
            if specifications.resolution == Resolution.FRAME:
                # zero-pad the last (shorter) output to num_frames_per_chunk
                # so that it can be stacked with complete-chunk outputs
                pad = num_frames_per_chunk - last_output.shape[1]
                last_output = np.pad(last_output, ((0, 0), (0, pad), (0, 0)))
            outputs.append(last_output)
            if self.progress_hook is not None:
                self.progress_hook(
                    num_chunks + has_last_chunk, num_chunks + has_last_chunk
                )
        outputs = np.vstack(outputs)

        # skip aggregation when requested,
        # or when model outputs just one vector per chunk
        # or when model is permutation-invariant (and not post-processed)
        if (
            self.skip_aggregation
            or specifications.resolution == Resolution.CHUNK
            or (
                specifications.permutation_invariant
                and self.pre_aggregation_hook is None
            )
        ):
            frames = SlidingWindow(start=0.0, duration=self.duration, step=self.step)
            return SlidingWindowFeature(outputs, frames)

        if self.pre_aggregation_hook is not None:
            outputs = self.pre_aggregation_hook(outputs)

        aggregated = self.aggregate(
            SlidingWindowFeature(
                outputs,
                SlidingWindow(start=0.0, duration=self.duration, step=self.step),
            ),
            frames=frames,
            warm_up=self.warm_up,
            hamming=True,
            missing=0.0,
        )
        if has_last_chunk:
            # drop trailing frames that only come from the zero-padding
            # of the last incomplete chunk
            num_frames = aggregated.data.shape[0]
            aggregated.data = aggregated.data[: num_frames - pad, :]
        return aggregated

    def __call__(self, file: AudioFile) -> Union[SlidingWindowFeature, np.ndarray]:
        """Run inference on a whole file

        Parameters
        ----------
        file : AudioFile
            Audio file.

        Returns
        -------
        output : SlidingWindowFeature or np.ndarray
            Model output, as `SlidingWindowFeature` if `window` is set to "sliding"
            and `np.ndarray` if is set to "whole".
        """
        waveform, sample_rate = self.model.audio(file)
        if self.window == "sliding":
            return self.slide(waveform, sample_rate)
        return self.infer(waveform[None])[0]

    def crop(
        self,
        file: AudioFile,
        chunk: Union[Segment, List[Segment]],
        duration: Optional[float] = None,
    ) -> Union[SlidingWindowFeature, np.ndarray]:
        """Run inference on a chunk or a list of chunks

        Parameters
        ----------
        file : AudioFile
            Audio file.
        chunk : Segment or list of Segment
            Apply model on this chunk. When a list of chunks is provided and
            window is set to "sliding", this is equivalent to calling crop on
            the smallest chunk that contains all chunks. In case window is set
            to "whole", this is equivalent to concatenating each chunk into one
            (artifical) chunk before processing it.
        duration : float, optional
            Enforce chunk duration (in seconds). This is a hack to avoid rounding
            errors that may result in a different number of audio samples for two
            chunks of the same duration.

        Returns
        -------
        output : SlidingWindowFeature or np.ndarray
            Model output, as `SlidingWindowFeature` if `window` is set to "sliding"
            and `np.ndarray` if is set to "whole".

        Notes
        -----
        If model needs to be warmed up, remember to extend the requested chunk with the
        corresponding amount of time so that it is actually warmed up when processing the
        chunk of interest:
        >>> chunk_of_interest = Segment(10, 15)
        >>> extended_chunk = Segment(10 - warm_up, 15 + warm_up)
        >>> inference.crop(file, extended_chunk).crop(chunk_of_interest, returns_data=False)
        """
        if self.window == "sliding":
            if not isinstance(chunk, Segment):
                # cover all chunks with their smallest enclosing segment
                start = min(c.start for c in chunk)
                end = max(c.end for c in chunk)
                chunk = Segment(start=start, end=end)
            waveform, sample_rate = self.model.audio.crop(
                file, chunk, duration=duration
            )
            output = self.slide(waveform, sample_rate)
            # shift output frames so that they are expressed in the original
            # file's time coordinates (not the cropped chunk's)
            frames = output.sliding_window
            shifted_frames = SlidingWindow(
                start=chunk.start, duration=frames.duration, step=frames.step
            )
            return SlidingWindowFeature(output.data, shifted_frames)
        elif self.window == "whole":
            if isinstance(chunk, Segment):
                waveform, sample_rate = self.model.audio.crop(
                    file, chunk, duration=duration
                )
            else:
                waveform = torch.cat(
                    [self.model.audio.crop(file, c)[0] for c in chunk], dim=1
                )
            return self.infer(waveform[None])[0]
        else:
            raise NotImplementedError(
                f"Unsupported window type '{self.window}': should be 'sliding' or 'whole'."
            )

    @staticmethod
    def aggregate(
        scores: SlidingWindowFeature,
        frames: SlidingWindow = None,
        warm_up: Tuple[float, float] = (0.0, 0.0),
        epsilon: float = 1e-12,
        hamming: bool = False,
        missing: float = np.nan,  # np.NaN alias was removed in NumPy 2.0
        skip_average: bool = False,
    ) -> SlidingWindowFeature:
        """Aggregation

        Parameters
        ----------
        scores : SlidingWindowFeature
            Raw (unaggregated) scores. Shape is (num_chunks, num_frames_per_chunk, num_classes).
        frames : SlidingWindow, optional
            Frames resolution. Defaults to estimate it automatically based on `scores` shape
            and chunk size. Providing the exact frame resolution (when known) leads to better
            temporal precision.
        warm_up : (float, float) tuple, optional
            Left/right warm up duration (in seconds).
        epsilon : float, optional
            Small value used to avoid division by zero and to (almost) zero out
            warm-up regions. Defaults to 1e-12.
        hamming : bool, optional
            Weight frames by a Hamming window during overlap-add. Defaults to False.
        missing : float, optional
            Value used to replace missing (ie all NaNs) values.
        skip_average : bool, optional
            Skip final averaging step.

        Returns
        -------
        aggregated_scores : SlidingWindowFeature
            Aggregated scores. Shape is (num_frames, num_classes)
        """
        num_chunks, num_frames_per_chunk, num_classes = scores.data.shape
        chunks = scores.sliding_window
        if frames is None:
            # estimate frame resolution from chunk duration and frame count
            duration = step = chunks.duration / num_frames_per_chunk
            frames = SlidingWindow(start=chunks.start, duration=duration, step=step)
        else:
            frames = SlidingWindow(
                start=chunks.start,
                duration=frames.duration,
                step=frames.step,
            )
        # mask[i] == 1 where scores are defined, 0 where they are NaN
        masks = 1 - np.isnan(scores)
        scores.data = np.nan_to_num(scores.data, copy=True, nan=0.0)
        # Hamming window used for overlap-add aggregation
        hamming_window = (
            np.hamming(num_frames_per_chunk).reshape(-1, 1)
            if hamming
            else np.ones((num_frames_per_chunk, 1))
        )
        # anything before warm_up_left (and after num_frames_per_chunk - warm_up_right)
        # will not be used in the final aggregation
        # warm-up windows used for overlap-add aggregation
        warm_up_window = np.ones((num_frames_per_chunk, 1))
        # anything before warm_up_left will not contribute to aggregation
        warm_up_left = round(
            warm_up[0] / scores.sliding_window.duration * num_frames_per_chunk
        )
        warm_up_window[:warm_up_left] = epsilon
        # anything after num_frames_per_chunk - warm_up_right either
        warm_up_right = round(
            warm_up[1] / scores.sliding_window.duration * num_frames_per_chunk
        )
        warm_up_window[num_frames_per_chunk - warm_up_right :] = epsilon
        # aggregated_output[i] will be used to store the sum of all predictions
        # for frame #i
        num_frames = (
            frames.closest_frame(
                scores.sliding_window.start
                + scores.sliding_window.duration
                + (num_chunks - 1) * scores.sliding_window.step
            )
            + 1
        )
        aggregated_output: np.ndarray = np.zeros(
            (num_frames, num_classes), dtype=np.float32
        )
        # overlapping_chunk_count[i] will be used to store the number of chunks
        # that contributed to frame #i
        overlapping_chunk_count: np.ndarray = np.zeros(
            (num_frames, num_classes), dtype=np.float32
        )
        # aggregated_mask[i] will be used to indicate whether
        # at least one non-NAN frame contributed to frame #i
        aggregated_mask: np.ndarray = np.zeros(
            (num_frames, num_classes), dtype=np.float32
        )
        # loop on the scores of sliding chunks
        for (chunk, score), (_, mask) in zip(scores, masks):
            # chunk ~ Segment
            # score ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray
            # mask ~ (num_frames_per_chunk, num_classes)-shaped np.ndarray
            start_frame = frames.closest_frame(chunk.start)
            aggregated_output[start_frame : start_frame + num_frames_per_chunk] += (
                score * mask * hamming_window * warm_up_window
            )
            overlapping_chunk_count[
                start_frame : start_frame + num_frames_per_chunk
            ] += (mask * hamming_window * warm_up_window)
            aggregated_mask[
                start_frame : start_frame + num_frames_per_chunk
            ] = np.maximum(
                aggregated_mask[start_frame : start_frame + num_frames_per_chunk],
                mask,
            )
        if skip_average:
            average = aggregated_output
        else:
            average = aggregated_output / np.maximum(overlapping_chunk_count, epsilon)
        # frames to which no chunk contributed get the `missing` value
        average[aggregated_mask == 0.0] = missing
        return SlidingWindowFeature(average, frames)

    @staticmethod
    def trim(
        scores: SlidingWindowFeature,
        warm_up: Tuple[float, float] = (0.1, 0.1),
    ) -> SlidingWindowFeature:
        """Trim left and right warm-up regions

        Parameters
        ----------
        scores : SlidingWindowFeature
            (num_chunks, num_frames, num_classes)-shaped scores.
        warm_up : (float, float) tuple
            Left/right warm up ratio of chunk duration.
            Defaults to (0.1, 0.1), i.e. 10% on both sides.

        Returns
        -------
        trimmed : SlidingWindowFeature
            (num_chunks, trimmed_num_frames, num_speakers)-shaped scores
        """
        assert (
            scores.data.ndim == 3
        ), "Inference.trim expects (num_chunks, num_frames, num_classes)-shaped `scores`"
        _, num_frames, _ = scores.data.shape
        chunks = scores.sliding_window
        num_frames_left = round(num_frames * warm_up[0])
        num_frames_right = round(num_frames * warm_up[1])
        num_frames_step = round(num_frames * chunks.step / chunks.duration)
        assert (
            num_frames - num_frames_left - num_frames_right > num_frames_step
        ), f"Total `warm_up` is so large ({sum(warm_up) * 100:g}% of each chunk) that resulting trimmed scores does not cover a whole step ({chunks.step:g}s)"
        new_data = scores.data[:, num_frames_left : num_frames - num_frames_right]
        new_chunks = SlidingWindow(
            start=chunks.start + warm_up[0] * chunks.duration,
            step=chunks.step,
            duration=(1 - warm_up[0] - warm_up[1]) * chunks.duration,
        )
        return SlidingWindowFeature(new_data, new_chunks)

    @staticmethod
    def stitch(
        activations: SlidingWindowFeature,
        frames: SlidingWindow = None,
        lookahead: Optional[Tuple[int, int]] = None,
        cost_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
        match_func: Callable[[np.ndarray, np.ndarray, float], bool] = None,
    ) -> SlidingWindowFeature:
        """
        Parameters
        ----------
        activations : SlidingWindowFeature
            (num_chunks, num_frames, num_classes)-shaped scores.
        frames : SlidingWindow, optional
            Frames resolution. Defaults to estimate it automatically based on `activations`
            shape and chunk size. Providing the exact frame resolution (when known) leads to better
            temporal precision.
        lookahead : (int, int) tuple
            Number of past and future adjacent chunks to use for stitching.
            Defaults to (k, k) with k = chunk_duration / chunk_step - 1
        cost_func : callable
            Cost function used to find the optimal mapping between chunks.
            Defaults to mean absolute error (utils.permutations.mae_cost_func)
        match_func : callable
            Predicate deciding whether two aligned activation tracks should be
            stitched, given (this_track, that_track, cost). Defaults to always
            matching.
        """
        num_chunks, num_frames, num_classes = activations.data.shape
        chunks: SlidingWindow = activations.sliding_window
        if frames is None:
            duration = step = chunks.duration / num_frames
            frames = SlidingWindow(start=chunks.start, duration=duration, step=step)
        else:
            frames = SlidingWindow(
                start=chunks.start,
                duration=frames.duration,
                step=frames.step,
            )
        max_lookahead = math.floor(chunks.duration / chunks.step - 1)
        if lookahead is None:
            lookahead = 2 * (max_lookahead,)
        assert all(L <= max_lookahead for L in lookahead)
        if cost_func is None:
            cost_func = mae_cost_func
        if match_func is None:

            def always_match(this: np.ndarray, that: np.ndarray, cost: float):
                return True

            match_func = always_match
        stitches = []
        for C, (chunk, activation) in enumerate(activations):
            # local_stitch[k] holds the permutated activations of chunk
            # C - lookahead[0] + k (NaN where no match was found)
            local_stitch = np.nan * np.zeros(  # np.NAN alias removed in NumPy 2.0
                (sum(lookahead) + 1, num_frames, num_classes)
            )
            for c in range(
                max(0, C - lookahead[0]), min(num_chunks, C + lookahead[1] + 1)
            ):
                # extract common temporal support
                shift = round((C - c) * num_frames * chunks.step / chunks.duration)
                if shift < 0:
                    shift = -shift
                    this_activations = activation[shift:]
                    that_activations = activations[c, : num_frames - shift]
                else:
                    this_activations = activation[: num_frames - shift]
                    that_activations = activations[c, shift:]
                # find the optimal one-to-one mapping
                _, (permutation,), (cost,) = permutate(
                    this_activations[np.newaxis],
                    that_activations,
                    cost_func=cost_func,
                    return_cost=True,
                )
                for this, that in enumerate(permutation):
                    # only stitch under certain conditions
                    matching = (c == C) or (
                        match_func(
                            this_activations[:, this],
                            that_activations[:, that],
                            cost[this, that],
                        )
                    )
                    if matching:
                        local_stitch[c - C + lookahead[0], :, this] = activations[
                            c, :, that
                        ]
            # TODO: do not lookahead further once a mismatch is found
            stitched_chunks = SlidingWindow(
                start=chunk.start - lookahead[0] * chunks.step,
                duration=chunks.duration,
                step=chunks.step,
            )
            local_stitch = Inference.aggregate(
                SlidingWindowFeature(local_stitch, stitched_chunks),
                frames=frames,
                hamming=True,
            )
            stitches.append(local_stitch.data)
        stitches = np.stack(stitches)
        stitched_chunks = SlidingWindow(
            start=chunks.start - lookahead[0] * chunks.step,
            duration=chunks.duration + sum(lookahead) * chunks.step,
            step=chunks.step,
        )
        return SlidingWindowFeature(stitches, stitched_chunks)
|
# Copyright 2021 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for test_point_aggregator.py."""
from absl.testing import absltest
import numpy as np
import pandas as pd
from wfa_planning_evaluation_framework.driver.test_point_aggregator import (
AGGREGATORS,
_reach,
_shuffle_distance,
aggregate,
)
from wfa_planning_evaluation_framework.models.reach_point import ReachPoint
class TestPointAggregatorTest(absltest.TestCase):
    """Unit tests for the aggregator functions in test_point_aggregator."""

    @classmethod
    def setUpClass(cls):
        # Reach-point fixtures shared by all tests. Naming scheme appears to be
        # rp<scenario><point><variant> — TODO confirm against ReachPoint docs.
        cls.rp110 = ReachPoint([1000], [0.0, 0.0, 0.0], [10.0])
        cls.rp111 = ReachPoint([1000], [200.0, 100.0, 50.0], [10.0])
        cls.rp112 = ReachPoint([1000], [210.0, 110.0, 60.0], [10.0])
        cls.rp121 = ReachPoint([2000], [300.0, 150.0, 75.0], [30.0])
        cls.rp122 = ReachPoint([2000], [320.0, 170.0, 95.0], [30.0])
        cls.rp131 = ReachPoint([3000], [400.0, 200.0, 100.0], [40.0])
        cls.rp132 = ReachPoint([3000], [430.0, 230.0, 130.0], [40.0])
        cls.test_points1 = [cls.rp111, cls.rp121, cls.rp131]
        cls.model_points1 = [cls.rp112, cls.rp122, cls.rp132]

    def test_npoints(self):
        self.assertEqual(
            AGGREGATORS["npoints"](self.test_points1, self.model_points1), 3
        )

    def test_mean_error(self):
        self.assertEqual(
            AGGREGATORS["mean_error"](self.test_points1, self.model_points1), -20
        )
        self.assertEqual(
            AGGREGATORS["mean_error"](self.model_points1, self.test_points1), 20
        )

    def test_mean_abs_error(self):
        self.assertEqual(
            AGGREGATORS["mean_abs_error"](self.test_points1, self.model_points1), 20
        )
        self.assertEqual(
            AGGREGATORS["mean_abs_error"](self.model_points1, self.test_points1), 20
        )

    def test_mean_squared_error(self):
        self.assertEqual(
            AGGREGATORS["mean_squared_error"](self.test_points1, self.model_points1),
            1400 / 3,
        )
        self.assertEqual(
            AGGREGATORS["mean_squared_error"](self.model_points1, self.test_points1),
            1400 / 3,
        )

    def test_mean_abs_relative_error(self):
        self.assertEqual(
            AGGREGATORS["mean_abs_relative_error"](
                self.test_points1, self.model_points1
            ),
            1.0 / 3 * (10.0 / 200.0 + 20.0 / 300.0 + 30.0 / 400.0),
        )

    def test_mean_abs_relative_error_at_higher_frequencies(self):
        self.assertEqual(
            AGGREGATORS["mare_freq_at_least_2"](self.test_points1, self.model_points1),
            1.0 / 3 * (10.0 / 100.0 + 20.0 / 150.0 + 30.0 / 200.0),
        )
        self.assertEqual(
            AGGREGATORS["mare_freq_at_least_3"](self.test_points1, self.model_points1),
            1.0 / 3 * (10.0 / 50.0 + 20.0 / 75.0 + 30.0 / 100.0),
        )
        # Fixtures only carry frequencies up to 3, so 4+ is undefined (NaN).
        self.assertTrue(
            np.isnan(
                AGGREGATORS["mare_freq_at_least_4"](
                    self.test_points1, self.model_points1
                )
            )
        )

    def test_mean_squared_relative_error(self):
        self.assertEqual(
            AGGREGATORS["mean_squared_relative_error"](
                self.test_points1, self.model_points1
            ),
            1.0
            / 3
            * (
                10.0 ** 2 / 200.0 ** 2 + 20.0 ** 2 / 300.0 ** 2 + 30.0 ** 2 / 400.0 ** 2
            ),
        )

    def test_var_error(self):
        self.assertEqual(
            AGGREGATORS["var_error"](self.test_points1, self.model_points1), 200.0 / 3
        )

    def test_var_relative_error(self):
        self.assertAlmostEqual(
            AGGREGATORS["var_relative_error"](self.test_points1, self.model_points1),
            0.00010802,
        )

    def test_relative_error_quantiles(self):
        xlist = []
        ylist = []
        for i in range(11):
            xlist.append(ReachPoint([i], [1]))
            ylist.append(ReachPoint([i], [i + 1]))
        self.assertEqual(AGGREGATORS["relative_error_q10"](xlist, ylist), 1.0)
        self.assertEqual(AGGREGATORS["relative_error_q20"](xlist, ylist), 2.0)
        self.assertEqual(AGGREGATORS["relative_error_q30"](xlist, ylist), 3.0)
        self.assertEqual(AGGREGATORS["relative_error_q40"](xlist, ylist), 4.0)
        self.assertEqual(AGGREGATORS["relative_error_q50"](xlist, ylist), 5.0)
        self.assertEqual(AGGREGATORS["relative_error_q60"](xlist, ylist), 6.0)
        self.assertEqual(AGGREGATORS["relative_error_q70"](xlist, ylist), 7.0)
        self.assertEqual(AGGREGATORS["relative_error_q80"](xlist, ylist), 8.0)
        self.assertEqual(AGGREGATORS["relative_error_q90"](xlist, ylist), 9.0)

    def test_mean_shuffle_distance(self):
        self.assertEqual(
            AGGREGATORS["mean_shuffle_distance"](self.test_points1, self.model_points1),
            1.0,
        )
        xlist = [ReachPoint([1], [6, 5, 4, 3, 2, 1], [1])]
        self.assertEqual(AGGREGATORS["mean_shuffle_distance"](xlist, xlist), 0.0)
        ylist = [ReachPoint([1], [7, 6, 6, 6, 6, 6], [1])]
        self.assertAlmostEqual(AGGREGATORS["mean_shuffle_distance"](xlist, ylist), 0.8)

    def test_mean_squared_shuffle_distance(self):
        self.assertEqual(
            AGGREGATORS["mean_squared_shuffle_distance"](
                self.test_points1, self.model_points1
            ),
            1.0,
        )
        xlist = [ReachPoint([1], [6, 5, 4, 3, 2, 1], [1])]
        self.assertEqual(
            AGGREGATORS["mean_squared_shuffle_distance"](xlist, xlist), 0.0
        )
        ylist = [ReachPoint([1], [7, 6, 6, 6, 6, 6], [1])]
        self.assertAlmostEqual(
            AGGREGATORS["mean_squared_shuffle_distance"](xlist, ylist), 0.64
        )

    def test_var_shuffle_distance(self):
        xlist = [ReachPoint([1], [6, 5, 4, 3, 2, 1], [1])] * 2
        ylist = [
            ReachPoint([1], [7, 6, 6, 6, 6, 6], [1]),
            ReachPoint([1], [6, 5, 4, 3, 2, 1], [1]),
        ]
        self.assertAlmostEqual(
            AGGREGATORS["var_shuffle_distance"](xlist, ylist), 0.16, places=3
        )

    def test__reach(self):
        np.testing.assert_array_equal(_reach([]), np.array([]))
        np.testing.assert_array_equal(_reach([self.rp111]), np.array([200.0]))
        np.testing.assert_array_equal(
            _reach([self.rp111, self.rp112]), np.array([200.0, 210.0])
        )
        np.testing.assert_array_equal(
            _reach([self.rp111, self.rp112], 2), np.array([100.0, 110.0])
        )

    def test__shuffle_distance(self):
        self.assertEqual(
            _shuffle_distance(ReachPoint([1], [1]), ReachPoint([1], [2])), 1.0
        )
        self.assertEqual(
            _shuffle_distance(
                ReachPoint([1], [6, 5, 4, 3, 2, 1]), ReachPoint([1], [5, 5, 5, 5, 5, 5])
            ),
            0.5,
        )
        self.assertEqual(
            _shuffle_distance(
                ReachPoint([1], [5, 4, 3, 2, 2, 2]), ReachPoint([1], [5, 5, 4, 3, 2, 2])
            ),
            1.0 / 3.0,
        )

    def test_aggregate(self):
        # Use a name other than `pd` so we do not shadow the module-level
        # `pandas as pd` import (the original local variable did).
        results = aggregate(self.test_points1, self.model_points1)
        self.assertEqual(results["npoints"][0], 3)
        self.assertEqual(len(results.columns), len(AGGREGATORS) + 2)
# Allow running this test module directly (python test_point_aggregator_test.py)
# in addition to discovery by a test runner.
if __name__ == "__main__":
    absltest.main()
|
#!/usr/bin/env python3
import torch
import itertools
from collections import defaultdict
from .lazy_tensor import LazyTensor, delazify
from .non_lazy_tensor import lazify
class CatLazyTensor(LazyTensor):
r"""
A `LazyTensor` that represents the concatenation of other lazy tensors.
Each LazyTensor must have the same shape except in the concatenating
dimension.
Args:
- :attr:`lazy_tensors` (list of LazyTensors):
A list of LazyTensors whose sizes are the same except in
concatenating dimension :attr:`dim`
- :attr:`dim` (int):
The concatenating dimension which can be a batch dimension.
- :attr:`output_device` (torch.device):
            The CatLazyTensor will appear to be on :attr:`output_device`
and place any output `torch.Tensors` on :attr:`output_device`
"""
def __init__(self, *lazy_tensors, dim=0, output_device=None):
if len(lazy_tensors) == 0:
raise RuntimeError("List of LazyTensors must be non-empty")
elif len(lazy_tensors) == 1:
raise RuntimeError("Why are we trying to concatenate a single LazyTensor?")
if not all([isinstance(t, LazyTensor) for t in lazy_tensors]):
raise RuntimeError("CatLazyTensor requires a list of all LazyTensors")
super().__init__(*lazy_tensors, dim=dim, output_device=output_device)
def remove_dim(tuple, dim):
return tuple[:dim] + tuple[dim + 1:]
rep_tensor = lazy_tensors[0]
ndims = rep_tensor.ndimension()
if dim < 0:
dim = ndims + dim
pre_cat_size = tuple(rep_tensor.size()[:dim])
post_cat_size = tuple(rep_tensor.size()[dim + 1:])
cat_dim_len = 0
cat_dim_sizes = []
tensor_idx_to_start_idx = []
for t in lazy_tensors:
if t.ndimension() != ndims:
raise RuntimeError("All tensors must have the same number of dimensions")
if remove_dim(t.size(), dim) != remove_dim(rep_tensor.size(), dim):
raise RuntimeError("All LazyTensors must have the same size in "
"the non-concatenation dimension")
tensor_idx_to_start_idx.append(cat_dim_len)
cat_dim_size = t.size()[dim]
cat_dim_len += cat_dim_size
cat_dim_sizes.append(cat_dim_size)
# using itertools to more quickly join list of lists
idx_to_tensor_idx = [[t_idx] * size for t_idx, size in enumerate(cat_dim_sizes)]
idx_to_tensor_idx = list(itertools.chain.from_iterable(idx_to_tensor_idx))
self.lazy_tensors = lazy_tensors
self.pre_cat_size = pre_cat_size
self.post_cat_size = post_cat_size
self.cat_dim_sizes = cat_dim_sizes
self.cat_dim_len = cat_dim_len
# can't call this attribute self.dim because LazyTensor has a dim() function
self.cat_dim = dim
self.idx_to_tensor_idx = idx_to_tensor_idx
self.tensor_idx_to_start_idx = tensor_idx_to_start_idx
self.tensor_idx_to_end_idx = tensor_idx_to_start_idx[1:] + [sum(self.cat_dim_sizes)]
self.output_device = output_device
def _split_slice(self, slice_idx):
"""
Splits a slice(a, b, None) in to a list of slices [slice(a1, b1, None), slice(a2, b2, None), ...]
so that each slice in the list slices in to a single tensor that we have concatenated with this LazyTensor.
"""
if slice_idx.step is not None:
# TODO: Add support for this eventually.
raise RuntimeError('Slicing a CatLazyTensor with a step is not currently supported!')
start_idx = slice_idx.start if slice_idx.start is not None else 0
stop_idx = slice_idx.stop if slice_idx.stop is not None else self.shape[self.cat_dim]
start_tensor_idx = self.idx_to_tensor_idx[start_idx]
stop_tensor_idx = self.idx_to_tensor_idx[stop_idx - 1]
if start_tensor_idx != stop_tensor_idx:
# By definition, stop is on a later tensor than start since they are in order.
end_idx = self.tensor_idx_to_end_idx[start_tensor_idx]
my_slice = slice(start_idx, end_idx)
if end_idx == stop_idx:
return [my_slice]
else:
# Keep splitting
return [my_slice] + self._split_slice(slice(end_idx, stop_idx, None))
else:
return [slice(start_idx, stop_idx, None)]
    def _getitem(self, *indices):
        """Index into the concatenated tensor.

        Routes each index to the constituent LazyTensor(s) it falls into and
        re-concatenates the partial results. Returns an evaluated tensor
        (delazified) when the indexing collapses the last two dimensions
        (both tensor indices, or an int among the last two), and a LazyTensor
        otherwise.
        """
        # int indices squeeze their dimension out of the result
        squeeze = [isinstance(i, int) for i in indices]
        indices = list(indices)
        # the index applied along the concatenation dimension
        target_indices = indices[self.cat_dim]
        # position of the cat dimension in the *result*, accounting for
        # squeezed dimensions before it; None if cat_dim itself is squeezed
        new_cat_dim = (None if squeeze[self.cat_dim]
                       else self.cat_dim - sum(squeeze[:self.cat_dim + 1]))
        # decide whether the result should be evaluated (delazified) or lazy
        eval_result = (torch.is_tensor(indices[-2]) and torch.is_tensor(indices[-1]))
        eval_result = eval_result or (isinstance(indices[-2], int) or isinstance(indices[-1], int))
        maybe_lazify = delazify if eval_result else lazify
        if new_cat_dim is None:
            # target_indices must be a int so we can let the LazyTensor squeeze out cat_dim
            t_idx = self.idx_to_tensor_idx[target_indices]
            return maybe_lazify(self.lazy_tensors[t_idx]._getitem(*indices))
        if all(torch.is_tensor(x) for x in indices):
            # fancy (tensor) indexing everywhere: delegate to _get_indices
            left_indices, right_indices = indices[-2], indices[-1]
            batch_indices = indices[:-2]
            return maybe_lazify(self._get_indices(left_indices, right_indices, *batch_indices))
        if isinstance(target_indices, slice):
            if target_indices == slice(None, None, None):
                # full slice along cat_dim: index each constituent and re-concatenate
                res_list = [lazify(t._getitem(*indices)) for t in self.lazy_tensors]
                return maybe_lazify(self.__class__(*res_list, dim=new_cat_dim, output_device=self.output_device))
            else:
                # partial slice: split it into per-constituent sub-slices
                target_slices = self._split_slice(target_indices)
                target_tensors = [self.idx_to_tensor_idx[sl.start] for sl in target_slices]
                res_list = []
                for idx, t_idx in zip(target_slices, target_tensors):
                    # shift the sub-slice into the constituent's local coordinates
                    shifted_start = idx.start - self.tensor_idx_to_start_idx[t_idx]
                    shifted_stop = idx.stop - self.tensor_idx_to_start_idx[t_idx]
                    shifted_slice = slice(shifted_start, shifted_stop, idx.step)
                    indices[self.cat_dim] = shifted_slice
                    res = lazify(self.lazy_tensors[t_idx]._getitem(*indices))
                    res_list.append(res)
                if len(res_list) == 1:
                    result = res_list[0]
                elif all([rl.dim() == 1 for rl in res_list]):
                    # 1-d partial results cannot form a CatLazyTensor: evaluate and cat
                    return maybe_lazify(torch.cat([rl.evaluate().to(self.device) for rl in res_list]))
                else:
                    # infer the concatenation dimension of the partial results
                    # from where their shapes differ; fall back to self.cat_dim
                    shape_diffs = torch.tensor(res_list[0].shape) - torch.tensor(res_list[1].shape)
                    new_cat_dims = (shape_diffs != 0).nonzero()
                    new_cat_dim = new_cat_dims.item() if new_cat_dims.numel() > 0 else self.cat_dim
                    result = self.__class__(*res_list, dim=new_cat_dim, output_device=self.output_device)
                return maybe_lazify(result.to(self.output_device))
        elif torch.is_tensor(target_indices):
            # this means another `indices` is a slice object
            target_indices = [idx.item() for idx in target_indices]
            target_tensors = [self.idx_to_tensor_idx[idx] for idx in target_indices]
            res_list = []
            # group consecutive runs of indices that hit the same constituent
            curr_tensor, slice_indices = target_tensors[0], []
            for idx, t_idx in zip(target_indices, target_tensors):
                if t_idx != curr_tensor:
                    # flush the accumulated run against the previous constituent
                    indices[self.cat_dim] = torch.tensor(slice_indices)
                    new_inds = [ind[:len(slice_indices)] if torch.is_tensor(ind) else ind for ind in indices]
                    res = lazify(self.lazy_tensors[curr_tensor]._getitem(*new_inds))
                    res_list.append(res)
                    curr_tensor, slice_indices = t_idx, []
                # translate to the constituent's local coordinates
                slice_indices.append(idx - self.tensor_idx_to_start_idx[t_idx])
            # flush the final run
            indices[self.cat_dim] = torch.tensor(slice_indices)
            new_inds = [ind[:len(slice_indices)] if torch.is_tensor(ind) else ind for ind in indices]
            res = lazify(self.lazy_tensors[t_idx]._getitem(*new_inds))
            res_list.append(res)
            if len(res_list) == 1:
                result = res_list[0]
            else:
                # same shape-difference heuristic as in the slice branch above
                shape_diffs = torch.tensor(res_list[0].shape) - torch.tensor(res_list[1].shape)
                new_cat_dims = (shape_diffs != 0).nonzero()
                new_cat_dim = new_cat_dims.item() if new_cat_dims.numel() > 0 else self.cat_dim
                result = self.__class__(*res_list, dim=new_cat_dim, output_device=self.output_device)
            return maybe_lazify(result.to(self.output_device))
def _get_indices(self, left_indices, right_indices, *batch_indices):
# tensor indices must all have the same length
indices = list(batch_indices) + [left_indices, right_indices]
indices = torch.stack(indices, dim=0)
target_indices = indices[self.cat_dim, :]
target_tensors = [self.idx_to_tensor_idx[idx.item()] for idx in target_indices]
starting_indices = [self.tensor_idx_to_start_idx[t_idx] for t_idx in target_tensors]
local_indices = target_indices - torch.tensor(starting_indices)
indices[self.cat_dim, :] = local_indices
if len(set(target_tensors)) == 1:
# shortcut if target_indices are all on the same LazyTensor
left_indices, right_indices = indices[-2, :], indices[-1, :]
batch_indices = tuple(indices[:-2, :])
return self.lazy_tensors[target_tensors[0]]._get_indices(left_indices, right_indices, *batch_indices)
d = defaultdict(list)
for i, t_idx in enumerate(target_tensors):
d[t_idx].append(i)
res_list = []
for t_idx, slices in sorted(d.items()):
indices_ = indices[:, slices]
left_indices, right_indices = indices_[-2, :], indices_[-1, :]
batch_indices = tuple(indices_[:-2, :])
res = self.lazy_tensors[t_idx]._get_indices(left_indices,
right_indices,
*batch_indices)
res_list.append(res)
# collect all the res in res_list onto one device
res = torch.cat([r.to(self.device) for r in res_list], dim=0)
t_idx_to_res_idx = []
curr_idx = 0
for t_idx in sorted(d.keys()):
t_idx_to_res_idx.append(curr_idx)
curr_idx += len(d[t_idx])
lookup = []
# use the fact that order of elements retrieved from each LazyTensor is
# the same as the order they appear in target_indices
for t_idx in target_tensors:
idx = t_idx_to_res_idx[t_idx]
lookup.append(idx)
t_idx_to_res_idx[t_idx] += 1
return res[lookup]
def _matmul(self, rhs):
    """Matrix-multiply the concatenated tensor by ``rhs``, one sub-tensor per device.

    ``rhs`` may be a vector (temporarily promoted to a column matrix).  The
    strategy depends on where the concatenation dimension sits:
    rows -> concatenate partial products; columns -> sum partial products;
    batch dim -> slice rhs per batch block and concatenate.
    """
    isvector = rhs.ndimension() == 1
    if isvector:
        rhs = rhs.unsqueeze(1)
    # Results are gathered on self.device when set, otherwise on rhs's device.
    output_device = (self.device if self.device is not None
                     else rhs.device)
    # make a copy of `rhs` on each device
    rhs_ = []
    for d in self.devices:
        if d != rhs.device:
            rhs_.append(rhs.to(d))
        else:
            rhs_.append(rhs)
    if self.cat_dim == self.ndimension() - 2:
        # Concatenated along rows: each sub-tensor multiplies the full rhs and
        # the partial results stack along the same dim.
        # NOTE(review): the comprehension's loop variable `rhs` shadows the
        # promoted rhs above (intentional per-device copy).
        res_list = [t._matmul(rhs)
                    for t, rhs in zip(self.lazy_tensors, rhs_)]
        # copy result back to output device
        res_list = [x.to(output_device) for x in res_list]
        res = torch.cat(res_list, dim=self.cat_dim)
    elif self.cat_dim == self.ndimension() - 1:
        # Concatenated along columns: each sub-tensor sees its slice of rhs's
        # rows, and the partial products are summed.
        curr_idx = 0
        res_list = []
        index = [slice(None, None, None) for _ in range(rhs.ndimension())]
        for t, size, rhs in zip(self.lazy_tensors, self.cat_dim_sizes, rhs_):
            index[-2] = slice(curr_idx, curr_idx + size, None)
            res_list.append(t._matmul(rhs[index]))
            curr_idx += size
        # copy result back to output device
        res_list = [x.to(output_device) for x in res_list]
        res = torch.sum(torch.stack(res_list), dim=0)
    else:
        # Concatenated along a batch dim: broadcast rhs up to full rank, give
        # each sub-tensor its batch slice, and concatenate the results.
        while rhs.ndimension() < self.ndimension():
            rhs = rhs.unsqueeze(0)
        curr_idx = 0
        res_list = []
        index = [slice(None, None, None) for _ in range(self.ndimension())]
        for t, size, rhs in zip(self.lazy_tensors, self.cat_dim_sizes, rhs_):
            index[self.cat_dim] = slice(curr_idx, curr_idx + size, None)
            res_list.append(t._matmul(rhs[index]))
            curr_idx += size
        # copy result back to output device
        res_list = [x.to(output_device) for x in res_list]
        res = torch.cat(res_list, dim=self.cat_dim)
    if isvector:
        # Undo the column-matrix promotion.
        res = res.squeeze(-1)
    return res
def _size(self):
size = self.pre_cat_size + (self.cat_dim_len,) + self.post_cat_size
return torch.Size(size)
def _transpose_nonbatch(self):
    """Transpose the last two dims; the cat dim swaps with them when it is one of the two."""
    ndim = self.ndimension()
    if self.cat_dim == ndim - 2:
        new_dim = ndim - 1
    elif self.cat_dim == ndim - 1:
        new_dim = ndim - 2
    else:
        new_dim = self.cat_dim
    transposed = [t._transpose_nonbatch() for t in self.lazy_tensors]
    return self.__class__(*transposed, dim=new_dim, output_device=self.output_device)
def diag(self):
    # BUG FIX: docstring made raw -- ":math:`n \times n`" previously contained
    # a literal tab from the "\t" escape, corrupting rendered docs.
    r"""
    As :func:`torch.diag`, returns the diagonal of the matrix :math:`K` this LazyTensor represents as a vector.

    Returns:
        :obj:`torch.tensor`: The diagonal of :math:`K`. If :math:`K` is :math:`n \times n`, this will be a length
        n vector. If this LazyTensor represents a batch (e.g., is :math:`b \times n \times n`), this will be a
        :math:`b \times n` matrix of diagonals, one for each matrix in the batch.
    """
    size = self.size()
    if size[-1] != size[-2]:
        raise RuntimeError("Diag works on square matrices (or batches)")
    # Diagonal positions are the (i, i) row/column pairs.
    row_col_iter = torch.arange(0, size[-1], dtype=torch.long)
    if self.ndimension() == 3:
        # Pair every batch index with every diagonal position:
        # batch_iter = [0]*n + [1]*n + ...; row_col_iter tiled once per batch.
        batch_iter = torch.arange(0, size[0], dtype=torch.long)
        batch_iter = batch_iter.unsqueeze(1).repeat(1, size[1]).view(-1)
        row_col_iter = row_col_iter.unsqueeze(1).repeat(size[0], 1).view(-1)
        res = self._get_indices(row_col_iter, row_col_iter, batch_iter).view(size[0], size[1])
    else:
        res = self._get_indices(row_col_iter, row_col_iter)
    return res.to(self.device)
def __getitem__(self, *indices):
    """Index like a tensor; concrete (non-Cat) results are moved to the output device."""
    item = super().__getitem__(*indices)
    if isinstance(item, CatLazyTensor):
        return item
    return item.to(self.device)
def inv_matmul(self, right_tensor, left_tensor=None):
    """Solve against this tensor and move the result to the output device."""
    solution = super().inv_matmul(right_tensor, left_tensor)
    return solution.to(self.device)
def inv_quad(self, tensor):
    """Compute the inverse quadratic form and move it to the output device."""
    value = super().inv_quad(tensor)
    return value.to(self.device)
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
    """As the base implementation, with every returned tensor moved to the output device."""
    outputs = super().inv_quad_logdet(inv_quad_rhs, logdet, reduce_inv_quad)
    moved = [out.to(self.device) for out in outputs]
    return tuple(moved)
def matmul(self, other):
    """Multiply by ``other`` and move the product to the output device."""
    product = super().matmul(other)
    return product.to(self.device)
@property
def device(self):
    """The device results are returned on (the configured output device)."""
    return self.output_device
@property
def devices(self):
    """Devices of the constituent lazy tensors, in concatenation order."""
    return [tensor.device for tensor in self.lazy_tensors]
@property
def device_count(self):
    """Number of distinct devices spanned by the constituent tensors."""
    distinct = set(self.devices)
    return len(distinct)
def to(self, device_id):
    """
    Return a new CatLazyTensor whose ``output_device`` is ``device_id``.

    Warning: this does NOT move the constituent LazyTensors to ``device_id``;
    only the output device of the concatenation changes.
    """
    kwargs = {**self._kwargs, 'output_device': device_id}
    return self.__class__(*self._args, **kwargs)
def all_to(self, device_id):
    """
    Return a new CatLazyTensor with every constituent (and every movable
    constructor argument) moved to ``device_id``, which also becomes the
    output device.
    """
    new_args = [arg.to(device_id) if hasattr(arg, "to") else arg
                for arg in self._args]
    new_kwargs = {name: (val.to(device_id) if hasattr(val, "to") else val)
                  for name, val in self._kwargs.items()}
    new_kwargs['output_device'] = device_id
    return self.__class__(*new_args, **new_kwargs)
|
import re
import tempfile
import typing as tp
import webbrowser
from functools import cached_property
from typing import TYPE_CHECKING
from urllib.parse import urljoin
import httpx
from bs4 import BeautifulSoup, Tag
from robox._controls import Submit
from robox._form import Form
from robox._link import (
Link,
find_all_a_tags_with_href,
remove_duplicate_links,
remove_page_jumps_from_links,
)
from robox._table import Table
if TYPE_CHECKING:
from robox import Robox
T = tp.TypeVar("T", bound="Robox")
class BasePage:
    """Wraps an httpx response with parsed-HTML conveniences (forms, links, tables)."""

    def __init__(self, response: httpx.Response, robox: T) -> None:
        self.response = response
        self.content = self.response.content
        self.url = self.response.url
        self.robox = robox

    @property
    def status_code(self) -> int:
        """HTTP status code of the wrapped response."""
        return self.response.status_code

    @property
    def from_cache(self) -> bool:
        """True if the response came from a cache (attribute added by caching transports)."""
        try:
            return self.response.from_cache
        except AttributeError:
            return False

    @cached_property
    def parsed(self) -> BeautifulSoup:
        """Lazily parsed document body."""
        return BeautifulSoup(self.content, **self.robox.options.soup_kwargs)

    @cached_property
    def title(self) -> tp.Optional[str]:
        """Text of the <title> tag, or None when the page has no title."""
        title = self.parsed.title
        if title:
            return title.text
        return None

    @cached_property
    def description(self) -> tp.Optional[str]:
        """Content of the <meta name="description"> tag, or None when absent."""
        description = self.parsed.find("meta", {"name": "description"})
        if description:
            return description["content"]
        return None

    def get_form(self, *args: tp.Any, **kwargs: tp.Any) -> tp.Optional[Form]:
        """Return the first matching form.

        Raises:
            ValueError: if no form matches.
        """
        form = self.parsed.find(name="form", *args, **kwargs)
        if not form:
            raise ValueError("No form found")
        return Form(form)

    def get_forms(self, *args: tp.Any, **kwargs: tp.Any) -> tp.List[Form]:
        """Return all matching forms.

        Raises:
            ValueError: if no form matches.
        """
        forms = self.parsed.find_all(name="form", *args, **kwargs)
        if not forms:
            raise ValueError("No forms found")
        return [Form(form) for form in forms]

    def get_tables(self, *args: tp.Any, **kwargs: tp.Any) -> tp.List[Table]:
        """Return all matching tables.

        Raises:
            ValueError: if no table matches.
        """
        tables = self.parsed.find_all(name="table", *args, **kwargs)
        if not tables:
            raise ValueError("No tables found")
        return [Table(table) for table in tables]

    def _prepare_referer_header(self) -> tp.Dict[str, str]:
        """Build a Referer header from the current URL unless one is already set."""
        headers = {}
        if "Referer" not in self.response.headers:
            headers["Referer"] = str(self.response.url)
        return headers

    def get_links(
        self, only_internal_links: bool = False, *args: tp.Any, **kwargs: tp.Any
    ) -> tp.Generator[Link, None, None]:
        """Yield de-duplicated links found on the page.

        Args:
            only_internal_links: when True, keep only links whose resolved
                host matches this page's host.
        """
        links = find_all_a_tags_with_href(self.parsed, *args, **kwargs)
        links = remove_page_jumps_from_links(links)
        links = remove_duplicate_links(links)
        if only_internal_links:
            # BUG FIX: the boolean flag was previously *called* as a function
            # (``only_internal_links(links, self.url.host)``), raising
            # TypeError.  Filter to same-host links instead.
            links = [
                (href, text)
                for href, text in links
                if httpx.URL(urljoin(str(self.url), href)).host == self.url.host
            ]
        for href, text in links:
            yield Link(href=href, text=text.strip())

    def get_links_by_regex(
        self, regex: str, *args: tp.Any, **kwargs: tp.Any
    ) -> tp.List[Link]:
        """Return links whose href matches ``regex``."""
        return [
            link
            for link in self.get_links(*args, **kwargs)
            if re.search(regex, link.href)
        ]

    def _get_links_by_text(
        self, text: str, *args: tp.Any, **kwargs: tp.Any
    ) -> tp.List[Link]:
        """Return links whose text equals ``text`` (case-insensitive)."""
        return [
            link
            for link in self.get_links(*args, **kwargs)
            if text.lower() == link.text.lower()
        ]

    def _get_link_text(self, text: str) -> Link:
        """Return the unique link whose text equals ``text``.

        Raises:
            ValueError: if zero or multiple links match.
        """
        # BUG FIX: previously called self.get_links_by_text, which does not
        # exist (the helper is named _get_links_by_text).
        links = self._get_links_by_text(text)
        if not links:
            raise ValueError(f"No link with text {text} found")
        if len(links) > 1:
            raise ValueError(f"Multiple links with text {text} found")
        return links[0]

    def debug_page(self) -> None:
        """Dump the parsed HTML to a temp file and open it in a browser."""
        with tempfile.NamedTemporaryFile("w", delete=False, suffix=".html") as f:
            url = f"file://{f.name}"
            f.write(str(self.parsed))
        webbrowser.open(url)

    def __hash__(self) -> int:
        return hash(tuple([self.parsed, self.url]))

    def __eq__(self, other: object) -> bool:
        return (
            isinstance(other, BasePage)
            and self.parsed == other.parsed
            and self.url == other.url
        )

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} url={self.url}>"
class Page(BasePage):
    """Synchronous page: form submission and link following return new Pages."""

    def submit_form(
        self, form: Form, submit_button: tp.Union[str, Submit] = None
    ) -> "Page":
        """Submit ``form`` (optionally via ``submit_button``) and return the next page."""
        request_kwargs = form.to_httpx(submit_button)
        return self.robox.open(
            url=self.response.url.join(form.action),
            method=form.method,
            headers=self._prepare_referer_header(),
            **request_kwargs,
        )

    def follow_link(self, link: Link) -> "Page":
        """Open ``link`` resolved against the current URL."""
        target = urljoin(str(self.url), link.href)
        return self.robox.open(target)

    def follow_link_by_tag(self, tag: Tag) -> "Page":
        """Open the href of a parsed anchor ``tag`` resolved against the current URL."""
        target = urljoin(str(self.url), tag["href"])
        return self.robox.open(target)

    def follow_link_by_text(self, text: str) -> "Page":
        """Find the unique link whose text equals ``text`` and follow it."""
        return self.follow_link(self._get_link_text(text))
class AsyncPage(BasePage):
    """Asynchronous page: form submission and link following await new AsyncPages."""

    async def submit_form(
        self, form: Form, submit_button: tp.Union[str, Submit] = None
    ) -> "AsyncPage":
        """Submit ``form`` (optionally via ``submit_button``) and return the next page."""
        request_kwargs = form.to_httpx(submit_button)
        return await self.robox.open(
            url=self.response.url.join(form.action),
            method=form.method,
            headers=self._prepare_referer_header(),
            **request_kwargs,
        )

    async def follow_link(self, link: Link) -> "AsyncPage":
        """Open ``link`` resolved against the current URL."""
        target = urljoin(str(self.url), link.href)
        return await self.robox.open(target)

    async def follow_link_by_tag(self, tag: Tag) -> "AsyncPage":
        """Open the href of a parsed anchor ``tag`` resolved against the current URL."""
        target = urljoin(str(self.url), tag["href"])
        return await self.robox.open(target)

    async def follow_link_by_text(self, text: str) -> "AsyncPage":
        """Find the unique link whose text equals ``text`` and follow it."""
        link = self._get_link_text(text)
        return await self.follow_link(link)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Indictrans and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
#from erpnext.utilities.address_and_contact import load_address_and_contact
import json
class FFWW(Document):
    """Links a Customer with a Contact and keeps contact child rows in sync."""

    def validate(self):
        """Run row-level validations and sync the linked contact's status."""
        self.validate_designation()
        self.validate_ffww()
        self.validate_duplication_emailid()
        # self.validate_dupicate_designation()
        self.set_fww_name()
        if self.contact:
            self.update_contact_status()

    def on_update(self):
        # if any extra row is added in contact details child table same data
        # will be reflected in contact doctype against same contact.
        if self.get('more_contact_details'):
            for d in self.get('more_contact_details'):
                # NOTE(review): `or self.name` makes this condition almost
                # always true for saved docs -- confirm intended semantics.
                if d.ffww == 'New FFWW 1' or self.name:
                    if d.contact_name:
                        contact = frappe.get_doc("Contact Details", d.contact_name)
                        contact.ffww = self.name
                        contact.save()
                    else:
                        main_contact = frappe.get_doc('Contact', self.contact)
                        # BUG FIX: `not filter(...)` was always False on
                        # Python 3 (a filter object is truthy), so the
                        # duplicate-email guard never fired.  Use any().
                        if not any(co.email_id == d.email_id for co in main_contact.contacts):
                            ch = main_contact.append('contacts', {})
                            ch.contact_type = d.contact_type
                            ch.country_name = d.country_name
                            ch.country_code = d.country_code
                            ch.mobile_no = d.mobile_no
                            ch.email_id = d.email_id
                            ch.landline = d.landline
                            ch.ffww = self.name
                            main_contact.save()
                            if ch.name:
                                ffww_contact = frappe.get_doc("FFWW Contact Details", d.name)
                                ffww_contact.contact_name = ch.name
                                ffww_contact.save()
                if d.name and d.ffww == 'New FFWW 1':
                    ffww_contact = frappe.get_doc("FFWW Contact Details", d.name)
                    ffww_contact.ffww = self.name
                    ffww_contact.save()

    def validate_designation(self):
        """Require at least one row in the designation child table."""
        if not self.get('designation'):
            frappe.msgprint("At least one designation must be specified in designation child table", raise_exception=1)

    def validate_ffww(self):
        """Reject a (customer, contact) pair already linked by another FFWW record."""
        # BUG FIX: the previous version interpolated user data directly into
        # the SQL string (injection-prone and broken by quotes) and ran the
        # same query twice.  Parameterize and query once.
        existing = frappe.db.sql(
            """select name from `tabFFWW`
               where customer=%s and contact=%s and name!=%s""",
            (self.customer, self.contact, self.name), as_list=1)
        if existing:
            frappe.msgprint(
                "Customer %s already linked with contact %s in record %s"
                % (self.customer, self.contact, existing[0][0]),
                raise_exception=1)

    def validate_dupicate_designation(self):
        """Reject duplicate designation rows (currently disabled in validate())."""
        designation_list = []
        if self.get('designation'):
            for d in self.get('designation'):
                if d.designation not in designation_list:
                    designation_list.append(d.designation)
                else:
                    frappe.msgprint("Duplicate designation name is not allowed", raise_exception=1)
                    break

    def validate_duplication_emailid(self):
        """Reject duplicate email ids among the extra contact rows."""
        email_list = []
        if self.get('more_contact_details'):
            for d in self.get('more_contact_details'):
                if d.email_id not in email_list:
                    email_list.append(d.email_id)
                else:
                    frappe.msgprint("Duplicate Email ID is not allowed", raise_exception=1)
                    break

    def update_contact_status(self):
        """Mark the linked Contact as Active."""
        contact = frappe.get_doc('Contact', self.contact)
        contact.status = 'Active'
        contact.save()

    def set_fww_name(self):
        """Mirror this document's name into the ffww_record field."""
        self.ffww_record = self.name

    def clear_child_table(self):
        """Empty the extra contact-details child table."""
        self.set('more_contact_details', [])
# Create address............................................
@frappe.whitelist()
def make_address(source_name, target_doc=None):
    """Whitelisted wrapper: create an Address mapped from the given FFWW record."""
    return _make_address(source_name, target_doc)
def _make_address(source_name, target_doc=None, ignore_permissions=False):
    """Map an FFWW document onto a new Address document."""

    def set_missing_values(source, target):
        # Nothing extra to fill in (kept as the mapper's post-process hook).
        pass

    mapping = {
        "FFWW": {
            "doctype": "Address",
            "field_map": {
                "contact": "contact"
                # "company_name": "customer_name",
                # "contact_no": "phone_1",
                # "fax": "fax_1"
            }
        }
    }
    return get_mapped_doc("FFWW", source_name, mapping, target_doc,
                          set_missing_values,
                          ignore_permissions=ignore_permissions)
@frappe.whitelist()
def make_contact(contact=None):
    """Return the contact-detail rows belonging to ``contact`` (possibly empty)."""
    # BUG FIX (simplification): both branches of the old if/else returned the
    # same value, so the length check was dead code.
    return frappe.db.get_values(
        'Contact Details', {'parent': contact},
        ['contact_type', 'email_id', 'mobile_no', 'country_code', 'ffww', 'name', 'country_name'])
def get_active_customers(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search query: Customers whose name or customer_name matches ``txt``.

    Signature follows frappe's custom-query convention; only ``txt`` is used.
    """
    return frappe.db.sql("""select name, customer_name
        from `tabCustomer` where customer_name like %(txt)s
        or name like %(txt)s""", {"txt": "%%%s%%" % txt}, as_list=1)
def get_contact_list(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search query: Contacts whose name, email or mobile matches ``txt``.

    Signature follows frappe's custom-query convention; only ``txt`` is used.
    """
    # BUG FIX: a stray trailing "|" token after the closing parenthesis made
    # this module a SyntaxError; removed.
    return frappe.db.sql(""" select name, email, mobile
        from `tabContact`
        where name like %(txt)s or email like %(txt)s
        or mobile like %(txt)s """, {"txt": "%%%s%%" % txt}, as_list=1)
<reponame>fluiddyn/transonic<gh_stars>10-100
"""Capture the external nodes used in functions
===============================================
"""
import gast as ast
from transonic.analyses import beniget
from transonic.analyses import extast
class CaptureX(ast.NodeVisitor):
    """Capture the external nodes used in functions, classes and blocks.

    Walks the given functions (and optional statement blocks) and records, in
    ``self.external``, every module-level definition they depend on, using
    beniget's def-use / use-def chains.
    """

    def __init__(
        self,
        functions,
        module_node,
        ancestors=None,
        defuse_chains=None,
        usedef_chains=None,
        consider_annotations=True,
        blocks=None,
    ):
        # Build def-use / use-def chains and the ancestor table from scratch
        # unless the caller supplies precomputed ones.
        if defuse_chains is None:
            self.du_chains = du = beniget.DefUseChains()
            du.visit(module_node)
            self.ud_chains = beniget.UseDefChains(du)
            self.ancestors = beniget.Ancestors()
            self.ancestors.visit(module_node)
        else:
            self.du_chains = defuse_chains
            self.ud_chains = usedef_chains
            self.ancestors = ancestors
        self.consider_annotations = consider_annotations
        # Annotation node currently being visited (None outside annotations).
        self._annot = None
        # External definition nodes, in discovery order (no duplicates).
        self.external = []
        self.visited_external = set()
        self.functions = functions
        for func in functions:
            self.func = func
            self.visit(func)
        if blocks is None:
            return
        # Also scan free-standing statement blocks; each block is attributed
        # to its enclosing function.
        for nodes in blocks:
            if nodes:
                node = nodes[0]
                self.func = ancestors.parentFunction(node)
            for node in nodes:
                self.visit(node)

    def visit_Name(self, node):
        """Record the definition of any name loaded from outside the current function."""
        parent_node = self.ancestors.parents(node)[-1]
        if (
            isinstance(parent_node, ast.FunctionDef)
            and node == parent_node.returns
            and not self.consider_annotations
        ):
            # Skip return annotations when annotations are ignored.
            return
        # register load of identifiers not locally defined
        if isinstance(node.ctx, ast.Load):
            try:
                self.ud_chains.chains[node]
            except KeyError:
                # Known beniget issue for some return annotations: warn and
                # continue instead of aborting the whole capture.
                if not (
                    isinstance(parent_node, ast.FunctionDef)
                    and node == parent_node.returns
                ):
                    raise
                from warnings import warn

                warn(f"BUG Beniget (node.id={node.id}), but we try to continue!")
                return
            for def_ in self.ud_chains.chains[node]:
                try:
                    parents = self.ancestors.parents(def_.node)
                except KeyError:
                    return  # a builtin
                if self.func not in parents:
                    # The definition lives outside the current function:
                    # find the statement (or function) that defines it.
                    if isinstance(def_.node, ast.FunctionDef):
                        defining_node = def_.node
                    else:
                        defining_node = self.ancestors.parentStmt(def_.node)
                    if defining_node not in self.visited_external:
                        # Recurse first so dependencies precede dependents.
                        self.rec(defining_node)
                    if defining_node in self.functions:
                        return
                    if (
                        self.consider_annotations == "only"
                        and self._annot is None
                    ):
                        return
                    if defining_node not in self.visited_external:
                        self.visited_external.add(defining_node)
                        self.external.append(defining_node)
        elif (
            isinstance(node.ctx, (ast.Param, ast.Store))
            and self.consider_annotations
        ):
            # Stored/parameter names contribute only via their annotations.
            if node.annotation is None:
                return
            self._annot = node.annotation
            self.visit(node.annotation)
            self._annot = None

    def visit_ClassDef(self, node_class):
        """Visit a class body; annotated assignments carry annotation context."""
        for node in node_class.body:
            if isinstance(node, ast.AnnAssign):
                self._annot = node.annotation
            self.visit(node)
            self._annot = None

    def visit_AnnAssign(self, node):
        """Visit annotated assignments: the annotation always, the value unless 'only'."""
        if self.consider_annotations:
            self._annot = node.annotation
            self.visit(node.annotation)
            self._annot = None
        if node.value is not None and self.consider_annotations != "only":
            self.visit(node.value)

    def rec(self, node):
        "walk definitions to find their operands's def"
        if node is self.func:
            return
        if isinstance(node, ast.Assign):
            self.visit(node.value)
        elif isinstance(node, ast.FunctionDef):
            # tmp: to deal with @include
            node.decorator_list = []
            old_func = self.func
            self.func = node
            self.visit(node)
            self.func = old_func
        # TODO: implement this for AugAssign etc

    def make_code_external(self):
        """Unparse all captured external definitions into one code string."""
        code = []
        for node in self.external:
            code.append(extast.unparse(node).strip())
        return "\n".join(code)
if __name__ == "__main__":
    # Manual smoke test: capture everything `bar` needs from module scope.
    # NOTE: this first assignment is shadowed by the second (kept as an
    # alternate example, like the commented lines below).
    code = "a = 1; b = [a, a]\ndef foo():\n    return b"
    # code = "a = 1; b = len([a, a])\ndef foo():\n    return b"
    # code = "import numpy as np\na = np.int(1)\ndef foo():\n    return np.zeros(a)"
    code = """
a = 1
def fooo():
    return 1
def foo():
    return a + fooo()
def bar():
    return foo()
"""
    module = extast.parse(code)
    # body[3] is `bar`; capture its transitive external dependencies.
    function = module.body[3]
    capturex = CaptureX((function,), module)
    print(capturex.make_code_external())
|
<filename>loxpy/Parser.py
from typing import List
from .Expr import *
from .ParserError import ParseError
from .Stmt import *
from .Tokens import TokenType, Token
class Parser:
    """Recursive-descent parser for Lox.

    Consumes the scanner's token list and builds a list of statement AST
    nodes.  Each grammar production is implemented by one method, with the
    production shown in a comment above it.  Errors are reported via
    ParseError and the parser re-synchronizes at statement boundaries.
    """

    def __init__(self, tokens: List[Token]):
        self.tokens = tokens
        self.current = 0  # index of the next unconsumed token

    # expression -> assignment ;
    def expression(self) -> Expr:
        return self.assignment()

    # equality -> comparison ( ( "!=" | "==" ) comparison )* ;
    def equality(self) -> Expr:
        expr = self.comparison()
        # The while-loop implements the grammar's `*`; match() implements `|`.
        while self.match(TokenType.BANG_EQUAL, TokenType.EQUAL_EQUAL):
            operator = self.previous()
            right = self.comparison()
            expr = Binary(expr, operator, right)
        return expr

    # comparison -> addition ( ( ">" | ">=" | "<" | "<=" ) addition )* ;
    def comparison(self) -> Expr:
        expr = self.addition()
        while self.match(TokenType.GREATER, TokenType.GREATER_EQUAL, TokenType.LESS, TokenType.LESS_EQUAL):
            operator = self.previous()
            right = self.addition()
            expr = Binary(expr, operator, right)
        return expr

    # addition -> multiplication (("-"|"+") multiplication)*;
    def addition(self) -> Expr:
        expr = self.multiplication()
        while self.match(TokenType.MINUS, TokenType.PLUS):
            operator = self.previous()
            right = self.multiplication()
            expr = Binary(expr, operator, right)
        return expr

    # multiplication -> unary (("/"|"*") unary)*;
    def multiplication(self) -> Expr:
        expr = self.unary()
        while self.match(TokenType.SLASH, TokenType.STAR):
            operator = self.previous()
            right = self.unary()
            expr = Binary(expr, operator, right)
        return expr

    # unary -> ("!" | "-") unary | call ;
    def unary(self) -> Expr:
        if self.match(TokenType.BANG, TokenType.MINUS):
            operator = self.previous()
            right = self.unary()
            return Unary(operator, right)
        else:
            return self.call()

    # call -> primary ( "(" arguments? ")" )* ;
    def call(self):
        expr = self.primary()
        while True:
            if self.match(TokenType.LEFT_PAREN):
                expr = self.finishCall(expr)  # parse the argument list
            else:
                break
        return expr

    # primary -> literal | IDENTIFIER | "(" expression ")" ;
    def primary(self) -> Expr:
        if self.match(TokenType.FALSE): return Literal(False)
        if self.match(TokenType.TRUE): return Literal(True)
        if self.match(TokenType.NIL): return Literal(None)
        if self.match(TokenType.NUMBER, TokenType.STRING):
            return Literal(self.previous().literal)
        if self.match(TokenType.IDENTIFIER):
            return Variable(self.previous())
        if self.match(TokenType.LEFT_PAREN):
            expr = self.expression()
            self.consume(TokenType.RIGHT_PAREN, "Expect ')' after expression")
            return Grouping(expr)
        raise self.error(self.peek(), "Expect expression.")

    # --- primitive token operations --------------------------------------

    def match(self, *types: TokenType) -> bool:
        """Consume the current token and return True if it is any of the given types."""
        for type in types:
            if self.check(type):
                self.advance()
                return True
        return False

    def consume(self, type: TokenType, message: str) -> Token:
        """Consume the current token if it matches ``type``, else raise a parse error."""
        if self.check(type):
            return self.advance()
        else:
            raise self.error(self.peek(), message)

    def error(self, token: Token, message: str):
        """Report a parse error and return it (the caller decides whether to raise)."""
        err = ParseError(token, message)
        err.report()
        return err

    def check(self, tokenType: TokenType) -> bool:
        """Peek at the current token without consuming it."""
        if self.isAtEnd(): return False
        return self.peek().type == tokenType

    def advance(self) -> Token:
        """Consume and return the current token (no-op at EOF)."""
        if not self.isAtEnd(): self.current += 1
        return self.previous()

    def isAtEnd(self) -> bool:
        return self.peek().type == TokenType.EOF

    def peek(self) -> Token:
        return self.tokens[self.current]

    def previous(self) -> Token:
        return self.tokens[self.current - 1]

    def sychronize(self) -> None:
        """Discard tokens until a likely statement boundary after a parse error.

        (The original misspelling of "synchronize" is kept for compatibility.)
        """
        self.advance()
        while not self.isAtEnd():
            if self.previous().type == TokenType.SEMICOLON: return
            # BUG FIX: TokenType.FUN appeared twice in this list.
            if self.peek().type in [TokenType.CLASS,
                                    TokenType.FUN,
                                    TokenType.VAR,
                                    TokenType.IF,
                                    TokenType.WHILE,
                                    TokenType.PRINT,
                                    TokenType.RETURN]:
                return
            self.advance()

    # --- statements -------------------------------------------------------

    def parse(self) -> List[Stmt]:
        """Parse the whole token stream into a list of statements."""
        statements = []
        while not self.isAtEnd():
            statements.append(self.declaration())
        return statements

    def statement(self) -> Stmt:
        if (self.match(TokenType.FOR)): return self.forStatement()
        if (self.match(TokenType.IF)): return self.ifStatement()
        if (self.match(TokenType.PRINT)): return self.printStatement()
        if (self.match(TokenType.RETURN)): return self.returnStatement()
        if (self.match(TokenType.WHILE)): return self.whileStatement()
        if (self.match(TokenType.LEFT_BRACE)): return Block(self.block())
        return self.expressionStatement()

    def printStatement(self) -> Stmt:
        value = self.expression()
        self.consume(TokenType.SEMICOLON, "Expect ';' after value.")
        return Print(value)

    def expressionStatement(self) -> Stmt:
        """An expression statement is an expression terminated by a semicolon."""
        expr = self.expression()
        self.consume(TokenType.SEMICOLON, "Expect ';' after expression.")
        return Expression(expr)

    def block(self) -> List[Stmt]:
        statements = []
        while not self.check(TokenType.RIGHT_BRACE) and not self.isAtEnd():
            statements.append(self.declaration())
        self.consume(TokenType.RIGHT_BRACE, "Expect '}' after block.")
        return statements

    def assignment(self):
        expr = self.or_()
        if self.match(TokenType.EQUAL):
            equals = self.previous()
            value = self.assignment()
            # Only plain variables are valid assignment targets.
            if isinstance(expr, Variable):
                name = expr.name
                return Assign(name, value)
            self.error(equals, "Invalid assignment target.")
        return expr

    def declaration(self):
        try:
            if self.match(TokenType.FUN): return self.function('function')
            if self.match(TokenType.VAR): return self.varDeclaration()
            return self.statement()
        except ParseError:
            # Recover at the next statement boundary and keep parsing.
            self.sychronize()
            return None

    def varDeclaration(self):
        name = self.consume(TokenType.IDENTIFIER, "Expect variable name.")
        initializer = None
        if self.match(TokenType.EQUAL):
            initializer = self.expression()
        self.consume(TokenType.SEMICOLON, "Expect ';' after variable declaration")
        return Var(name, initializer)

    def ifStatement(self):
        self.consume(TokenType.LEFT_PAREN, "Expect '(' after if.")
        condition = self.expression()
        self.consume(TokenType.RIGHT_PAREN, "Expect ')' after if condition.")
        thenBranch = self.statement()
        elseBranch = None
        if self.match(TokenType.ELSE):
            elseBranch = self.statement()
        return If(condition, thenBranch, elseBranch)

    def or_(self):
        expr = self.and_()
        while self.match(TokenType.OR):
            operator = self.previous()
            right = self.and_()
            expr = Logical(expr, operator, right)
        return expr

    def and_(self):
        expr = self.equality()
        while self.match(TokenType.AND):
            operator = self.previous()
            right = self.equality()
            expr = Logical(expr, operator, right)
        return expr

    def whileStatement(self):
        self.consume(TokenType.LEFT_PAREN, "Expect '(' after 'while'.")
        condition = self.expression()
        self.consume(TokenType.RIGHT_PAREN, "Expect ')' after condition.")
        body = self.statement()
        return While(condition, body)

    def forStatement(self):
        """Desugar `for (init; cond; incr) body` into while-loop AST nodes."""
        self.consume(TokenType.LEFT_PAREN, "Expect '(' after 'for'.")
        if self.match(TokenType.SEMICOLON):
            initializer = None
        elif self.match(TokenType.VAR):
            initializer = self.varDeclaration()
        else:
            initializer = self.expressionStatement()
        condition = None
        if not self.check(TokenType.SEMICOLON):
            condition = self.expression()
        self.consume(TokenType.SEMICOLON, "Expect ';' after loop condition.")
        increment = None
        if not self.check(TokenType.RIGHT_PAREN):
            increment = self.expression()
        self.consume(TokenType.RIGHT_PAREN, "Expect ')' after for clauses.")
        body = self.statement()
        # Desugaring: run the increment after the body, default the condition
        # to true, and scope the initializer outside the loop.
        if increment is not None:
            body = Block([body, Expression(increment)])
        if condition is None:
            condition = Literal(True)
        body = While(condition, body)
        if initializer is not None:
            body = Block([initializer, body])
        return body

    def finishCall(self, callee: Expr):
        """Parse the argument list after '(' has been consumed."""
        arguments = []
        if not self.check(TokenType.RIGHT_PAREN):
            arguments.append(self.expression())
            while self.match(TokenType.COMMA):
                if len(arguments) >= 8: self.error(self.peek(), "Cannot have more than 8 arguments.")
                arguments.append(self.expression())
        paren = self.consume(TokenType.RIGHT_PAREN, "Expect ')' after arguments.")
        return Call(callee, paren, arguments)

    def function(self, kind: str) -> Function:
        """Parse a function declaration of the given ``kind`` ('function'/'method')."""
        # BUG FIX: the name-consume message wrongly read
        # "Expect '(' after {kind} name." (copied from the line below).
        name = self.consume(TokenType.IDENTIFIER, f"Expect {kind} name.")
        self.consume(TokenType.LEFT_PAREN, f"Expect '(' after {kind} name.")
        parameters: List[Token] = []
        if not self.check(TokenType.RIGHT_PAREN):
            # do-while emulation: at least one parameter, comma-separated.
            while True:
                if len(parameters) >= 8:
                    self.error(self.peek(), "Cannot have more than 8 parameters.")
                parameters.append(self.consume(TokenType.IDENTIFIER, "Expect parameter name."))
                if not self.match(TokenType.COMMA):
                    break
        self.consume(TokenType.RIGHT_PAREN, "Expect ')' after parameters.")
        self.consume(TokenType.LEFT_BRACE, "Expect '{{' before {} body.".format(kind))
        body = self.block()
        return Function(name, parameters, body)

    def returnStatement(self):
        keyword = self.previous()
        value = None
        if not self.check(TokenType.SEMICOLON):
            value = self.expression()
        self.consume(TokenType.SEMICOLON, "Expect ';' after return value.")
        return Return(keyword, value)
|
<filename>Parallel_ACO_Solver/Docker-master/master.py
import json
import logging
import os
import random
import socket
import string
# import hug
import sys
from collections import namedtuple
from threading import Thread
# Quiet the web framework's request logging.
logging.getLogger("falcon").setLevel(logging.WARNING)

input_problem = 'text_problem_good.txt'

# Problem-node and ant records used by the solver protocol.
NODE = namedtuple('NODE', 'uid,address,demand,tis,tie,st,isReal,becomeRealProbability,db_uid,lat,lng')
ANT = namedtuple('ANT', 'capacity,tis,tie')

# Address/coordinate database.  BUG FIX: use a context manager so the file
# handle opened for json.load is actually closed.
with open('final_matrix.json') as _db_file:
    local_db = json.load(_db_file)

SLAVES = {}
MAX_BUFFER_SIZE = 1024 * 5000
# BUG FIX: os.getenv returns a *string* when the variable is set (but the int
# default otherwise); cast so socket.bind always receives an int port.
BIND_PORT = int(os.getenv('BIND_PORT', 8081))
BIND_HOST = os.getenv('BIND_HOST', '0.0.0.0')
VALID_EVENTS = namedtuple('events', 'NEW_SLAVE,SLAVE_CHECK,NEW_TASK')('new_slave', 'check_slave', 'new_task')
def setup_logger(verbosity_level, name=None):
    """(Re)configure the root logger: reset handlers, set level, log to stdout."""
    root = logging.getLogger()
    root.handlers = []  # drop any previously installed handlers
    if name:
        root.name = name
    root.setLevel(verbosity_level)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(verbosity_level)
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    root.addHandler(stream_handler)
def get_db_info(addr):
    """Resolve a street address to (db_uid, lat, lng) via the local JSON database.

    NOTE(review): when ``addr`` is unknown, ``uid`` is None and the coordinate
    lookup raises KeyError(None) -- confirm callers only pass known addresses.
    """
    uid = local_db['addresses'].get(addr, None)
    lat, lng = local_db['coordinates'][uid]
    return uid, lat, lng
# @hug.post()
# def new_task(raw_task: hug.types.text):
# """Add new task"""
#
# def get_solution(host, port, task, index, key):
# try:
# s = socket.socket()
# s.connect((host, port))
# s.send(task.encode())
# s.settimeout(50)
# tmp = s.recv(MAX_BUFFER_SIZE)
# logging.info('Got raw solution from slave %s: %s' % (key, tmp))
# results[index] = (key, json.loads(tmp.decode('utf-8')))
# except Exception as e:
# logging.error('Exception in thread: %s' % e)
# results[index] = (k, {})
# return True
#
# logging.warning('Master got new task!')
# logging.warning('Going to create %s threads' % len(SLAVES))
# if not SLAVES:
# return hug.HTTP_500
# threads = []
# results = [{} for _ in SLAVES.items()]
# i = 0
# for k, v in SLAVES.items():
# process = Thread(target=get_solution, args=[v['host'], v['port'], raw_task, i, k])
# process.start()
# threads.append(process)
# i += 1
#
# for process in threads:
# process.join()
#
# logging.warning('Got %s results' % len(results))
# # print('Here they are: %s' % results)
# best_solution = None
# for k, result in results:
# if not result:
# logging.warning('Something wrong with the %s slave, removing it!' % k)
# SLAVES.pop(k, None)
# continue
# logging.warning(result[0]['totals'])
# res_dist = result[0]['totals']['distance']
# if not best_solution:
# best_solution = result
# else:
# if res_dist < best_solution[0]['totals']['distance']:
# best_solution = result
# return best_solution
#
#
# @hug.put()
# def new_slave(slave_ip: hug.types.text, slave_port: hug.types.number):
# """Add new slave"""
# key = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))
# SLAVES[key] = {'host': slave_ip, 'port': slave_port, 'got_solution': False}
# logging.warning('New slave: %s' % SLAVES[key])
# return {'key': key}
#
#
# @hug.put()
# def check_slave(slave_ip: hug.types.text, slave_port: hug.types.number, slave_key: hug.types.text):
# """Add new slave"""
# if slave_key not in SLAVES:
# SLAVES[slave_key] = {'host': slave_ip, 'port': slave_port, 'got_solution': False}
# logging.warning('New slave: %s' % SLAVES)
# return {'key': slave_key}
#
#
# @hug.get()
# def get_slaves():
# return SLAVES
def solve_task(raw_task, task_setter_connection=None, is_async=True):
    """Fan the task out to every registered slave and keep the best solution.

    Args:
        raw_task: serialized task string, sent verbatim to each slave.
        task_setter_connection: socket to push the winning solution back on
            (required when ``is_async`` is true).
        is_async: when True, send the result over ``task_setter_connection``;
            when False, return it.  (Renamed from ``async``, which is a
            reserved keyword since Python 3.7 and made this module a
            SyntaxError.)

    Returns:
        The solution with the smallest total distance when ``is_async`` is
        False, otherwise None.
    """
    def get_solution(conn, task, index, key):
        """Thread worker: send the task to one slave and record its reply."""
        try:
            conn.sendall(task.encode())
            conn.settimeout(60 * 15)
            tmp = conn.recv(MAX_BUFFER_SIZE).decode('utf-8')
            # Replies are JSON arrays; keep reading until the closing bracket.
            while tmp[-1] != ']':
                tmp += conn.recv(MAX_BUFFER_SIZE).decode('utf-8')
            logging.debug('Got raw solution from slave %s: %s' % (key, tmp))
            results[index] = (key, json.loads(tmp))
        except Exception as e:
            logging.error('Exception in thread: %s' % e)
            # BUG FIX: record under this worker's own `key` (previously used
            # the loop variable `k` captured from the enclosing scope).
            results[index] = (key, {})
        return True

    threads = []
    results = [{} for _ in SLAVES.items()]
    i = 0
    for k, v in SLAVES.items():
        process = Thread(target=get_solution, args=[v['connection'], raw_task, i, k])
        process.start()
        threads.append(process)
        i += 1
    for process in threads:
        process.join()
    logging.warning('Got %s result(s)' % len(results))
    best_solution = None
    for k, result in results:
        if not result:
            # Empty result means the worker failed; drop the slave.
            logging.warning('Something wrong with the %s slave, removing it!' % k)
            SLAVES.pop(k, None)
            continue
        logging.warning(result[0]['totals'])
        res_dist = result[0]['totals']['distance']
        if not best_solution:
            best_solution = result
        else:
            if res_dist < best_solution[0]['totals']['distance']:
                best_solution = result
    if is_async:
        logging.info('Sending the solution back!')
        # '&' terminates the message on the receiving side.
        task_setter_connection.sendall((json.dumps(best_solution) + '&').encode('utf-8'))
    else:
        return best_solution
def start_server():
    """Bind the master socket and serve slave registrations and task
    submissions forever.

    Protocol: each incoming message is a JSON object terminated by '}' with
    an 'event' field matching VALID_EVENTS.
    """
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without waiting out TIME_WAIT sockets.
    soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    logging.info('Rocket launching!')
    try:
        soc.bind((BIND_HOST, BIND_PORT))
        logging.info('Binding on port %s completed' % BIND_PORT)
    except socket.error as msg:
        # BUG FIX: previously logged sys.exc_info() and ignored the captured
        # exception `msg`; log the exception itself.
        logging.error('Bind failed. Error : ' + str(msg))
        sys.exit()
    soc.listen(10)
    logging.info('Master is ready!')
    while True:
        conn, addr = soc.accept()
        ip, port = str(addr[0]), str(addr[1])
        logging.debug('Accepting connection from ' + ip + ':' + port)
        try:
            # Read until the closing brace of the JSON object arrives.
            initial_event = conn.recv(MAX_BUFFER_SIZE).decode('utf-8')
            while initial_event[-1] != '}':
                initial_event += conn.recv(MAX_BUFFER_SIZE).decode('utf-8')
            logging.debug('Master got message: %s' % initial_event)
            initial_event = json.loads(initial_event)
            event = initial_event.get('event', None)
            if not event:
                logging.error('Corrupted data! Event not found')
                continue
            if event == VALID_EVENTS.NEW_SLAVE:
                # Register the slave under a fresh 5-character key and keep
                # its connection open for later task dispatch.
                key = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))
                SLAVES[key] = {'host': ip, 'port': port, 'connection': conn}
                logging.warning('New slave: %s' % key)
                conn.sendall(json.dumps({'event': event, 'data': {'key': key}}).encode('utf-8'))
            elif event == VALID_EVENTS.NEW_TASK:
                logging.warning('Got new task, preparing guns!')
                raw_task = initial_event.get('data', {}).get('raw_task', '')
                logging.info('Solving...')
                # Solve in a background thread so the accept loop stays free.
                process = Thread(target=solve_task, args=[json.dumps({'event': event, 'data': {'raw_task': raw_task}}),
                                                          conn])
                process.start()
            else:
                logging.error('Unknown event: %s!' % event)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. logging.exception records the
            # traceback that was previously printed by traceback.print_exc().
            logging.exception('Terrible error!')
    soc.close()
# Entry point: configure logging for the master process, then serve forever.
if __name__ == '__main__':
    setup_logger(logging.INFO, name='master')
    start_server()
|
<reponame>DiceNameIsMy/recruiting<gh_stars>0
# Generated by Django 3.2.4 on 2021-06-27 14:44
from django.db import migrations, models
import recruiting.utils.handler
class Migration(migrations.Migration):
    """Initial schema for the recruiting app: Experience, KeySkill, Respond,
    Resume and Vacancy models.

    Auto-generated by Django 3.2.4 — do not edit by hand; create a follow-up
    migration instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # A single period of employment on a resume.
        migrations.CreateModel(
            name='Experience',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('company', models.CharField(max_length=256, verbose_name='Компания')),
                ('position', models.CharField(max_length=128, verbose_name='Должность')),
                ('start_date', models.DateField(verbose_name='Дата начала работы')),
                ('end_date', models.DateField(blank=True, null=True, verbose_name='Дата окончания работы')),
                ('to_present', models.BooleanField(verbose_name='Работает по настоящее время:')),
            ],
            options={
                'verbose_name': 'Опыт работы',
                'verbose_name_plural': 'Опыт работы',
            },
        ),
        # A tag-like skill that can be attached to resumes.
        migrations.CreateModel(
            name='KeySkill',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=64, verbose_name='Название')),
            ],
            options={
                'verbose_name': 'Навык',
                'verbose_name_plural': 'Навыки',
            },
        ),
        # A candidate's application/response to a vacancy.
        migrations.CreateModel(
            name='Respond',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cover_letter', models.CharField(blank=True, max_length=256, verbose_name='Приложенное письмо')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('invited', models.BooleanField(default=False)),
                ('text', models.CharField(blank=True, max_length=256)),
            ],
            options={
                'verbose_name': 'Отклик',
                'verbose_name_plural': 'Отклики',
            },
        ),
        # A candidate's resume, including education details.
        migrations.CreateModel(
            name='Resume',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(blank=True, upload_to='recruiting/resume/avatar', verbose_name='Аватар')),
                ('header', models.CharField(max_length=128)),
                ('text', models.TextField(max_length=8192)),
                ('education', models.CharField(choices=[('SE', 'Среднее'), ('SS', 'Среднее специальное'), ('BC', 'Бакалавр'), ('MS', 'Магистратура'), ('DC', 'Докторантур наук')], max_length=2, null=True, verbose_name='Образование')),
                ('edu_institution', models.CharField(blank=True, max_length=64, verbose_name='Учебное заведение')),
                ('specialization', models.CharField(blank=True, max_length=64)),
                # Defaults to the year the row is created (callable default).
                ('edu_end_year', models.IntegerField(blank=True, default=recruiting.utils.handler.current_year, verbose_name='Год окончания')),
                ('is_open', models.BooleanField(default=False, verbose_name='Виден ли всему интернету')),
                ('last_modified', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Резюме',
                'verbose_name_plural': 'Резюме',
            },
        ),
        # A job posting.
        # NOTE(review): the defaults below look swapped — 'FD' is not among
        # the employment choices (FT/PT/WP/PW/VW) and 'FT' is not among the
        # schedule choices (FD/RM/SH/RB/FS). Verify against models.py before
        # relying on either default.
        migrations.CreateModel(
            name='Vacancy',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('header', models.CharField(max_length=128, verbose_name='Заголовок')),
                ('text', models.TextField(max_length=8192, verbose_name='Основное описание')),
                ('salary', models.IntegerField(blank=True, null=True, verbose_name='Зарплата')),
                ('employment', models.CharField(choices=[('FT', 'Full time'), ('PT', 'Part time'), ('WP', 'Work placement'), ('PW', 'Project work'), ('VW', 'Volunteering')], default='FD', max_length=2, verbose_name='Занятость')),
                ('schedule', models.CharField(choices=[('FD', 'Full day'), ('RM', 'Remote work'), ('SH', 'Shift schedule'), ('RB', 'Rotation based'), ('FS', 'Flexible schedule')], default='FT', max_length=2, verbose_name='График работы')),
            ],
            options={
                'verbose_name': 'Вакансия',
                'verbose_name_plural': 'Вакансии',
            },
        ),
    ]
|
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.apps import apps
from django.core import serializers
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.db.models import Q
from .models import Article
from users.models import User
from main.views import status, createJsonResponse, createQuery
# Create your views here.
@method_decorator(csrf_exempt, name="dispatch")
class index(View):
    """List every Article, or report that none exist."""

    def get(self, request, *args, **kwargs):
        data = Article.objects.all()
        # exists() lets the database short-circuit instead of counting every
        # row just to compare the count against zero.
        if not data.exists():
            return JsonResponse(status(status="info", message="Article is empty!"))
        return JsonResponse(createJsonResponse(data), safe=False)

    def post(self, request, *args, **kwargs):
        # Articles are created through addPost; POST is not supported here.
        return JsonResponse(status(status="error", message="Method not valid!"))
@method_decorator(csrf_exempt, name="dispatch")
class addPost(View):
    """Create a new Article owned by the user matching the supplied API key."""

    def post(self, request, *args, **kwargs):
        # NOTE: the original re-checked request.method == "POST" here, but
        # View.dispatch only routes POST requests to this handler; had that
        # guard ever failed, the method would have returned None (an error).
        article = Article()
        data = request.POST
        try:
            # Any missing POST key or unknown API key lands in the except.
            users = User.objects.get(api_key=data['apikey'])
            article.title = data['title']
            article.description = data['desc']
            article.author = users
            article.save()
        except Exception as e:
            return JsonResponse(status(status="error", message=f"Data {e} not found!"))
        return JsonResponse(status(status="ok", message="Success Upload Article"))

    def get(self, request, *args, **kwargs):
        return JsonResponse(status(status="error", message="Method not valid!"))
@method_decorator(csrf_exempt, name="dispatch")
class updatePost(View):
    """Update an Article's title/description; only its author may update."""

    def post(self, request, *args, **kwargs):
        # NOTE: the original imported django.utils.timezone here but never
        # used it, and re-checked request.method even though View.dispatch
        # already routes only POST requests here; both removed.
        try:
            id = request.GET['id']
            data = Article.objects.get(pk=id)
            body = request.POST
            users = User.objects.get(api_key=body['apikey'])
            # User.objects.get raises DoesNotExist rather than returning
            # None, so the "invalid API key" fall-through below is
            # effectively defensive only — kept for safety.
            if users is not None:
                if users.api_key == data.author.api_key:
                    data.title = body['title']
                    data.description = body['desc']
                    data.save()
                    return JsonResponse(status(status="success", message="Success Update Article"), safe=False)
                return JsonResponse(status(status="error", message="You are not the author!"), safe=False)
            return JsonResponse(status(status="error", message="Your API KEY is invalid!"), safe=False)
        except Exception as e:
            return JsonResponse(status(status="error", message=f"Data {e} not found!"))

    def get(self, request, *args, **kwargs):
        return JsonResponse(status(status="error", message="Method not valid!"))
def viewPost(request):
    """Return Articles matching the query built from the request, or an
    informational message when nothing matches."""
    matching = Article.objects.filter(createQuery(request)).all()
    if matching.count() == 0:
        return JsonResponse(status(status="info", message="Article is empty!"), safe=False)
    return JsonResponse(createJsonResponse(matching), safe=False)
def viewUserPost(request, user):
    """Return the given user's Articles filtered by the request's query.

    BUG FIX: the original fell off the end (returning None — a server error
    in Django, since a view must return an HttpResponse) when the username
    did not exist; an explicit error response is now returned.
    """
    if User.objects.filter(username=user).exists():
        try:
            user = User.objects.get(username=user).pk
            query = createQuery(request, user=user)
            data = Article.objects.filter(query, author=user).all()
            if data.count() == 0:
                return JsonResponse(status(status="info", message="Article is empty!"), safe=False)
            return JsonResponse(createJsonResponse(data), safe=False)
        except ValueError:
            # Raised when a query parameter cannot be parsed.
            return JsonResponse(status(status="error", message="Parameter invalid, Please input it Correctly!"))
    return JsonResponse(status(status="error", message="User not found!"), safe=False)
@method_decorator(csrf_exempt, name="dispatch")
class deletePost(View):
    """Delete an Article; only its author (matched by API key) may delete."""

    def post(self, request, *args, **kwargs):
        try:
            id = request.GET['id']
            data = Article.objects.get(pk=id)
            body = request.POST
            try:
                users = User.objects.get(api_key=body['apikey'])
                if users is not None:
                    if users.api_key == data.author.api_key:
                        data.delete()
                        return JsonResponse(status(status="success", message="Success Delete Article"), safe=False)
                    return JsonResponse(status(status="error", message="You are not the author!"), safe=False)
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            except Exception:
                return JsonResponse(status(status="error", message="Your API KEY is invalid!"), safe=False)
        except Exception as e:
            return JsonResponse(status(status="error", message=f"Data {e} not found!"))
|
import logging
import sys
import os
import requests as req
from collections import OrderedDict
import cartosql
import lxml
from xmljson import parker as xml2json
from dateutil import parser
import requests
import datetime
import json
# do you want to delete everything currently in the Carto table when you run this script?
CLEAR_TABLE_FIRST = False
# Carto username and API key for account where we will store the data
CARTO_USER = os.getenv('CARTO_USER')
CARTO_KEY = os.getenv('CARTO_KEY')
# name of table in Carto where we will upload the data
CARTO_TABLE = 'dis_003_volcano_reports'
# column of table that can be used as a unique ID (UID)
UID_FIELD = 'uid'
# column that stores datetime information
TIME_FIELD = 'pubdate'
# column names and types for data table
# column names should be lowercase
# column types should be one of the following: geometry, text, numeric, timestamp
CARTO_SCHEMA = OrderedDict([
    ('uid', 'text'),
    ('the_geom', 'geometry'),
    ('pubdate', 'timestamp'),
    ('volcano_name', 'text'),
    ('country_name', 'text'),
    ('description', 'text'),
    ('sources', 'text')
])
# how many rows can be stored in the Carto table before the oldest ones are deleted?
MAX_ROWS = 1000000
# oldest date that can be stored in the Carto table before we start deleting
# (five years before the day the script runs)
MAX_AGE = datetime.datetime.today() - datetime.timedelta(days=365*5)
# url for USGS Weekly Volcanic Activity Report
# NOTE(review): the URL is hosted at volcano.si.edu (Smithsonian); the weekly
# report is a joint Smithsonian/USGS product — confirm attribution if it matters
SOURCE_URL = "http://volcano.si.edu/news/WeeklyVolcanoRSS.xml"
# format of dates in Carto table
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Resource Watch dataset API ID
# Important! Before testing this script:
# Please change this ID OR comment out the getLayerIDs(DATASET_ID) function in the script below
# Failing to do so will overwrite the last update date on a different dataset on Resource Watch
DATASET_ID = '60d3b365-6c0b-4f1c-9b7f-f3f00f2a05d7'
'''
FUNCTIONS FOR ALL DATASETS
The functions below must go in every near real-time script.
Their format should not need to be changed.
'''
def lastUpdateDate(dataset, date):
    '''
    Push a new 'last update date' for a Resource Watch dataset to the API.
    INPUT   dataset: Resource Watch API dataset ID (string)
            date: datetime to record as the dataset's 'last update date'
    RETURN  0 on success, None if the request raised
    '''
    # Endpoint for this particular dataset.
    apiUrl = f'http://api.resourcewatch.org/v1/dataset/{dataset}'
    # Authentication and payload type for the PATCH request.
    headers = {
        'Content-Type': 'application/json',
        'Authorization': os.getenv('apiToken')
    }
    # The API expects an ISO-8601 string ('YYYY-MM-DDTHH:MM:SS').
    body = {"dataLastUpdated": date.isoformat()}
    try:
        r = requests.patch(url = apiUrl, json = body, headers = headers)
        logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
        return 0
    except Exception as e:
        logging.error('[lastUpdated]: '+str(e))
'''
FUNCTIONS FOR CARTO DATASETS
The functions below must go in every near real-time script for a Carto dataset.
Their format should not need to be changed.
'''
def checkCreateTable(table, schema, id_field, time_field=''):
    '''
    Create the Carto table if it is missing; otherwise return the IDs it
    already holds so we only upload data we have not seen before.
    INPUT table: Carto table to check or create (string)
          schema: column name -> type mapping used when creating (dictionary)
          id_field: column used as the table's unique ID (string)
          time_field: optional column holding datetime values (string)
    RETURN list of existing IDs from id_field (list of strings; empty for a
           freshly created table)
    '''
    if not cartosql.tableExists(table, user=CARTO_USER, key=CARTO_KEY):
        # Table is missing: build it from the schema and add indexes.
        logging.info('Table {} does not exist, creating'.format(table))
        cartosql.createTable(table, schema, user=CARTO_USER, key=CARTO_KEY)
        if id_field:
            # Unique index: Carto rejects any row whose id_field value
            # duplicates an existing row.
            cartosql.createIndex(table, id_field, unique=True, user=CARTO_USER, key=CARTO_KEY)
        if time_field:
            # Non-unique index to speed up date-ordered queries.
            cartosql.createIndex(table, time_field, user=CARTO_USER, key=CARTO_KEY)
        # A brand-new table holds no IDs yet.
        return []
    # Table already exists: fetch every value of the id column as CSV.
    logging.info('Fetching existing IDs')
    r = cartosql.getFields(id_field, table, f='csv', post=True, user=CARTO_USER, key=CARTO_KEY)
    # Drop the CSV header and the trailing empty line.
    return r.text.split('\r\n')[1:-1]
'''
FUNCTIONS FOR THIS DATASET
The functions below have been tailored to this specific dataset.
They should all be checked because their format likely will need to be changed.
'''
def deleteExcessRows(table, max_rows, time_field, max_age=''):
    '''
    Trim a Carto table: drop rows older than max_age, then drop the oldest
    rows beyond max_rows.
    INPUT table: name of the Carto table to trim (string)
          max_rows: maximum rows the table may keep (integer)
          time_field: column holding datetime values (string)
          max_age: oldest date allowed in the table (datetime object)
    RETURN number of rows removed (integer)
    '''
    dropped = 0
    # Accept either a datetime object or a preformatted string for max_age.
    if isinstance(max_age, datetime.datetime):
        max_age = max_age.isoformat()
    if max_age:
        # Remove everything older than the age cutoff.
        resp = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age), user=CARTO_USER, key=CARTO_KEY)
        dropped = resp.json()['total_rows']
    # Fetch all cartodb_ids sorted newest-first.
    resp = cartosql.getFields('cartodb_id', table, order='{} desc'.format(time_field),
                              f='csv', user=CARTO_USER, key=CARTO_KEY)
    # Drop the CSV header and trailing empty line.
    ids = resp.text.split('\r\n')[1:-1]
    # Everything past position max_rows is the oldest overflow — delete it.
    if len(ids) > max_rows:
        resp = cartosql.deleteRowsByIDs(table, ids[max_rows:], user=CARTO_USER, key=CARTO_KEY)
        dropped += resp.json()['total_rows']
    if dropped:
        logging.info('Dropped {} old rows from {}'.format(dropped, table))
    return dropped
def genUID(lat, lon, dt):
    '''Build the unique row id "<lat>_<lon>_<dt>" from coordinates and a date.
    INPUT lat: latitude of the report (float or string)
          lon: longitude of the report (float or string)
          dt: date string for the report (string)
    RETURN unique id for the row (string)
    '''
    return f'{lat}_{lon}_{dt}'
def processData(url, existing_ids):
    '''
    Fetch, process and upload new data
    INPUT url: url where you can find the source data (string)
          existing_ids: list of IDs that we already have in our Carto table (list of strings)
    RETURN num_new: number of rows of new data sent to Carto table (integer)
    '''
    # rows to upload and their unique ids (kept in sync)
    new_data = []
    new_ids = []
    # pull the RSS feed and parse the XML body
    res = req.get(url)
    xml = lxml.etree.fromstring(res.content)
    # BUG FIX: the original bound this result to a local named `json`,
    # shadowing the imported json module inside this function.
    feed = xml2json.data(xml)
    # each <item> in the feed is one volcano report
    items = feed['channel']['item']
    for item in items:
        # the title looks like "Volcano Name (Country) ..."; split it apart
        title = item['title'].split(')')[0].split('(')
        # remove leading and trailing whitespace from both parts
        place_info = [place.strip() for place in title]
        volcano_name = place_info[0]
        country_name = place_info[1]
        # the georss point is "latitude longitude"
        # (the original comments had lat/lon swapped; the code was correct)
        coords = item['{http://www.georss.org/georss}point'].split(' ')
        lat = coords[0]
        lon = coords[1]
        # GeoJSON coordinate order is [longitude, latitude]
        geom = {
            'type': 'Point',
            'coordinates': [lon, lat]
        }
        # publication date, normalized to DATETIME_FORMAT
        dt = parser.parse(item['pubDate'], fuzzy=True).strftime(DATETIME_FORMAT)
        # the description ends with "Source:" (or "Sources:" when there are
        # several) followed by the attribution text
        info = item['description'].split('Source:')
        if len(info) < 2:
            info = item['description'].split('Sources:')
        # strip the <p> tags from both halves
        description_text = [text.replace('<p>', '').replace('</p>', '') for text in info]
        description = description_text[0]
        sources = description_text[1]
        # unique id built from coordinates and datetime
        _uid = genUID(lat, lon, dt)
        # skip rows already in Carto or already queued during this run
        if _uid not in existing_ids and _uid not in new_ids:
            new_ids.append(_uid)
            # assemble the row in CARTO_SCHEMA column order
            row = []
            for field in CARTO_SCHEMA:
                if field == 'uid':
                    row.append(_uid)
                elif field == 'the_geom':
                    row.append(geom)
                elif field == 'pubdate':
                    row.append(dt)
                elif field == 'description':
                    row.append(description)
                elif field == 'sources':
                    row.append(sources)
                elif field == 'volcano_name':
                    row.append(volcano_name)
                elif field == 'country_name':
                    row.append(country_name)
            new_data.append(row)
    num_new = len(new_ids)
    if num_new:
        # upload everything in a single block insert
        logging.info('Adding {} new records'.format(num_new))
        cartosql.blockInsertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_data, user=CARTO_USER, key=CARTO_KEY)
    return num_new
def get_most_recent_date(table):
    '''
    Look up the newest value stored in the TIME_FIELD column of a Carto table.
    INPUT  table: name of the Carto table to inspect (string)
    RETURN the most recent date found in TIME_FIELD (datetime object)
    '''
    # pull every date in the time column as CSV text
    response = cartosql.getFields(TIME_FIELD, table, f='csv', post=True, user=CARTO_USER, key=CARTO_KEY)
    # discard the CSV header and the trailing empty entry, then sort
    # oldest-to-newest
    dates = sorted(response.text.split('\r\n')[1:-1])
    # the final entry after sorting is the most recent
    return datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')
def create_headers():
    '''
    Build the HTTP headers required for authorized Resource Watch API calls.
    The token is read from the apiToken environment variable.
    '''
    token = os.getenv('apiToken')
    return {
        'Content-Type': "application/json",
        'Authorization': "{}".format(token),
    }
def pull_layers_from_API(dataset_id):
    '''
    Download the current layer definitions for a dataset from the RW API.
    INPUT  dataset_id: Resource Watch API dataset ID (string)
    RETURN list of layer dictionaries
    '''
    # back-office endpoint listing up to 100 layers for this dataset
    rw_api_url = 'https://api.resourcewatch.org/v1/dataset/{}/layer?page[size]=100'.format(dataset_id)
    response = requests.get(rw_api_url)
    # decode the JSON body and keep only the layer list under 'data'
    return json.loads(response.content.decode('utf-8'))['data']
def update_layer(layer):
    '''
    Refresh the date range shown in a Resource Watch layer's title and push
    the change to the back office.
    INPUT layer: layer definition pulled from the API (dictionary)
    '''
    # the title starts with the date range, up to the word " Volcanic"
    cur_title = layer['attributes']['name']
    old_date_text = cur_title.split(' Volcanic')[0]
    # new range: the last 30 days, ending today
    now = datetime.datetime.now()
    new_date_end = now.strftime("%B %d, %Y")
    new_date_start = datetime.datetime.strftime(now - datetime.timedelta(days=29), "%B %d, %Y")
    new_date_text = new_date_start + ' - ' + new_date_end
    # swap the old range for the new one inside the title
    layer['attributes']['name'] = layer['attributes']['name'].replace(old_date_text, new_date_text)
    # PATCH the updated name back to the API
    rw_api_url_layer = "https://api.resourcewatch.org/v1/dataset/{dataset_id}/layer/{layer_id}".format(
        dataset_id=layer['attributes']['dataset'], layer_id=layer['id'])
    payload = {
        'application': ['rw'],
        'name': layer['attributes']['name']
    }
    r = requests.request('PATCH', rw_api_url_layer, data=json.dumps(payload), headers=create_headers())
    # 200 means replaced; 504 (gateway timeout) means the replacement is
    # still in flight but will complete
    if r.ok or r.status_code == 504:
        logging.info('Layer replaced: {}'.format(layer['id']))
    else:
        logging.error('Error replacing layer: {} ({})'.format(layer['id'], r.status_code))
def updateResourceWatch(num_new):
    '''
    Bring Resource Watch in line with newly uploaded data: refresh the date
    range in every layer title, then update the dataset's 'last update date'.
    INPUT num_new: number of new rows added to the Carto table (integer)
    '''
    # nothing to do unless rows were actually added
    if num_new > 0:
        most_recent_date = get_most_recent_date(CARTO_TABLE)
        logging.info('Updating {}'.format(CARTO_TABLE))
        # refresh the date range shown in each layer's title
        for layer in pull_layers_from_API(DATASET_ID):
            update_layer(layer)
        # finally, record the newest data date on the dataset itself
        lastUpdateDate(DATASET_ID, most_recent_date)
def main():
    """Top-level ETL: optionally clear the table, sync new volcano reports
    into Carto, trim old rows, and update Resource Watch."""
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    logging.info('STARTING')
    # clear the table before starting, if specified
    if CLEAR_TABLE_FIRST:
        logging.info("clearing table")
        if cartosql.tableExists(CARTO_TABLE, user=CARTO_USER, key=CARTO_KEY):
            # Delete rows rather than dropping the table: dropping would hide
            # the Resource Watch visualization until the table is reopened in
            # Carto; row deletion does not.
            cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=CARTO_USER, key=CARTO_KEY)
    # Check if table exists, create it if it does not
    logging.info('Checking if table exists and getting existing IDs.')
    existing_ids = checkCreateTable(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)
    # Fetch, process, and upload new data
    logging.info('Fetching new data')
    num_new = processData(SOURCE_URL, existing_ids)
    logging.info('Previous rows: {}, New rows: {}'.format(len(existing_ids), num_new))
    # Delete data to get back to MAX_ROWS; the drop count is logged inside
    # deleteExcessRows (the original bound it to an unused `num_deleted`).
    logging.info('Deleting excess rows')
    deleteExcessRows(CARTO_TABLE, MAX_ROWS, TIME_FIELD, MAX_AGE)
    # Update Resource Watch
    updateResourceWatch(num_new)
    logging.info("SUCCESS")
|
<gh_stars>0
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Iterable
import pytest
from pants.backend.codegen.protobuf import protobuf_dependency_inference, target_types
from pants.backend.codegen.protobuf.go.rules import (
GenerateGoFromProtobufRequest,
parse_go_package_option,
)
from pants.backend.codegen.protobuf.go.rules import rules as go_protobuf_rules
from pants.backend.codegen.protobuf.target_types import (
ProtobufSourceField,
ProtobufSourcesGeneratorTarget,
ProtobufSourceTarget,
)
from pants.backend.codegen.protobuf.target_types import rules as protobuf_target_types_rules
from pants.backend.go import target_type_rules
from pants.backend.go.goals import test
from pants.backend.go.goals.test import GoTestFieldSet
from pants.backend.go.target_types import GoModTarget, GoPackageTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
link,
sdk,
tests_analysis,
third_party_pkg,
)
from pants.build_graph.address import Address
from pants.core.goals.test import TestResult
from pants.core.util_rules import config_files, source_files, stripped_source_files
from pants.core.util_rules.external_tool import rules as external_tool_rules
from pants.engine.fs import Digest, DigestContents
from pants.engine.rules import QueryRule
from pants.engine.target import GeneratedSources, HydratedSources, HydrateSourcesRequest
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner, logging
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with every rule needed to generate Go code
    from Protobuf sources and to run the resulting Go unit tests."""
    rule_runner = RuleRunner(
        rules=[
            *config_files.rules(),
            *external_tool_rules(),
            *source_files.rules(),
            *protobuf_target_types_rules(),
            *protobuf_dependency_inference.rules(),
            *stripped_source_files.rules(),
            *go_protobuf_rules(),
            *sdk.rules(),
            *target_types.rules(),
            # Rules needed to run Go unit test.
            *test.rules(),
            *assembly.rules(),
            *build_pkg.rules(),
            *build_pkg_target.rules(),
            *first_party_pkg.rules(),
            *go_mod.rules(),
            *link.rules(),
            # NOTE: sdk.rules() was listed a second time here in the
            # original; the duplicate registration has been removed.
            *target_type_rules.rules(),
            *tests_analysis.rules(),
            *third_party_pkg.rules(),
            QueryRule(HydratedSources, [HydrateSourcesRequest]),
            QueryRule(GeneratedSources, [GenerateGoFromProtobufRequest]),
            QueryRule(DigestContents, (Digest,)),
            QueryRule(TestResult, (GoTestFieldSet,)),
        ],
        target_types=[
            GoModTarget,
            GoPackageTarget,
            ProtobufSourceTarget,
            ProtobufSourcesGeneratorTarget,
        ],
    )
    rule_runner.set_options(
        [],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    return rule_runner
def assert_files_generated(
    rule_runner: RuleRunner,
    address: Address,
    *,
    expected_files: list[str],
    source_roots: list[str],
    extra_args: Iterable[str] = (),
) -> None:
    """Generate Go sources for the protobuf target at `address` and check
    that exactly `expected_files` are produced."""
    rule_runner.set_options(
        [f"--source-root-patterns={repr(source_roots)}", *extra_args],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    target = rule_runner.get_target(address)
    hydrated = rule_runner.request(
        HydratedSources, [HydrateSourcesRequest(target[ProtobufSourceField])]
    )
    generated = rule_runner.request(
        GeneratedSources, [GenerateGoFromProtobufRequest(hydrated.snapshot, target)]
    )
    assert set(generated.snapshot.files) == set(expected_files)
def test_extracts_go_package() -> None:
    """The `option go_package` value is parsed out of raw proto bytes."""
    proto_source = b'option go_package = "example.com/dir1";'
    assert parse_go_package_option(proto_source) == "example.com/dir1"
@logging
def test_generates_go(rule_runner: RuleRunner) -> None:
    """End-to-end test of the protoc Go backend: generate code for several
    protobuf targets across two source roots, then run a real `go_test` that
    imports the generated packages to prove they compile and behave."""
    # This tests a few things:
    # * We generate the correct file names.
    # * Protobuf files can import other protobuf files, and those can import others
    # (transitive dependencies). We'll only generate the requested target, though.
    # * We can handle multiple source roots, which need to be preserved in the final output.
    rule_runner.write_files(
        {
            "src/protobuf/dir1/f.proto": dedent(
                """\
                syntax = "proto3";
                option go_package = "example.com/dir1";
                package dir1;
                message Person {
                  string name = 1;
                  int32 id = 2;
                  string email = 3;
                }
                """
            ),
            "src/protobuf/dir1/f2.proto": dedent(
                """\
                syntax = "proto3";
                option go_package = "example.com/dir1";
                package dir1;
                message Place {
                  string town = 1;
                  string country = 2;
                }
                """
            ),
            "src/protobuf/dir1/BUILD": dedent(
                """\
                protobuf_sources()
                """
            ),
            "src/protobuf/dir2/f.proto": dedent(
                """\
                syntax = "proto3";
                option go_package = "example.com/dir2";
                package dir2;
                import "dir1/f.proto";
                message Employee {
                  dir1.Person self = 1;
                  dir1.Person manager = 2;
                }
                """
            ),
            "src/protobuf/dir2/BUILD": "protobuf_sources()",
            # Test another source root.
            "tests/protobuf/test_protos/f.proto": dedent(
                """\
                syntax = "proto3";
                option go_package = "example.com/test_protos";
                package test_protos;
                import "dir2/f.proto";
                """
            ),
            "tests/protobuf/test_protos/BUILD": ("protobuf_sources()"),
            "src/go/people/BUILD": dedent(
                """\
                go_mod(name="mod")
                go_package(name="pkg")
                """
            ),
            "src/go/people/go.mod": dedent(
                """\
                module example.com/people
                require google.golang.org/protobuf v1.27.1
                """
            ),
            # go.sum pins the protobuf runtime the generated code depends on.
            "src/go/people/go.sum": dedent(
                """\
                github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
                github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
                github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
                github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
                golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
                golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
                google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
                google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
                google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
                """
            ),
            # A Go test that imports both generated packages; running it proves
            # the generated sources are usable by real Go code.
            "src/go/people/proto_test.go": dedent(
                """\
                package people
                import (
                    "testing"
                    pb_dir1 "example.com/dir1"
                    pb_dir2 "example.com/dir2"
                )
                func TestProtoGen(t *testing.T) {
                    person := pb_dir1.Person{
                        Name: "name",
                        Id: 1,
                        Email: "<EMAIL>",
                    }
                    if person.Name != "name" {
                        t.Fail()
                    }
                    place := pb_dir1.Place{
                        Town: "Any Town",
                        Country: "Some Country",
                    }
                    if place.Town != "Any Town" {
                        t.Fail()
                    }
                    employee := pb_dir2.Employee{
                        Self: &pb_dir1.Person{
                            Name: "self",
                            Id: 1,
                            Email: "<EMAIL>",
                        },
                        Manager: &pb_dir1.Person{
                            Name: "manager",
                            Id: 2,
                            Email: "<EMAIL>",
                        },
                    }
                    if employee.Self.Name != "self" {
                        t.Fail()
                    }
                }
                """
            ),
        }
    )

    def assert_gen(addr: Address, expected: Iterable[str]) -> None:
        # Helper: run codegen for one file-level target and check the outputs.
        assert_files_generated(
            rule_runner,
            addr,
            source_roots=["src/python", "/src/protobuf", "/tests/protobuf"],
            expected_files=list(expected),
        )

    assert_gen(
        Address("src/protobuf/dir1", relative_file_path="f.proto"),
        ("src/protobuf/dir1/f.pb.go",),
    )
    assert_gen(
        Address("src/protobuf/dir1", relative_file_path="f2.proto"),
        ("src/protobuf/dir1/f2.pb.go",),
    )
    assert_gen(
        Address("src/protobuf/dir2", relative_file_path="f.proto"),
        ("src/protobuf/dir2/f.pb.go",),
    )
    assert_gen(
        Address("tests/protobuf/test_protos", relative_file_path="f.proto"),
        ("tests/protobuf/test_protos/f.pb.go",),
    )

    # Now actually compile and run the Go test against the generated code.
    rule_runner.set_options(
        ["--go-test-args=-v"],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    tgt = rule_runner.get_target(Address("src/go/people", target_name="pkg"))
    result = rule_runner.request(TestResult, [GoTestFieldSet.create(tgt)])
    assert result.exit_code == 0
    assert "PASS: TestProtoGen" in result.stdout
def test_generates_go_grpc(rule_runner: RuleRunner) -> None:
    """With `grpc=True`, codegen must emit both the message file (service.pb.go)
    and the gRPC stubs file (service_grpc.pb.go) for every streaming shape."""
    rule_runner.write_files(
        {
            "protos/BUILD": "protobuf_sources(grpc=True)",
            "protos/service.proto": dedent(
                """\
                syntax = "proto3";
                option go_package = "example.com/protos";
                package service;
                message TestMessage {
                  string foo = 1;
                }
                service TestService {
                  rpc noStreaming (TestMessage) returns (TestMessage);
                  rpc clientStreaming (stream TestMessage) returns (TestMessage);
                  rpc serverStreaming (TestMessage) returns (stream TestMessage);
                  rpc bothStreaming (stream TestMessage) returns (stream TestMessage);
                }
                """
            ),
        }
    )
    assert_files_generated(
        rule_runner,
        Address("protos", relative_file_path="service.proto"),
        source_roots=["/"],
        expected_files=[
            "protos/service.pb.go",
            "protos/service_grpc.pb.go",
        ],
    )
|
<filename>verification/testcases/functional_testcases/test_duns_verification.py
import json
from datetime import datetime
from unittest import TestCase
from unittest.mock import patch, Mock
from uuid import uuid4
from verification.application.handlers.verification_handlers import initiate, callback
from verification.application.services.verification_manager import verification_repository, duns_repository
from verification.constants import VerificationStatus, DUNSVerificationStatus
from verification.infrastructure.models import DUNSVerificationModel, VerificationModel
class TestDUNSVerification(TestCase):
    """Functional tests for the DUNS verification ``initiate`` and ``callback``
    lambda handlers, exercised against the real repositories."""

    # Fixed verification id used when seeding records for the callback tests.
    TEST_VERIFICATION_ID = "9f2c90119cb7424b8d69319ce211ddfc"

    @staticmethod
    def _latest_verification(org_uuid):
        """Return the newest VerificationModel row for org_uuid, or None."""
        return verification_repository.session.query(VerificationModel) \
            .filter(VerificationModel.entity_id == org_uuid) \
            .order_by(VerificationModel.created_at.desc()).first()

    @staticmethod
    def _duns_verification(verification_id):
        """Return the DUNSVerificationModel row for verification_id, or None."""
        return duns_repository.session.query(DUNSVerificationModel) \
            .filter(DUNSVerificationModel.verification_id == verification_id).first()

    def _seed_pending_records(self, org_uuid, username):
        """Insert a PENDING verification and its DUNS record for org_uuid."""
        current_time = datetime.utcnow()
        verification_repository.add_item(VerificationModel(
            id=self.TEST_VERIFICATION_ID, verification_type="DUNS", entity_id=org_uuid,
            status="PENDING", requestee=username, created_at=current_time,
            updated_at=current_time
        ))
        duns_repository.add_item(DUNSVerificationModel(
            verification_id=self.TEST_VERIFICATION_ID, org_uuid=org_uuid, comments=[],
            status=DUNSVerificationStatus.PENDING.value, created_at=current_time,
            updated_at=current_time))

    def _invoke_callback(self, username, query_string_parameters):
        """Call the callback handler with an APPROVED review payload."""
        event = {
            "requestContext": {"authorizer": {"claims": {"email": username}}},
            "queryStringParameters": query_string_parameters,
            "body": json.dumps({
                "verificationStatus": "APPROVED",
                "reviewed_by": "<EMAIL>",
                "comment": "looks good"
            })
        }
        callback(event, None)

    def _assert_approved(self, org_uuid):
        """Assert both records moved to APPROVED and the comment was recorded."""
        verification = self._latest_verification(org_uuid)
        # assertIsNotNone instead of `assert False`: not stripped under -O and
        # produces a meaningful failure message.
        self.assertIsNotNone(verification)
        self.assertEqual(VerificationStatus.APPROVED.value, verification.status)
        duns_verification = self._duns_verification(self.TEST_VERIFICATION_ID)
        self.assertIsNotNone(duns_verification)
        self.assertEqual(DUNSVerificationStatus.APPROVED.value, duns_verification.status)
        self.assertEqual(org_uuid, duns_verification.org_uuid)
        self.assertEqual(1, len(duns_verification.comments))

    def test_initiate(self):
        """initiate() must create a PENDING verification plus its DUNS record."""
        username = "<EMAIL>"
        org_uuid = uuid4().hex
        event = {
            "requestContext": {"authorizer": {"claims": {"email": username}}},
            "body": json.dumps({
                "type": "DUNS",
                "entity_id": org_uuid
            })
        }
        initiate(event, None)
        verification = self._latest_verification(org_uuid)
        self.assertIsNotNone(verification)
        self.assertEqual(VerificationStatus.PENDING.value, verification.status)
        duns_verification = self._duns_verification(verification.id)
        self.assertIsNotNone(duns_verification)
        self.assertEqual(DUNSVerificationStatus.PENDING.value, duns_verification.status)
        self.assertEqual(org_uuid, duns_verification.org_uuid)

    @patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
                                                            invoke_lambda=Mock(return_value={"statusCode": 201})))
    def test_callback(self, mock_boto):
        """callback() addressed by verification_id must approve both records."""
        username = "<EMAIL>"
        org_uuid = uuid4().hex
        self._seed_pending_records(org_uuid, username)
        self._invoke_callback(username, {"verification_id": self.TEST_VERIFICATION_ID})
        self._assert_approved(org_uuid)

    @patch("common.boto_utils.BotoUtils", return_value=Mock(get_ssm_parameter=Mock(return_value="123"),
                                                            invoke_lambda=Mock(return_value={"statusCode": 201})))
    def test_callback_entity(self, mock_boto):
        """callback() addressed by entity_id must behave identically."""
        username = "<EMAIL>"
        org_uuid = uuid4().hex
        self._seed_pending_records(org_uuid, username)
        self._invoke_callback(username, {"entity_id": org_uuid})
        self._assert_approved(org_uuid)

    def tearDown(self):
        # Clean the tables so tests stay independent of each other.
        duns_repository.session.query(DUNSVerificationModel).delete()
        verification_repository.session.query(VerificationModel).delete()
        verification_repository.session.commit()
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
from .network_base import NetworkBase
class FCNet(NetworkBase):
    """
    Fully connected network. Each layer consists of a matmul operator, an
    elementwise_add operator, and an activation function operator except for
    the last layer.

    Parameters:
        num_ins (integer): Number of inputs.
        num_outs (integer): Number of outputs.
        num_layers (integer): Number of layers.
        hidden_size (integer): Hidden size in each layer.
        activation (optional, "tanh" / "sigmoid"): Activation function used in
            each layer. The default value is "tanh".

    Example:
        >>> import paddlescience as psci
        >>> net = psci.network.FCNet(2, 3, 10, 50, activation='tanh')
    """

    def __init__(self,
                 num_ins,
                 num_outs,
                 num_layers,
                 hidden_size,
                 activation='tanh'):
        super(FCNet, self).__init__()

        self.num_ins = num_ins
        self.num_outs = num_outs
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.weights = []
        self.biases = []
        if activation == 'sigmoid':
            self.activation = F.sigmoid
        elif activation == 'tanh':
            self.activation = paddle.tanh
        else:
            # Raise instead of `assert 0` so the check is not stripped under
            # `python -O`.
            raise ValueError("Unsupported activation type.")

        # dynamic mode: make network here
        # static mode: make network in solver
        if paddle.in_dynamic_mode():
            self.make_network_dynamic()

    def parameters(self):
        """Return the trainable parameters in dynamic mode, None in static mode."""
        if paddle.in_dynamic_mode():
            return super(FCNet, self).parameters()
        else:
            return None

    def _layer_shape(self, i):
        """Return (input_size, output_size) for layer ``i``.

        NOTE: for a single-layer network (num_layers == 1) the ``i == 0``
        branch wins, so the output size is hidden_size — this preserves the
        original if/elif ordering exactly.
        """
        if i == 0:
            return self.num_ins, self.hidden_size
        if i == (self.num_layers - 1):
            return self.hidden_size, self.num_outs
        return self.hidden_size, self.hidden_size

    def make_network_dynamic(self):
        """Create and register per-layer weight/bias parameters (dynamic mode)."""
        for i in range(self.num_layers):
            lsize, rsize = self._layer_shape(i)
            # self._dtype is presumably provided by NetworkBase — confirm there.
            w = self.create_parameter(
                shape=[lsize, rsize], dtype=self._dtype, is_bias=False)
            b = self.create_parameter(
                shape=[rsize], dtype=self._dtype, is_bias=True)
            self.weights.append(w)
            self.biases.append(b)
            self.add_parameter("w_" + str(i), w)
            self.add_parameter("b_" + str(i), b)

    def make_network_static(self):
        """Create and register per-layer weight/bias parameters (static mode)."""
        for i in range(self.num_layers):
            lsize, rsize = self._layer_shape(i)
            w = paddle.static.create_parameter(
                shape=[lsize, rsize], dtype=self._dtype, is_bias=False)
            b = paddle.static.create_parameter(
                shape=[rsize], dtype=self._dtype, is_bias=True)
            self.weights.append(w)
            self.biases.append(b)
            self.add_parameter("w_" + str(i), w)
            self.add_parameter("b_" + str(i), b)

    def nn_func(self, ins):
        """Forward pass: matmul + add + activation per hidden layer, and a
        final linear (no activation) output layer."""
        u = ins
        for i in range(self.num_layers - 1):
            u = paddle.matmul(u, self.weights[i])
            u = paddle.add(u, self.biases[i])
            u = self.activation(u)
        u = paddle.matmul(u, self.weights[-1])
        u = paddle.add(u, self.biases[-1])
        return u

    def flatten_params(self):
        """Concatenate all weights and biases into a single flat tensor."""
        flat_vars = list(map(paddle.flatten, self.weights + self.biases))
        return paddle.flatten(paddle.concat(flat_vars))

    def reconstruct(self, param_data):
        """Rebuild self.weights / self.biases from a flat tensor.

        ``param_data`` must have the layout produced by ``flatten_params``
        (all weights first, then all biases, each flattened in order).
        """
        params = self.weights + self.biases
        param_sizes = [param.size for param in params]
        flat_params = paddle.split(param_data, param_sizes)
        is_biases = [False for _ in self.weights] + [True for _ in self.biases]
        self.weights = []
        self.biases = []
        for old_param, flat_param, is_bias in zip(params, flat_params,
                                                  is_biases):
            # The reshaped tensor is used directly rather than re-registered
            # via create_parameter (the registration path was deliberately
            # disabled upstream).
            new_param = paddle.reshape(flat_param, old_param.shape)
            if is_bias:
                self.biases.append(new_param)
            else:
                self.weights.append(new_param)
|
<reponame>benety/mongo
# Copyright (C) 2021-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#
# pylint: disable=too-many-lines
"""Checks compatibility of old and new IDL files.
In order to support user-selectable API versions for the server, server commands are now
defined using IDL files. This script checks that old and new commands are compatible with each
other, which allows commands to be updated without breaking the API specifications within a
specific API version.
This script accepts two directories as arguments, the "old" and the "new" IDL directory.
Before running this script, run checkout_idl_files_from_past_releases.py to find and create
directories containing the old IDL files from previous releases.
"""
import argparse
import os
import sys
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Set, Optional, Tuple, Union
from idl import parser, syntax, errors, common
from idl.compiler import CompilerImportResolver
from idl_compatibility_errors import IDLCompatibilityContext, IDLCompatibilityErrorCollection
# Fields/commands permitted to use bson_serialization_type 'any' without being
# flagged as a compatibility error.
ALLOW_ANY_TYPE_LIST: List[str] = [
    # This list is only used in unit-tests.
    "commandAllowedAnyTypes",
    "commandAllowedAnyTypes-param-anyTypeParam",
    "commandAllowedAnyTypes-reply-anyTypeField",
    "oldTypeBsonAnyAllowList",
    "newTypeBsonAnyAllowList",
    "oldReplyFieldTypeBsonAnyAllowList-reply-oldBsonSerializationTypeAnyReplyField",
    "newReplyFieldTypeBsonAnyAllowList-reply-newBsonSerializationTypeAnyReplyField",
    "oldParamTypeBsonAnyAllowList-param-bsonTypeAnyParam",
    "newParamTypeBsonAnyAllowList-param-bsonTypeAnyParam",
    "commandAllowedAnyTypesWithVariant-reply-anyTypeField",
    "replyFieldTypeBsonAnyWithVariant-reply-bsonSerializationTypeAnyStructField",
    "replyFieldTypeBsonAnyWithVariantWithArray-reply-bsonSerializationTypeAnyStructField",
    "parameterFieldTypeBsonAnyWithVariant-param-bsonSerializationTypeAnyStructField",
    "parameterFieldTypeBsonAnyWithVariantWithArray-param-bsonSerializationTypeAnyStructField",
    "commandTypeBsonAnyWithVariant",
    "commandTypeBsonAnyWithVariantWithArray",
    "replyFieldCppTypeNotEqual-reply-cppTypeNotEqualReplyField",
    "commandCppTypeNotEqual",
    "commandParameterCppTypeNotEqual-param-cppTypeNotEqualParam",
    "replyFieldSerializerNotEqual-reply-serializerNotEqualReplyField",
    "commandSerializerNotEqual",
    "commandParameterSerializerNotEqual-param-serializerNotEqualParam",
    "replyFieldDeserializerNotEqual-reply-deserializerNotEqualReplyField",
    "commandDeserializerNotEqual",
    "commandParameterDeserializerNotEqual-param-deserializerNotEqualParam",
    "newlyAddedReplyFieldTypeBsonAnyAllowed-reply-newlyAddedBsonSerializationTypeAnyReplyField",
    "replyFieldTypeBsonAnyWithVariantUnstable-reply-bsonSerializationTypeWithVariantAnyUnstableReplyField",
    "newlyAddedParamBsonAnyAllowList-param-newlyAddedBsonAnyAllowListParam",
    "newlyAddedTypeFieldBsonAnyAllowList",
    "parameterFieldTypeBsonAnyWithVariantUnstable-param-bsonSerializationTypeAnyStructField",
    "commandTypeBsonAnyWithVariantUnstable",
    "commandParameterCppTypeNotEqualUnstable-param-cppTypeNotEqualParam",
    "replyFieldCppTypeNotEqualUnstable-reply-cppTypeNotEqualReplyUnstableField",
    "commandCppTypeNotEqualUnstable",
    "commandParameterSerializerNotEqualUnstable-param-serializerNotEqualParam",
    "replyFieldSerializerNotEqualUnstable-reply-serializerNotEqualReplyUnstableField",
    "commandSerializerNotEqualUnstable",
    "commandParameterDeserializerNotEqualUnstable-param-deserializerNotEqualParam",
    "replyFieldDeserializerNotEqualUnstable-reply-deserializerNotEqualReplyUnstableField",
    "commandDeserializerNotEqualUnstable",
    'create-param-backwards',
    'saslStart-param-payload',
    'saslStart-param-payload',
    'saslStart-reply-payload',
    'saslContinue-param-payload',
    'saslContinue-reply-payload',
    # These commands (aggregate, find, update, delete, findAndModify, explain) might contain some
    # fields with type `any`. Currently, it's not possible to avoid the `any` type in those cases.
    # Instead, here are the preventive measures in-place to catch unintentional breaking changes:
    # 1- Added comments on top of custom serializers/deserializers (related to these fields) to
    #    let the future developers know that their modifications to these methods might lead to
    #    a breaking change in the API.
    # 2- Added proper unit-tests to catch accidental changes to the custom serializers/deserializers
    #    by over-fitting on the current implementation of these custom serializers/deserializers.
    # 3- Added further checks to the current script (idl_check_compatibility.py) to check for
    #    changing a custom serializer/deserializer and considering it as a potential breaking
    #    change.
    'aggregate-param-pipeline',
    'aggregate-param-explain',
    'aggregate-param-allowDiskUse',
    'aggregate-param-cursor',
    'aggregate-param-hint',
    'aggregate-param-needsMerge',
    'aggregate-param-fromMongos',
    'aggregate-param-$_requestReshardingResumeToken',
    'aggregate-param-isMapReduceCommand',
    'count-param-hint',
    'count-param-limit',
    'count-param-maxTimeMS',
    'find-param-filter',
    'find-param-projection',
    'find-param-sort',
    'find-param-hint',
    'find-param-collation',
    'find-param-singleBatch',
    'find-param-allowDiskUse',
    'find-param-min',
    'find-param-max',
    'find-param-returnKey',
    'find-param-showRecordId',
    'find-param-$queryOptions',
    'find-param-tailable',
    'find-param-oplogReplay',
    'find-param-noCursorTimeout',
    'find-param-awaitData',
    'find-param-allowPartialResults',
    'find-param-readOnce',
    'find-param-allowSpeculativeMajorityRead',
    'find-param-$_requestResumeToken',
    'find-param-$_resumeAfter',
    'find-param-maxTimeMS',
    'update-param-u',
    'update-param-hint',
    'update-param-upsertSupplied',
    'update-reply-_id',
    'delete-param-limit',
    'delete-param-hint',
    'findAndModify-param-hint',
    'findAndModify-param-update',
    'findAndModify-reply-upserted',
    'insert-reply-opTime',
    'update-reply-opTime',
    'delete-reply-opTime',
    'aggregate-reply-partialResultsReturned',
    'aggregate-reply-invalidated',
    'find-reply-partialResultsReturned',
    'find-reply-invalidated',
    'getMore-reply-partialResultsReturned',
    'getMore-reply-invalidated',
]
# Fields whose "unstable" marking is deliberately ignored by the checker.
# Do not add user visible fields already released in earlier versions.
IGNORE_UNSTABLE_LIST: List[str] = [
    # The 'originalSpec' field was introduced in v5.1 behind a disabled feature flag and is not user
    # visible. This is part of the listIndexes output when executed against system.bucket.*
    # collections, which users should avoid doing.
    'listIndexes-reply-originalSpec',
    # The 'vars' field was introduced to facilitate communication between mongot and mongod and is
    # not user visible.
    'find-reply-vars',
    'aggregate-reply-vars',
    # The 'cursor' field is now optional in a reply, as inter-node communication in aggregation
    # can return one or more cursors. Multiple cursors are covered under the 'cursors' field.
    'find-reply-cursor',
    'aggregate-reply-cursor',
    # The 'recordPreImages' field is only used by Realm and is not documented to users.
    'collMod-param-recordPreImages',
    # The 'ignoreUnknownIndexOptions' field is for internal use only and is not documented to users.
    'createIndexes-param-ignoreUnknownIndexOptions',
    # The 'runtimeConstants' field is a legacy field for internal use only and is not documented to
    # users.
    'delete-param-runtimeConstants',
]
# IDL files that the checker must not try to parse (skipped while walking the
# IDL directories in get_new_commands / the old-command scan).
SKIPPED_FILES: List[str] = [
    "unittest.idl", "mozILocalization.idl", "mozILocaleService.idl", "mozIOSPreferences.idl",
    "nsICollation.idl", "nsIStringBundle.idl", "nsIScriptableUConv.idl", "nsITextToSubURI.idl"
]
# Commands excluded from compatibility checking entirely.
# Do not add commands that were visible to users in previously released versions.
IGNORE_COMMANDS_LIST: List[str] = [
    # The following commands were released behind a feature flag in 5.3 but were shelved in
    # favor of getClusterParameter and setClusterParameter. Since the feature flag was not enabled
    # in 5.3, they were effectively unusable and so can be safely removed from the strict API.
    'getChangeStreamOptions',
    'setChangeStreamOptions',
]
class FieldCompatibility:
    """Information about a Field to check compatibility."""

    def __init__(self, field_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],
                 idl_file: syntax.IDLParsedSpec, idl_file_path: str, unstable: Optional[bool],
                 optional: bool) -> None:
        """Initialize data members and handle special cases, such as the optionalBool type."""
        self.field_type = field_type
        self.idl_file = idl_file
        self.idl_file_path = idl_file_path
        self.unstable = unstable
        self.optional = optional
        if isinstance(self.field_type, syntax.Type) and self.field_type.name == "optionalBool":
            # optionalBool is wire-compatible with bool but declares
            # bson_serialization_type == 'any', which many checks in this
            # script cannot handle; normalize it to an optional plain bool.
            self.field_type = syntax.Type(field_type.file_name, field_type.line, field_type.column)
            self.field_type.name = "bool"
            self.field_type.bson_serialization_type = ["bool"]
            self.optional = True
@dataclass
class FieldCompatibilityPair:
    """Information about an old and new Field pair to check compatibility."""

    # Old-release and new-release sides of the same field.
    old: FieldCompatibility
    new: FieldCompatibility
    # Owning command and field name, used when reporting errors.
    cmd_name: str
    field_name: str
class ArrayTypeCheckResult(Enum):
    """Enumeration representing different return values of check_array_type."""

    # Based on usage in check_reply_field_type: INVALID aborts the check,
    # TRUE means both sides are arrays (element types get unwrapped), and
    # FALSE means neither side is an array — confirm against check_array_type.
    INVALID = 0
    TRUE = 1
    FALSE = 2
def get_new_commands(
        ctxt: IDLCompatibilityContext, new_idl_dir: str, import_directories: List[str]
) -> Tuple[Dict[str, syntax.Command], Dict[str, syntax.IDLParsedSpec], Dict[str, str]]:
    """Get new IDL commands and check validity."""
    commands: Dict[str, syntax.Command] = {}
    command_file: Dict[str, syntax.IDLParsedSpec] = {}
    command_file_path: Dict[str, str] = {}

    for dirpath, _, filenames in os.walk(new_idl_dir):
        for filename in filenames:
            # Only real IDL files that are not explicitly skipped.
            if not filename.endswith('.idl') or filename in SKIPPED_FILES:
                continue

            idl_path = os.path.join(dirpath, filename)
            with open(idl_path) as idl_handle:
                parsed = parser.parse(
                    idl_handle, idl_path,
                    CompilerImportResolver(import_directories + [new_idl_dir]))
                if parsed.errors:
                    parsed.errors.dump_errors()
                    raise ValueError(f"Cannot parse {idl_path}")

                for cmd in parsed.spec.symbols.commands:
                    # Ignore imported commands as they will be processed in their own file.
                    if cmd.api_version == "" or cmd.imported:
                        continue
                    if cmd.api_version != "1":
                        # We're not ready to handle future API versions yet.
                        ctxt.add_command_invalid_api_version_error(
                            cmd.command_name, cmd.api_version, idl_path)
                        continue
                    if cmd.command_name in commands:
                        ctxt.add_duplicate_command_name_error(cmd.command_name, new_idl_dir,
                                                              idl_path)
                        continue

                    commands[cmd.command_name] = cmd
                    command_file[cmd.command_name] = parsed
                    command_file_path[cmd.command_name] = idl_path

    return commands, command_file, command_file_path
def get_chained_type_or_struct(
        chained_type_or_struct: Union[syntax.ChainedType, syntax.ChainedStruct],
        idl_file: syntax.IDLParsedSpec,
        idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:
    """Resolve and get chained type or struct from the IDL file."""
    parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())
    target_name = chained_type_or_struct.name
    resolved = idl_file.spec.symbols.resolve_type_from_name(
        parser_ctxt, chained_type_or_struct, target_name, target_name)
    if parser_ctxt.errors.has_errors():
        # Surface resolution problems, but still return whatever was resolved.
        parser_ctxt.errors.dump_errors()
    return resolved
def get_field_type(field: Union[syntax.Field, syntax.Command], idl_file: syntax.IDLParsedSpec,
                   idl_file_path: str) -> Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]]:
    """Resolve and get field type of a field from the IDL file."""
    parser_ctxt = errors.ParserContext(idl_file_path, errors.ParserErrorCollection())
    resolved_type = idl_file.spec.symbols.resolve_field_type(
        parser_ctxt, field, field.name, field.type)
    if parser_ctxt.errors.has_errors():
        # Surface resolution problems, but still return whatever was resolved.
        parser_ctxt.errors.dump_errors()
    return resolved_type
def check_subset(ctxt: IDLCompatibilityContext, cmd_name: str, field_name: str, type_name: str,
                 sub_list: List[Union[str, syntax.EnumValue]],
                 super_list: List[Union[str, syntax.EnumValue]], file_path: str):
    # pylint: disable=too-many-arguments
    """Check if sub_list is a subset of the super_list and log an error if not."""
    # A subset violation means some element of sub_list is missing from super_list.
    missing = set(sub_list) - set(super_list)
    if missing:
        ctxt.add_reply_field_not_subset_error(cmd_name, field_name, type_name, file_path)
def check_superset(ctxt: IDLCompatibilityContext, cmd_name: str, type_name: str,
                   super_list: List[Union[str, syntax.EnumValue]],
                   sub_list: List[Union[str, syntax.EnumValue]], file_path: str,
                   param_name: Optional[str], is_command_parameter: bool):
    # pylint: disable=too-many-arguments
    """Check if super_list is a superset of the sub_list and log an error if not."""
    # super_list fails to be a superset exactly when sub_list has extra elements.
    extra = set(sub_list) - set(super_list)
    if extra:
        ctxt.add_command_or_param_type_not_superset_error(cmd_name, type_name, file_path,
                                                          param_name, is_command_parameter)
def check_reply_field_type_recursive(ctxt: IDLCompatibilityContext,
                                     field_pair: FieldCompatibilityPair) -> None:
    # pylint: disable=too-many-branches
    """Check compatibility between old and new reply field type if old field type is a syntax.Type instance."""
    old_field = field_pair.old
    new_field = field_pair.new
    old_field_type = old_field.field_type
    new_field_type = new_field.field_type
    cmd_name = field_pair.cmd_name
    field_name = field_pair.field_name

    # If the old field is unstable, we only add errors related to the use of 'any' as the
    # bson_serialization_type. For all other errors, we check that the old field is stable
    # before adding an error.
    # Old is a plain Type but the new side became an Enum/Struct: incompatible.
    if not isinstance(new_field_type, syntax.Type):
        if not old_field.unstable:
            ctxt.add_new_reply_field_type_enum_or_struct_error(
                cmd_name, field_name, new_field_type.name, old_field_type.name,
                new_field.idl_file_path)
        return

    # If bson_serialization_type switches from 'any' to non-any type.
    if "any" in old_field_type.bson_serialization_type and "any" not in new_field_type.bson_serialization_type:
        ctxt.add_old_reply_field_bson_any_error(cmd_name, field_name, old_field_type.name,
                                                new_field_type.name, old_field.idl_file_path)
        return

    # If bson_serialization_type switches from non-any to 'any' type.
    if "any" not in old_field_type.bson_serialization_type and "any" in new_field_type.bson_serialization_type:
        ctxt.add_new_reply_field_bson_any_error(cmd_name, field_name, old_field_type.name,
                                                new_field_type.name, new_field.idl_file_path)
        return

    # Key format used by ALLOW_ANY_TYPE_LIST for reply fields.
    allow_name: str = cmd_name + "-reply-" + field_name

    if "any" in old_field_type.bson_serialization_type:
        # If 'any' is not explicitly allowed as the bson_serialization_type.
        if allow_name not in ALLOW_ANY_TYPE_LIST:
            ctxt.add_old_reply_field_bson_any_not_allowed_error(
                cmd_name, field_name, old_field_type.name, old_field.idl_file_path)
            return

        # If cpp_type is changed, it's a potential breaking change.
        if old_field_type.cpp_type != new_field_type.cpp_type:
            ctxt.add_reply_field_cpp_type_not_equal_error(cmd_name, field_name, new_field_type.name,
                                                          new_field.idl_file_path)

        # If serializer is changed, it's a potential breaking change.
        if (not old_field.unstable) and old_field_type.serializer != new_field_type.serializer:
            ctxt.add_reply_field_serializer_not_equal_error(
                cmd_name, field_name, new_field_type.name, new_field.idl_file_path)

        # If deserializer is changed, it's a potential breaking change.
        if (not old_field.unstable) and old_field_type.deserializer != new_field_type.deserializer:
            ctxt.add_reply_field_deserializer_not_equal_error(
                cmd_name, field_name, new_field_type.name, new_field.idl_file_path)

    if isinstance(old_field_type, syntax.VariantType):
        # If the new type is not variant just check the single type.
        new_variant_types = new_field_type.variant_types if isinstance(
            new_field_type, syntax.VariantType) else [new_field_type]
        old_variant_types = old_field_type.variant_types

        # Check that new variant types are a subset of old variant types.
        for new_variant_type in new_variant_types:
            for old_variant_type in old_variant_types:
                if old_variant_type.name == new_variant_type.name:
                    # Check that the old and new version of each variant type is also compatible.
                    old = FieldCompatibility(old_variant_type, old_field.idl_file,
                                             old_field.idl_file_path, old_field.unstable,
                                             old_field.optional)
                    new = FieldCompatibility(new_variant_type, new_field.idl_file,
                                             new_field.idl_file_path, new_field.unstable,
                                             new_field.optional)
                    check_reply_field_type(ctxt,
                                           FieldCompatibilityPair(old, new, cmd_name, field_name))
                    break
            else:
                # for/else: runs only when the inner loop did not `break`, i.e.
                # new_variant_type was not found in old_variant_types.
                if not old_field.unstable:
                    ctxt.add_new_reply_field_variant_type_not_subset_error(
                        cmd_name, field_name, new_variant_type.name, new_field.idl_file_path)

        # If new type is variant and has a struct as a variant type, compare old and new variant_struct_type.
        # Since enums can't be part of variant types, we don't explicitly check for enums.
        if isinstance(new_field_type,
                      syntax.VariantType) and new_field_type.variant_struct_type is not None:
            if old_field_type.variant_struct_type is None and not old_field.unstable:
                ctxt.add_new_reply_field_variant_type_not_subset_error(
                    cmd_name, field_name, new_field_type.variant_struct_type.name,
                    new_field.idl_file_path)
            else:
                check_reply_fields(ctxt, old_field_type.variant_struct_type,
                                   new_field_type.variant_struct_type, cmd_name, old_field.idl_file,
                                   new_field.idl_file, old_field.idl_file_path,
                                   new_field.idl_file_path)

    elif not old_field.unstable:
        # Old was a plain (non-variant) type: the new side must not become a
        # variant, and its serialization types must be a subset of the old's.
        if isinstance(new_field_type, syntax.VariantType):
            ctxt.add_new_reply_field_variant_type_error(cmd_name, field_name, old_field_type.name,
                                                        new_field.idl_file_path)
        else:
            check_subset(ctxt, cmd_name, field_name, new_field_type.name,
                         new_field_type.bson_serialization_type,
                         old_field_type.bson_serialization_type, new_field.idl_file_path)
def check_reply_field_type(ctxt: IDLCompatibilityContext, field_pair: FieldCompatibilityPair):
    """Check compatibility between old and new reply field type.

    Dispatches on the resolved category of the old type (syntax.Type, Enum,
    Struct) and records any incompatibilities on `ctxt`. An unresolvable type
    on either side is fatal: errors are dumped and the process exits.
    """
    # pylint: disable=too-many-branches
    old_field = field_pair.old
    new_field = field_pair.new
    # Unwrap arrays first: if both sides are arrays, compare element types
    # below; if only one side is an array, the error has already been recorded
    # (for stable old fields) and there is nothing more to compare.
    array_check = check_array_type(ctxt, "reply_field", old_field.field_type, new_field.field_type,
                                   field_pair.cmd_name, 'type', old_field.idl_file_path,
                                   new_field.idl_file_path, old_field.unstable)
    if array_check == ArrayTypeCheckResult.INVALID:
        return
    if array_check == ArrayTypeCheckResult.TRUE:
        # NOTE: mutates field_pair in place so the rest of this function (and
        # the recursive helper) sees the element types, not the array wrappers.
        old_field.field_type = old_field.field_type.element_type
        new_field.field_type = new_field.field_type.element_type
    old_field_type = old_field.field_type
    new_field_type = new_field.field_type
    cmd_name = field_pair.cmd_name
    field_name = field_pair.field_name
    if old_field_type is None:
        ctxt.add_reply_field_type_invalid_error(cmd_name, field_name, old_field.idl_file_path)
        ctxt.errors.dump_errors()
        sys.exit(1)
    if new_field_type is None:
        ctxt.add_reply_field_type_invalid_error(cmd_name, field_name, new_field.idl_file_path)
        ctxt.errors.dump_errors()
        sys.exit(1)
    if isinstance(old_field_type, syntax.Type):
        check_reply_field_type_recursive(ctxt, field_pair)
    elif isinstance(old_field_type, syntax.Enum) and not old_field.unstable:
        # Enum replies: the new enum's values must be a subset of the old ones.
        if isinstance(new_field_type, syntax.Enum):
            check_subset(ctxt, cmd_name, field_name, new_field_type.name, new_field_type.values,
                         old_field_type.values, new_field.idl_file_path)
        else:
            ctxt.add_new_reply_field_type_not_enum_error(cmd_name, field_name, new_field_type.name,
                                                         old_field_type.name,
                                                         new_field.idl_file_path)
    elif isinstance(old_field_type, syntax.Struct):
        # Struct replies: recurse into the struct's fields.
        if isinstance(new_field_type, syntax.Struct):
            check_reply_fields(ctxt, old_field_type, new_field_type, cmd_name, old_field.idl_file,
                               new_field.idl_file, old_field.idl_file_path, new_field.idl_file_path)
        else:
            if not old_field.unstable:
                ctxt.add_new_reply_field_type_not_struct_error(
                    cmd_name, field_name, new_field_type.name, old_field_type.name,
                    new_field.idl_file_path)
def check_array_type(ctxt: IDLCompatibilityContext, symbol: str,
                     old_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],
                     new_type: Optional[Union[syntax.Enum, syntax.Struct, syntax.Type]],
                     cmd_name: str, symbol_name: str, old_idl_file_path: str,
                     new_idl_file_path: str, old_field_unstable: bool) -> ArrayTypeCheckResult:
    """
    Check compatibility between old and new ArrayTypes.

    :returns:
        - ArrayTypeCheckResult.TRUE : when the old type and new type are of array type.
        - ArrayTypeCheckResult.FALSE : when the old type and new type aren't of array type.
        - ArrayTypeCheckResult.INVALID : when one of the types is not of array type while the other one is.
    """
    # pylint: disable=too-many-arguments,too-many-branches
    old_is_array = isinstance(old_type, syntax.ArrayType)
    new_is_array = isinstance(new_type, syntax.ArrayType)
    if old_is_array == new_is_array:
        # Both sides agree: either both arrays (keep checking element types)
        # or neither is an array.
        return ArrayTypeCheckResult.TRUE if old_is_array else ArrayTypeCheckResult.FALSE
    # Exactly one side is an array. This is only reported -- and only treated
    # as invalid -- when the old field was part of the stable API.
    if old_field_unstable:
        return ArrayTypeCheckResult.TRUE
    ctxt.add_type_not_array_error(symbol, cmd_name, symbol_name, new_type.name, old_type.name,
                                  new_idl_file_path if old_is_array else old_idl_file_path)
    return ArrayTypeCheckResult.INVALID
def check_reply_field(ctxt: IDLCompatibilityContext, old_field: syntax.Field,
                      new_field: syntax.Field, cmd_name: str, old_idl_file: syntax.IDLParsedSpec,
                      new_idl_file: syntax.IDLParsedSpec, old_idl_file_path: str,
                      new_idl_file_path: str):
    """Check compatibility between old and new reply field.

    Verifies stability, optionality, and validator equality for the field
    itself, then delegates the type comparison to check_reply_field_type.
    """
    # pylint: disable=too-many-arguments
    old_field_type = get_field_type(old_field, old_idl_file, old_idl_file_path)
    new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
    # A field of type "optionalBool" is treated as optional even when it does
    # not declare `optional: true`.
    old_field_optional = old_field.optional or (old_field_type
                                                and old_field_type.name == "optionalBool")
    new_field_optional = new_field.optional or (new_field_type
                                                and new_field_type.name == "optionalBool")
    field_name: str = cmd_name + "-reply-" + new_field.name
    # Fields on the ignore list, and fields that were already unstable in the
    # old release, are exempt from all of the checks below.
    if not old_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
        if new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
            ctxt.add_new_reply_field_unstable_error(cmd_name, new_field.name, new_idl_file_path)
        # A stable reply field may not become optional.
        if new_field_optional and not old_field_optional:
            ctxt.add_new_reply_field_optional_error(cmd_name, new_field.name, new_idl_file_path)
        # Validators may neither be added nor changed on a stable reply field.
        if new_field.validator:
            if old_field.validator:
                if new_field.validator != old_field.validator:
                    ctxt.add_reply_field_validators_not_equal_error(cmd_name, new_field.name,
                                                                    new_idl_file_path)
            else:
                ctxt.add_reply_field_contains_validator_error(cmd_name, new_field.name,
                                                              new_idl_file_path)
    old_field_compatibility = FieldCompatibility(old_field_type, old_idl_file, old_idl_file_path,
                                                 old_field.unstable, old_field.optional)
    new_field_compatibility = FieldCompatibility(new_field_type, new_idl_file, new_idl_file_path,
                                                 new_field.unstable, new_field.optional)
    field_pair = FieldCompatibilityPair(old_field_compatibility, new_field_compatibility, cmd_name,
                                        old_field.name)
    check_reply_field_type(ctxt, field_pair)
def check_reply_fields(ctxt: IDLCompatibilityContext, old_reply: syntax.Struct,
                       new_reply: syntax.Struct, cmd_name: str, old_idl_file: syntax.IDLParsedSpec,
                       new_idl_file: syntax.IDLParsedSpec, old_idl_file_path: str,
                       new_idl_file_path: str):
    """Check compatibility between old and new reply fields.

    Checks that (1) the new reply's chained types are a subset of the old
    ones, (2) every stable old field still exists and is compatible, and
    (3) newly added fields declare 'unstable' and do not introduce an
    unallowed 'any' bson_serialization_type.
    """
    # pylint: disable=too-many-arguments,too-many-branches
    for new_chained_type in new_reply.chained_types or []:
        resolved_new_chained_type = get_chained_type_or_struct(new_chained_type, new_idl_file,
                                                               new_idl_file_path)
        if resolved_new_chained_type is not None:
            for old_chained_type in old_reply.chained_types or []:
                resolved_old_chained_type = get_chained_type_or_struct(
                    old_chained_type, old_idl_file, old_idl_file_path)
                if (resolved_old_chained_type is not None
                        and resolved_old_chained_type.name == resolved_new_chained_type.name):
                    # Check that the old and new version of each chained type is also compatible.
                    old = FieldCompatibility(resolved_old_chained_type, old_idl_file,
                                             old_idl_file_path, unstable=False, optional=False)
                    new = FieldCompatibility(resolved_new_chained_type, new_idl_file,
                                             new_idl_file_path, unstable=False, optional=False)
                    check_reply_field_type(
                        ctxt, FieldCompatibilityPair(old, new, cmd_name, old_reply.name))
                    break
            else:
                # for/else: new_variant_type was not found in old chained types.
                ctxt.add_new_reply_chained_type_not_subset_error(
                    cmd_name, new_reply.name, resolved_new_chained_type.name, new_idl_file_path)
    old_reply_fields = get_all_struct_fields(old_reply, old_idl_file, old_idl_file_path)
    new_reply_fields = get_all_struct_fields(new_reply, new_idl_file, new_idl_file_path)
    # Every stable field of the old reply must still exist (by name) in the
    # new reply and be pairwise compatible.
    for old_field in old_reply_fields or []:
        new_field_exists = False
        for new_field in new_reply_fields or []:
            if new_field.name == old_field.name:
                new_field_exists = True
                check_reply_field(ctxt, old_field, new_field, cmd_name, old_idl_file, new_idl_file,
                                  old_idl_file_path, new_idl_file_path)
                break
        if not new_field_exists and not old_field.unstable:
            ctxt.add_new_reply_field_missing_error(cmd_name, old_field.name, old_idl_file_path)
    for new_field in new_reply_fields or []:
        # Check that all fields in the new IDL have specified the 'unstable' field.
        if new_field.unstable is None:
            ctxt.add_new_reply_field_requires_unstable_error(cmd_name, new_field.name,
                                                             new_idl_file_path)
        # Check that newly added fields do not have an unallowed use of 'any' as the
        # bson_serialization_type.
        newly_added = True
        for old_field in old_reply_fields or []:
            if new_field.name == old_field.name:
                newly_added = False
        if newly_added:
            allow_name: str = cmd_name + "-reply-" + new_field.name
            new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
            # If we encounter a bson_serialization_type of None, we skip checking if 'any' is used.
            if isinstance(
                    new_field_type, syntax.Type
            ) and new_field_type.bson_serialization_type is not None and "any" in new_field_type.bson_serialization_type:
                # If 'any' is not explicitly allowed as the bson_serialization_type.
                any_allow = allow_name in ALLOW_ANY_TYPE_LIST or new_field_type.name == 'optionalBool'
                if not any_allow:
                    ctxt.add_new_reply_field_bson_any_not_allowed_error(
                        cmd_name, new_field.name, new_field_type.name, new_idl_file_path)
def check_param_or_command_type_recursive(ctxt: IDLCompatibilityContext,
                                          field_pair: FieldCompatibilityPair,
                                          is_command_parameter: bool):
    # pylint: disable=too-many-branches,too-many-locals
    """
    Check compatibility between old and new command or param type recursively.

    If the old type is a syntax.Type instance, check the compatibility between the old and new
    command type or parameter type recursively. Unlike the reply-side checks
    (which require subsets), parameters require the new type to be a SUPERSET
    of the old one, since the server must keep accepting everything an old
    client may send.
    """
    old_field = field_pair.old
    new_field = field_pair.new
    old_type = old_field.field_type
    new_type = new_field.field_type
    cmd_name = field_pair.cmd_name
    param_name = field_pair.field_name
    # If the old field is unstable, we only add errors related to the use of 'any' as the
    # bson_serialization_type. For all other errors, we check that the old field is stable
    # before adding an error.
    if not isinstance(new_type, syntax.Type):
        if not old_field.unstable:
            ctxt.add_new_command_or_param_type_enum_or_struct_error(
                cmd_name, new_type.name, old_type.name, new_field.idl_file_path, param_name,
                is_command_parameter)
        return
    allow_name: str = cmd_name + "-param-" + param_name if is_command_parameter else cmd_name
    # If bson_serialization_type switches from 'any' to non-any type.
    if "any" in old_type.bson_serialization_type and "any" not in new_type.bson_serialization_type:
        ctxt.add_old_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,
                                                          old_field.idl_file_path, param_name,
                                                          is_command_parameter)
        return
    # If bson_serialization_type switches from non-any to 'any' type.
    if "any" not in old_type.bson_serialization_type and "any" in new_type.bson_serialization_type:
        ctxt.add_new_command_or_param_type_bson_any_error(cmd_name, old_type.name, new_type.name,
                                                          new_field.idl_file_path, param_name,
                                                          is_command_parameter)
        return
    if "any" in old_type.bson_serialization_type:
        # If 'any' is not explicitly allowed as the bson_serialization_type.
        if allow_name not in ALLOW_ANY_TYPE_LIST:
            ctxt.add_old_command_or_param_type_bson_any_not_allowed_error(
                cmd_name, old_type.name, old_field.idl_file_path, param_name, is_command_parameter)
            return
        # With 'any' serialization the C++ type and (de)serializers define the
        # wire behavior, so changing them is a potential breaking change.
        # If cpp_type is changed, it's a potential breaking change.
        if old_type.cpp_type != new_type.cpp_type:
            ctxt.add_command_or_param_cpp_type_not_equal_error(
                cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
        # If serializer is changed, it's a potential breaking change.
        if (not old_field.unstable) and old_type.serializer != new_type.serializer:
            ctxt.add_command_or_param_serializer_not_equal_error(
                cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
        # If deserializer is changed, it's a potential breaking change.
        if (not old_field.unstable) and old_type.deserializer != new_type.deserializer:
            ctxt.add_command_or_param_deserializer_not_equal_error(
                cmd_name, new_type.name, new_field.idl_file_path, param_name, is_command_parameter)
    if isinstance(old_type, syntax.VariantType):
        if not isinstance(new_type, syntax.VariantType):
            if not old_field.unstable:
                ctxt.add_new_command_or_param_type_not_variant_type_error(
                    cmd_name, new_type.name, new_field.idl_file_path, param_name,
                    is_command_parameter)
        else:
            new_variant_types = new_type.variant_types
            old_variant_types = old_type.variant_types
            # Check that new variant types are a superset of old variant types.
            for old_variant_type in old_variant_types:
                for new_variant_type in new_variant_types:
                    # object->object_owned serialize to the same bson type. object_owned->object is
                    # not always safe so we only limit this special case to object->object_owned.
                    if (old_variant_type.name == "object" and new_variant_type.name == "object_owned") or \
                            old_variant_type.name == new_variant_type.name:
                        # Check that the old and new version of each variant type is also compatible.
                        old = FieldCompatibility(old_variant_type, old_field.idl_file,
                                                 old_field.idl_file_path, old_field.unstable,
                                                 old_field.optional)
                        new = FieldCompatibility(new_variant_type, new_field.idl_file,
                                                 new_field.idl_file_path, new_field.unstable,
                                                 new_field.optional)
                        check_param_or_command_type(
                            ctxt, FieldCompatibilityPair(old, new, cmd_name, param_name),
                            is_command_parameter)
                        break
                else:
                    if not old_field.unstable:
                        # for/else: old_variant_type was not found in new_variant_types.
                        ctxt.add_new_command_or_param_variant_type_not_superset_error(
                            cmd_name, old_variant_type.name, new_field.idl_file_path, param_name,
                            is_command_parameter)
            # If old and new types both have a struct as a variant type, compare old and new variant_struct_type.
            # Since enums can't be part of variant types, we don't explicitly check for enums.
            if old_type.variant_struct_type is not None:
                if new_type.variant_struct_type is not None:
                    check_command_params_or_type_struct_fields(
                        ctxt, old_type.variant_struct_type, new_type.variant_struct_type, cmd_name,
                        old_field.idl_file, new_field.idl_file, old_field.idl_file_path,
                        new_field.idl_file_path, is_command_parameter)
                # If old type has a variant struct type and new type does not have a variant struct type.
                elif not old_field.unstable:
                    ctxt.add_new_command_or_param_variant_type_not_superset_error(
                        cmd_name, old_type.variant_struct_type.name, new_field.idl_file_path,
                        param_name, is_command_parameter)
    elif not old_field.unstable:
        check_superset(ctxt, cmd_name, new_type.name, new_type.bson_serialization_type,
                       old_type.bson_serialization_type, new_field.idl_file_path, param_name,
                       is_command_parameter)
def check_param_or_command_type(ctxt: IDLCompatibilityContext, field_pair: FieldCompatibilityPair,
                                is_command_parameter: bool):
    """Check compatibility between old and new command parameter type or command type.

    Mirrors check_reply_field_type, but with superset semantics for enums and
    parameter structs: dispatches on the old type's category (Type, Enum,
    Struct). An unresolvable type on either side is fatal.
    """
    # pylint: disable=too-many-branches
    old_field = field_pair.old
    new_field = field_pair.new
    # Unwrap arrays first; INVALID means the array-ness of the two sides
    # differs and has already been reported.
    array_check = check_array_type(
        ctxt, "command_parameter" if is_command_parameter else "command_namespace",
        old_field.field_type, new_field.field_type, field_pair.cmd_name,
        field_pair.field_name if is_command_parameter else "type", old_field.idl_file_path,
        new_field.idl_file_path, old_field.unstable)
    if array_check == ArrayTypeCheckResult.INVALID:
        return
    if array_check == ArrayTypeCheckResult.TRUE:
        # NOTE: mutates field_pair in place so the recursive check below
        # compares element types rather than the array wrappers.
        old_field.field_type = old_field.field_type.element_type
        new_field.field_type = new_field.field_type.element_type
    old_type = old_field.field_type
    new_type = new_field.field_type
    if old_type is None:
        ctxt.add_command_or_param_type_invalid_error(field_pair.cmd_name, old_field.idl_file_path,
                                                     field_pair.field_name, is_command_parameter)
        ctxt.errors.dump_errors()
        sys.exit(1)
    if new_type is None:
        ctxt.add_command_or_param_type_invalid_error(field_pair.cmd_name, new_field.idl_file_path,
                                                     field_pair.field_name, is_command_parameter)
        ctxt.errors.dump_errors()
        sys.exit(1)
    if isinstance(old_type, syntax.Type):
        check_param_or_command_type_recursive(ctxt, field_pair, is_command_parameter)
    # Only add type errors if the old field is stable.
    elif isinstance(old_type, syntax.Enum) and not old_field.unstable:
        # Enum parameters: the new enum's values must be a superset of the old.
        if isinstance(new_type, syntax.Enum):
            check_superset(ctxt, field_pair.cmd_name, new_type.name, new_type.values,
                           old_type.values, new_field.idl_file_path, field_pair.field_name,
                           is_command_parameter)
        else:
            ctxt.add_new_command_or_param_type_not_enum_error(
                field_pair.cmd_name, new_type.name, old_type.name, new_field.idl_file_path,
                field_pair.field_name, is_command_parameter)
    elif isinstance(old_type, syntax.Struct):
        # Struct parameters: recurse into the struct's fields.
        if isinstance(new_type, syntax.Struct):
            check_command_params_or_type_struct_fields(
                ctxt, old_type, new_type, field_pair.cmd_name, old_field.idl_file,
                new_field.idl_file, old_field.idl_file_path, new_field.idl_file_path,
                is_command_parameter)
        else:
            if not old_field.unstable:
                ctxt.add_new_command_or_param_type_not_struct_error(
                    field_pair.cmd_name, new_type.name, old_type.name, new_field.idl_file_path,
                    field_pair.field_name, is_command_parameter)
def check_param_or_type_validator(ctxt: IDLCompatibilityContext, old_field: syntax.Field,
                                  new_field: syntax.Field, cmd_name: str, new_idl_file_path: str,
                                  type_name: Optional[str], is_command_parameter: bool):
    """
    Check compatibility between old and new validators.

    Check compatibility between old and new validators in command parameter type and command type
    struct fields: a new field may not introduce a validator the old field
    lacked, and an existing validator must remain exactly the same.
    """
    # pylint: disable=too-many-arguments
    if not new_field.validator:
        # No validator on the new field -- nothing can have been tightened.
        return
    if not old_field.validator:
        # The new field introduced a validator where the old field had none.
        ctxt.add_command_or_param_type_contains_validator_error(
            cmd_name, new_field.name, new_idl_file_path, type_name, is_command_parameter)
    elif new_field.validator != old_field.validator:
        # Both sides have validators, but they differ.
        ctxt.add_command_or_param_type_validators_not_equal_error(
            cmd_name, new_field.name, new_idl_file_path, type_name, is_command_parameter)
def get_all_struct_fields(struct: "syntax.Struct", idl_file: "syntax.IDLParsedSpec",
                          idl_file_path: str):
    """Get all the fields of a struct, including the chained struct fields.

    Returns a NEW list containing the struct's own fields followed by the
    fields of every resolvable chained struct. The struct's own `fields`
    list is never mutated.
    """
    # Copy rather than alias struct.fields: `struct.fields or []` returns the
    # struct's own list object whenever it is non-empty, so appending chained
    # fields onto it (as the previous version did) silently grew the parsed
    # struct each time this helper ran against it.
    all_fields = list(struct.fields or [])
    for chained_struct in struct.chained_structs or []:
        resolved_chained_struct = get_chained_type_or_struct(chained_struct, idl_file,
                                                             idl_file_path)
        if resolved_chained_struct is not None:
            all_fields.extend(resolved_chained_struct.fields)
    return all_fields
def check_command_params_or_type_struct_fields(
        ctxt: IDLCompatibilityContext, old_struct: syntax.Struct, new_struct: syntax.Struct,
        cmd_name: str, old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,
        old_idl_file_path: str, new_idl_file_path: str, is_command_parameter: bool):
    """Check compatibility between old and new parameters or command type fields.

    Checks that (1) the new struct's chained types are a superset of the old
    ones, (2) every stable old field still exists and is compatible, and
    (3) newly added fields are optional (or unstable), declare 'unstable',
    and do not introduce an unallowed 'any' bson_serialization_type.
    """
    # pylint: disable=too-many-arguments,too-many-branches
    # Check chained types.
    for old_chained_type in old_struct.chained_types or []:
        resolved_old_chained_type = get_chained_type_or_struct(old_chained_type, old_idl_file,
                                                               old_idl_file_path)
        if resolved_old_chained_type is not None:
            for new_chained_type in new_struct.chained_types or []:
                resolved_new_chained_type = get_chained_type_or_struct(
                    new_chained_type, new_idl_file, new_idl_file_path)
                if (resolved_new_chained_type is not None
                        and resolved_old_chained_type.name == resolved_new_chained_type.name):
                    # Check that the old and new version of each chained type is also compatible.
                    old = FieldCompatibility(resolved_old_chained_type, old_idl_file,
                                             old_idl_file_path, unstable=False, optional=False)
                    new = FieldCompatibility(resolved_new_chained_type, new_idl_file,
                                             new_idl_file_path, unstable=False, optional=False)
                    check_param_or_command_type(
                        ctxt, FieldCompatibilityPair(old, new, cmd_name, old_struct.name),
                        is_command_parameter=False)
                    break
            else:
                # for/else: old chained type was not found in new chained types.
                ctxt.add_new_command_or_param_chained_type_not_superset_error(
                    cmd_name, old_chained_type.name, new_idl_file_path, old_struct.name,
                    is_command_parameter)
    old_struct_fields = get_all_struct_fields(old_struct, old_idl_file, old_idl_file_path)
    new_struct_fields = get_all_struct_fields(new_struct, new_idl_file, new_idl_file_path)
    # We need to special-case the stmtId parameter because it was removed. However, it's not a
    # breaking change to the API because it was added and removed behind a feature flag, so it was
    # never officially released.
    allow_list = ["endSessions-param-stmtId", "refreshSessions-param-stmtId"]
    for old_field in old_struct_fields or []:
        new_field_exists = False
        for new_field in new_struct_fields or []:
            if new_field.name == old_field.name:
                new_field_exists = True
                check_command_param_or_type_struct_field(
                    ctxt, old_field, new_field, cmd_name, old_idl_file, new_idl_file,
                    old_idl_file_path, new_idl_file_path, old_struct.name, is_command_parameter)
                break
        allow_name: str = cmd_name + "-param-" + old_field.name
        if not new_field_exists and not old_field.unstable and allow_name not in allow_list:
            ctxt.add_new_param_or_command_type_field_missing_error(
                cmd_name, old_field.name, old_idl_file_path, old_struct.name, is_command_parameter)
    # Check if a new field has been added to the parameters or type struct.
    # If so, it must be optional.
    for new_field in new_struct_fields or []:
        # Check that all fields in the new IDL have specified the 'unstable' field.
        if new_field.unstable is None:
            ctxt.add_new_param_or_command_type_field_requires_unstable_error(
                cmd_name, new_field.name, new_idl_file_path, is_command_parameter)
        newly_added = True
        for old_field in old_struct_fields or []:
            if new_field.name == old_field.name:
                newly_added = False
        if newly_added:
            new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
            # "optionalBool" is implicitly optional even without `optional: true`.
            new_field_optional = new_field.optional or (new_field_type
                                                        and new_field_type.name == 'optionalBool')
            if not new_field_optional and not new_field.unstable:
                ctxt.add_new_param_or_command_type_field_added_required_error(
                    cmd_name, new_field.name, new_idl_file_path, new_struct.name,
                    is_command_parameter)
            # Check that a new field does not have an unallowed use of 'any' as the bson_serialization_type.
            any_allow_name: str = (cmd_name + "-param-" + new_field.name
                                   if is_command_parameter else cmd_name)
            # If we encounter a bson_serialization_type of None, we skip checking if 'any' is used.
            if isinstance(
                    new_field_type, syntax.Type
            ) and new_field_type.bson_serialization_type is not None and "any" in new_field_type.bson_serialization_type:
                # If 'any' is not explicitly allowed as the bson_serialization_type.
                any_allow = any_allow_name in ALLOW_ANY_TYPE_LIST or new_field_type.name == 'optionalBool'
                if not any_allow:
                    # BUGFIX: the offending type lives in the NEW IDL file, so
                    # report new_idl_file_path (this previously passed
                    # old_idl_file_path, unlike every other "new_*" error here
                    # and the reply-side analogue).
                    ctxt.add_new_command_or_param_type_bson_any_not_allowed_error(
                        cmd_name, new_field_type.name, new_idl_file_path, new_field.name,
                        is_command_parameter)
def check_command_param_or_type_struct_field(
        ctxt: IDLCompatibilityContext, old_field: syntax.Field, new_field: syntax.Field,
        cmd_name: str, old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,
        old_idl_file_path: str, new_idl_file_path: str, type_name: Optional[str],
        is_command_parameter: bool):
    """Check compatibility between the old and new command parameter or command type struct field.

    Verifies stability, optionality, defaults, and validators for the field
    pair, then delegates the type comparison to check_param_or_command_type.
    """
    # pylint: disable=too-many-arguments
    field_name: str = cmd_name + "-param-" + new_field.name
    # A stable field may not become unstable, unless explicitly ignored.
    if not old_field.unstable and new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
        ctxt.add_new_param_or_command_type_field_unstable_error(
            cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)
    # If old field is unstable and new field is stable, the new field should either be optional or
    # have a default value.
    old_field_type = get_field_type(old_field, old_idl_file, old_idl_file_path)
    new_field_type = get_field_type(new_field, new_idl_file, new_idl_file_path)
    # "optionalBool" is implicitly optional even without `optional: true`.
    old_field_optional = old_field.optional or (old_field_type
                                                and old_field_type.name == "optionalBool")
    new_field_optional = new_field.optional or (new_field_type
                                                and new_field_type.name == "optionalBool")
    if old_field.unstable and not new_field.unstable and not new_field_optional and new_field.default is None:
        ctxt.add_new_param_or_command_type_field_stable_required_no_default_error(
            cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)
    # A previously-optional parameter may not become required.
    if old_field_optional and not new_field_optional:
        ctxt.add_new_param_or_command_type_field_required_error(
            cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)
    if not old_field.unstable:
        check_param_or_type_validator(ctxt, old_field, new_field, cmd_name, new_idl_file_path,
                                      type_name, is_command_parameter)
    old_field_compatibility = FieldCompatibility(old_field_type, old_idl_file, old_idl_file_path,
                                                 old_field.unstable, old_field.optional)
    new_field_compatibility = FieldCompatibility(new_field_type, new_idl_file, new_idl_file_path,
                                                 new_field.unstable, new_field.optional)
    field_pair = FieldCompatibilityPair(old_field_compatibility, new_field_compatibility, cmd_name,
                                        old_field.name)
    check_param_or_command_type(ctxt, field_pair, is_command_parameter)
def check_namespace(ctxt: IDLCompatibilityContext, old_cmd: syntax.Command, new_cmd: syntax.Command,
                    old_idl_file: syntax.IDLParsedSpec, new_idl_file: syntax.IDLParsedSpec,
                    old_idl_file_path: str, new_idl_file_path: str):
    """Check compatibility between old and new namespace.

    Each of the four namespace kinds may only loosen toward "ignored"; any
    other transition is reported as incompatible.
    """
    # pylint: disable=too-many-arguments
    old_namespace = old_cmd.namespace
    new_namespace = new_cmd.namespace
    # IDL parser already checks that namespace must be one of these 4 types.
    if old_namespace == common.COMMAND_NAMESPACE_IGNORED:
        # "ignored" must stay "ignored".
        if new_namespace != common.COMMAND_NAMESPACE_IGNORED:
            ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
                                                      new_namespace, new_idl_file_path)
    elif old_namespace == common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB_OR_UUID:
        # May only stay the same or loosen to "ignored".
        if new_namespace not in (common.COMMAND_NAMESPACE_IGNORED,
                                 common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB_OR_UUID):
            ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
                                                      new_namespace, new_idl_file_path)
    elif old_namespace == common.COMMAND_NAMESPACE_CONCATENATE_WITH_DB:
        # Anything except tightening to "type" is allowed.
        if new_namespace == common.COMMAND_NAMESPACE_TYPE:
            ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
                                                      new_namespace, new_idl_file_path)
    elif old_namespace == common.COMMAND_NAMESPACE_TYPE:
        old_type = get_field_type(old_cmd, old_idl_file, old_idl_file_path)
        if new_namespace == common.COMMAND_NAMESPACE_TYPE:
            # Both are "type": the namespace types themselves must be compatible.
            new_type = get_field_type(new_cmd, new_idl_file, new_idl_file_path)
            old = FieldCompatibility(old_type, old_idl_file, old_idl_file_path, unstable=False,
                                     optional=False)
            new = FieldCompatibility(new_type, new_idl_file, new_idl_file_path, unstable=False,
                                     optional=False)
            check_param_or_command_type(ctxt,
                                        FieldCompatibilityPair(old, new, old_cmd.command_name, ""),
                                        is_command_parameter=False)
        # If old type is "namespacestring", the new namespace can be changed to any
        # of the other namespace types.
        elif old_type.name != "namespacestring":
            # Otherwise, the new namespace can only be changed to "ignored".
            if new_namespace != common.COMMAND_NAMESPACE_IGNORED:
                ctxt.add_new_namespace_incompatible_error(old_cmd.command_name, old_namespace,
                                                          new_namespace, new_idl_file_path)
    else:
        assert False, 'unrecognized namespace option'
def check_error_reply(old_basic_types_path: str, new_basic_types_path: str,
                      old_import_directories: List[str],
                      new_import_directories: List[str]) -> IDLCompatibilityErrorCollection:
    """Check IDL compatibility between old and new ErrorReply.

    Parses both basic_types IDL files, locates the ErrorReply struct in each,
    and runs the reply-field compatibility checks on the pair. Raises
    ValueError if either file fails to parse; always dumps collected errors
    before returning the error collection.
    """
    old_idl_dir = os.path.dirname(old_basic_types_path)
    new_idl_dir = os.path.dirname(new_basic_types_path)
    ctxt = IDLCompatibilityContext(old_idl_dir, new_idl_dir, IDLCompatibilityErrorCollection())
    with open(old_basic_types_path) as old_file:
        old_idl_file = parser.parse(old_file, old_basic_types_path,
                                    CompilerImportResolver(old_import_directories))
        if old_idl_file.errors:
            old_idl_file.errors.dump_errors()
            raise ValueError(f"Cannot parse {old_basic_types_path}")
        old_error_reply_struct = old_idl_file.spec.symbols.get_struct("ErrorReply")
        if old_error_reply_struct is None:
            ctxt.add_missing_error_reply_struct_error(old_basic_types_path)
        else:
            # Only parse the new file once the old ErrorReply is known to exist.
            with open(new_basic_types_path) as new_file:
                new_idl_file = parser.parse(new_file, new_basic_types_path,
                                            CompilerImportResolver(new_import_directories))
                if new_idl_file.errors:
                    new_idl_file.errors.dump_errors()
                    raise ValueError(f"Cannot parse {new_basic_types_path}")
                new_error_reply_struct = new_idl_file.spec.symbols.get_struct("ErrorReply")
                if new_error_reply_struct is None:
                    ctxt.add_missing_error_reply_struct_error(new_basic_types_path)
                else:
                    # "n/a": ErrorReply is not tied to a specific command.
                    check_reply_fields(ctxt, old_error_reply_struct, new_error_reply_struct, "n/a",
                                       old_idl_file, new_idl_file, old_basic_types_path,
                                       new_basic_types_path)
    ctxt.errors.dump_errors()
    return ctxt.errors
def split_complex_checks(
        complex_checks: List[syntax.AccessCheck]) -> Tuple[List[str], List[syntax.Privilege]]:
    """Split a list of AccessCheck into checks and privileges."""
    checks = []
    privileges = []
    for access_check in complex_checks:
        if access_check.check is not None:
            checks.append(access_check.check)
        if access_check.privilege is not None:
            privileges.append(access_check.privilege)
    # Order privileges by decreasing action_type count so that two privilege
    # lists can later be compared element-wise.
    privileges.sort(key=lambda privilege: len(privilege.action_type), reverse=True)
    return checks, privileges
def check_complex_checks(ctxt: IDLCompatibilityContext,
                         old_complex_checks: List[syntax.AccessCheck],
                         new_complex_checks: List[syntax.AccessCheck], cmd: syntax.Command,
                         new_idl_file_path: str) -> None:
    """Check the compatibility between complex access checks of the old and new command.

    The new command may not require more access checks than the old one: its
    checks must be a subset of the old checks, and each of its privileges must
    be matched by a distinct old privilege covering it.
    """
    cmd_name = cmd.command_name
    if len(new_complex_checks) > len(old_complex_checks):
        ctxt.add_new_additional_complex_access_check_error(cmd_name, new_idl_file_path)
    else:
        old_checks, old_privileges = split_complex_checks(old_complex_checks)
        new_checks, new_privileges = split_complex_checks(new_complex_checks)
        if not set(new_checks).issubset(old_checks):
            ctxt.add_new_complex_checks_not_subset_error(cmd_name, new_idl_file_path)
        if len(new_privileges) > len(old_privileges):
            ctxt.add_new_complex_privileges_not_subset_error(cmd_name, new_idl_file_path)
        else:
            # Check that each new_privilege matches an old_privilege (the resource_pattern is
            # equal and the action_types are a subset of the old action_types).
            for new_privilege in new_privileges:
                for old_privilege in old_privileges:
                    if (new_privilege.resource_pattern == old_privilege.resource_pattern
                            and set(new_privilege.action_type).issubset(old_privilege.action_type)):
                        # Each old privilege may satisfy at most one new
                        # privilege, so consume it. Removing during iteration
                        # is safe here because we break out immediately after.
                        old_privileges.remove(old_privilege)
                        break
                else:
                    # for/else: no remaining old privilege covers this new one.
                    ctxt.add_new_complex_privileges_not_subset_error(cmd_name, new_idl_file_path)
def split_complex_checks_agg_stages(
        complex_checks: List[syntax.AccessCheck]) -> Dict[str, List[syntax.AccessCheck]]:
    """Split a list of AccessChecks into a map keyed by aggregation stage (defaults to None)."""
    complex_checks_agg_stages: Dict[str, List[syntax.AccessCheck]] = {}
    for access_check in complex_checks:
        privilege = access_check.privilege
        # Checks without a privilege -- and privileges whose agg_stage is
        # unset -- are grouped together under the None key.
        agg_stage = privilege.agg_stage if privilege is not None else None
        complex_checks_agg_stages.setdefault(agg_stage, []).append(access_check)
    return complex_checks_agg_stages
def check_complex_checks_agg_stages(ctxt: IDLCompatibilityContext,
                                    old_complex_checks: List[syntax.AccessCheck],
                                    new_complex_checks: List[syntax.AccessCheck],
                                    cmd: syntax.Command, new_idl_file_path: str) -> None:
    """Check the compatibility between complex access checks of the old and new aggregation stages."""
    new_checks_by_stage = split_complex_checks_agg_stages(new_complex_checks)
    old_checks_by_stage = split_complex_checks_agg_stages(old_complex_checks)
    for agg_stage, new_stage_checks in new_checks_by_stage.items():
        # Aggregation stages are considered separate commands in the context of
        # validating the Stable API, so stages introduced since the previous
        # release have nothing to be compared against and are skipped.
        if agg_stage in old_checks_by_stage:
            check_complex_checks(ctxt, old_checks_by_stage[agg_stage], new_stage_checks, cmd,
                                 new_idl_file_path)
def check_security_access_checks(ctxt: IDLCompatibilityContext,
                                 old_access_checks: syntax.AccessChecks,
                                 new_access_checks: syntax.AccessChecks, cmd: syntax.Command,
                                 new_idl_file_path: str) -> None:
    """Check the compatibility between security access checks of the old and new command.

    Errors are reported through ctxt rather than raised. The rules enforced:
    the access-check type may not change; a simple check's name and resource
    pattern must match and its action types may only shrink; complex checks are
    compared per aggregation stage; the access_check field may not be removed,
    nor added to an existing stable (v1) command.
    """
    # pylint:disable=too-many-locals,too-many-branches,too-many-nested-blocks
    cmd_name = cmd.command_name
    if old_access_checks is not None and new_access_checks is not None:
        # Both versions define access checks: they must be of the same kind
        # (none/simple/complex) before their contents can be compared.
        old_access_check_type = old_access_checks.get_access_check_type()
        new_access_check_type = new_access_checks.get_access_check_type()
        if old_access_check_type != new_access_check_type:
            ctxt.add_access_check_type_not_equal_error(cmd_name, old_access_check_type,
                                                       new_access_check_type, new_idl_file_path)
        else:
            # Simple checks: the check name must match exactly.
            old_simple_check = old_access_checks.simple
            new_simple_check = new_access_checks.simple
            if old_simple_check is not None and new_simple_check is not None:
                if old_simple_check.check != new_simple_check.check:
                    ctxt.add_check_not_equal_error(cmd_name, old_simple_check.check,
                                                   new_simple_check.check, new_idl_file_path)
                else:
                    old_privilege = old_simple_check.privilege
                    new_privilege = new_simple_check.privilege
                    if old_privilege is not None and new_privilege is not None:
                        if old_privilege.resource_pattern != new_privilege.resource_pattern:
                            ctxt.add_resource_pattern_not_equal_error(
                                cmd_name, old_privilege.resource_pattern,
                                new_privilege.resource_pattern, new_idl_file_path)
                        # New action types may only be a subset of the old ones
                        # (privileges may narrow, never widen).
                        if not set(new_privilege.action_type).issubset(old_privilege.action_type):
                            ctxt.add_new_action_types_not_subset_error(cmd_name, new_idl_file_path)

            # Complex checks are compared stage-by-stage.
            old_complex_checks = old_access_checks.complex
            new_complex_checks = new_access_checks.complex
            if old_complex_checks is not None and new_complex_checks is not None:
                check_complex_checks_agg_stages(ctxt, old_complex_checks, new_complex_checks, cmd,
                                                new_idl_file_path)

    elif new_access_checks is None and old_access_checks is not None:
        # Removing the access_check field altogether is a breaking change.
        ctxt.add_removed_access_check_field_error(cmd_name, new_idl_file_path)
    elif old_access_checks is None and new_access_checks is not None and cmd.api_version == '1':
        # Adding access checks to an existing stable (v1) command is breaking.
        ctxt.add_added_access_check_field_error(cmd_name, new_idl_file_path)
def check_compatibility(old_idl_dir: str, new_idl_dir: str, old_import_directories: List[str],
                        new_import_directories: List[str]) -> IDLCompatibilityContext:
    """Check IDL compatibility between old and new IDL commands.

    Walks every .idl file under old_idl_dir, parses it, and compares each
    stable (api_version "1") command against its counterpart in new_idl_dir.
    All violations are collected in (and returned as) the context's error
    collection; parse failures raise ValueError.
    """
    # pylint: disable=too-many-locals
    ctxt = IDLCompatibilityContext(old_idl_dir, new_idl_dir, IDLCompatibilityErrorCollection())

    new_commands, new_command_file, new_command_file_path = get_new_commands(
        ctxt, new_idl_dir, new_import_directories)

    # Check new commands' compatibility with old ones.
    # Note, a command can be added to V1 at any time, it's ok if a
    # new command has no corresponding old command.
    old_commands: Dict[str, syntax.Command] = dict()
    for dirpath, _, filenames in os.walk(old_idl_dir):
        for old_filename in filenames:
            if not old_filename.endswith('.idl') or old_filename in SKIPPED_FILES:
                continue

            old_idl_file_path = os.path.join(dirpath, old_filename)
            with open(old_idl_file_path) as old_file:
                old_idl_file = parser.parse(
                    old_file, old_idl_file_path,
                    CompilerImportResolver(old_import_directories + [old_idl_dir]))
                if old_idl_file.errors:
                    old_idl_file.errors.dump_errors()
                    raise ValueError(f"Cannot parse {old_idl_file_path}")

                for old_cmd in old_idl_file.spec.symbols.commands:
                    # Ignore imported commands as they will be processed in their own file.
                    if old_cmd.api_version == "" or old_cmd.imported:
                        continue

                    # Ignore select commands that were removed after being added to the strict API.
                    # Only commands that were never visible to the end-user in previous releases
                    # (i.e., hidden behind a feature flag) should be allowed here.
                    if old_cmd.command_name in IGNORE_COMMANDS_LIST:
                        continue

                    if old_cmd.api_version != "1":
                        # We're not ready to handle future API versions yet.
                        ctxt.add_command_invalid_api_version_error(
                            old_cmd.command_name, old_cmd.api_version, old_idl_file_path)
                        continue

                    if old_cmd.command_name in old_commands:
                        ctxt.add_duplicate_command_name_error(old_cmd.command_name, old_idl_dir,
                                                              old_idl_file_path)
                        continue
                    old_commands[old_cmd.command_name] = old_cmd

                    if old_cmd.command_name not in new_commands:
                        # Can't remove a command from V1
                        ctxt.add_command_removed_error(old_cmd.command_name, old_idl_file_path)
                        continue

                    new_cmd = new_commands[old_cmd.command_name]
                    new_idl_file = new_command_file[old_cmd.command_name]
                    new_idl_file_path = new_command_file_path[old_cmd.command_name]

                    # A published non-strict command may not become strict.
                    if not old_cmd.strict and new_cmd.strict:
                        ctxt.add_command_strict_true_error(new_cmd.command_name, new_idl_file_path)

                    # Check compatibility of command's parameters.
                    check_command_params_or_type_struct_fields(
                        ctxt, old_cmd, new_cmd, old_cmd.command_name, old_idl_file, new_idl_file,
                        old_idl_file_path, new_idl_file_path, is_command_parameter=True)

                    check_namespace(ctxt, old_cmd, new_cmd, old_idl_file, new_idl_file,
                                    old_idl_file_path, new_idl_file_path)

                    # Reply structures must stay compatible as well.
                    old_reply = old_idl_file.spec.symbols.get_struct(old_cmd.reply_type)
                    new_reply = new_idl_file.spec.symbols.get_struct(new_cmd.reply_type)
                    check_reply_fields(ctxt, old_reply, new_reply, old_cmd.command_name,
                                       old_idl_file, new_idl_file, old_idl_file_path,
                                       new_idl_file_path)

                    check_security_access_checks(ctxt, old_cmd.access_check, new_cmd.access_check,
                                                 old_cmd, new_idl_file_path)

    ctxt.errors.dump_errors()
    return ctxt.errors
def get_generic_arguments(gen_args_file_path: str) -> Tuple[Set[str], Set[str]]:
    """Get arguments and reply fields from generic_argument.idl and check validity."""
    with open(gen_args_file_path) as gen_args_file:
        parsed_idl_file = parser.parse(gen_args_file, gen_args_file_path,
                                       CompilerImportResolver([]))
        if parsed_idl_file.errors:
            parsed_idl_file.errors.dump_errors()
            raise ValueError(f"Cannot parse {gen_args_file_path}")

        # Collect the names of the v1 generic arguments and reply fields.
        arguments: Set[str] = {
            argument.name
            for argument in parsed_idl_file.spec.symbols.get_generic_argument_list(
                "generic_args_api_v1").fields
        }
        reply_fields: Set[str] = {
            reply_field.name
            for reply_field in parsed_idl_file.spec.symbols.get_generic_reply_field_list(
                "generic_reply_fields_api_v1").fields
        }
    return arguments, reply_fields
def check_generic_arguments_compatibility(old_gen_args_file_path: str, new_gen_args_file_path: str
                                          ) -> IDLCompatibilityErrorCollection:
    """Check IDL compatibility between old and new generic_argument.idl files."""
    # IDLCompatibilityContext takes in both 'old_idl_dir' and 'new_idl_dir',
    # but for generic_argument.idl the parent directories aren't helpful for
    # logging purposes, so pass descriptive labels to make error messages clearer.
    ctxt = IDLCompatibilityContext("old generic_argument.idl", "new generic_argument.idl",
                                   IDLCompatibilityErrorCollection())

    old_arguments, old_reply_fields = get_generic_arguments(old_gen_args_file_path)
    new_arguments, new_reply_fields = get_generic_arguments(new_gen_args_file_path)

    # Generic arguments and reply fields may be added but never removed.
    for removed_argument in old_arguments - new_arguments:
        ctxt.add_generic_argument_removed(removed_argument, new_gen_args_file_path)
    for removed_reply_field in old_reply_fields - new_reply_fields:
        ctxt.add_generic_argument_removed_reply_field(removed_reply_field,
                                                      new_gen_args_file_path)

    return ctxt.errors
def main():
    """Run the script."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument("-v", "--verbose", action="count", help="Enable verbose logging")
    arg_parser.add_argument("--old-include", dest="old_include", type=str, action="append",
                            default=[], help="Directory to search for old IDL import files")
    arg_parser.add_argument("--new-include", dest="new_include", type=str, action="append",
                            default=[], help="Directory to search for new IDL import files")
    arg_parser.add_argument("old_idl_dir", metavar="OLD_IDL_DIR",
                            help="Directory where old IDL files are located")
    arg_parser.add_argument("new_idl_dir", metavar="NEW_IDL_DIR",
                            help="Directory where new IDL files are located")
    args = arg_parser.parse_args()

    def bail_if_errors(collection):
        # Stop at the first failing pass so later checks don't run on bad input.
        if collection.has_errors():
            sys.exit(1)

    # Pass 1: command-level compatibility across the IDL trees.
    bail_if_errors(
        check_compatibility(args.old_idl_dir, args.new_idl_dir, args.old_include,
                            args.new_include))

    # Pass 2: the shared ErrorReply structure in basic_types.idl.
    old_basic_types_path = os.path.join(args.old_idl_dir, "mongo/idl/basic_types.idl")
    new_basic_types_path = os.path.join(args.new_idl_dir, "mongo/idl/basic_types.idl")
    bail_if_errors(
        check_error_reply(old_basic_types_path, new_basic_types_path, args.old_include,
                          args.new_include))

    # Pass 3: generic arguments and reply fields.
    old_generic_args_path = os.path.join(args.old_idl_dir, "mongo/idl/generic_argument.idl")
    new_generic_args_path = os.path.join(args.new_idl_dir, "mongo/idl/generic_argument.idl")
    bail_if_errors(
        check_generic_arguments_compatibility(old_generic_args_path, new_generic_args_path))


if __name__ == "__main__":
    main()
|
<reponame>naveenjafer/language
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Beam pipeline to convert BooksCorpus to shareded TFRecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import os
import random
from absl import app
from absl import flags
import apache_beam as beam
from bert import tokenization
from language.conpono.cpc.preproc.preprocessing_utils import convert_instance_to_tf_example
from language.conpono.cpc.preproc.preprocessing_utils import create_instances_from_document
from language.conpono.cpc.preproc.preprocessing_utils import LONG_CTX
from language.conpono.cpc.preproc.preprocessing_utils import ONE_SENT_CTX
import nltk
from nltk.tokenize import sent_tokenize
import tensorflow.compat.v1 as tf
# Command-line configuration for the BooksCorpus preprocessing pipeline.
flags.DEFINE_string("input_file", None, "Path to raw input files.")
flags.DEFINE_string("output_file", None, "Output TF example file.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_integer("max_seq_length", 512, "Maximum sequence length.")
flags.DEFINE_float("test_size", 0.1,
                   "Size of test set by factor of total dataset.")
flags.DEFINE_float("dev_size", 0.1,
                   "Size of dev set by factor of total dataset.")
# Seeds the rng used to shuffle and split files into train/dev/test.
flags.DEFINE_integer("random_seed", 12345, "A random seed")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
# LONG_CTX / ONE_SENT_CTX are format constants from preprocessing_utils.
flags.DEFINE_enum("format", LONG_CTX, [LONG_CTX, ONE_SENT_CTX],
                  "format of preprocessed data")

FLAGS = flags.FLAGS
def _chapters_from_file(filename):
  """Read one book file and split it into chapter documents.

  A document is a list of non-empty stripped lines; a new document starts on
  each line beginning with "chapter" (case-insensitive). Empty documents are
  dropped.
  """
  documents = []
  document = []
  with tf.gfile.GFile(filename, "r") as reader:
    for line in reader:
      line = line.strip()
      if not line:
        continue
      if line.lower()[:7] == "chapter":
        if document:
          documents.append(document)
        document = []
      else:
        document.append(line)
  if document:
    documents.append(document)
  # Remove empty documents.
  return [x for x in documents if x]


def read_file(filename_tuple):
  """Read the contents of filename (str) and split into documents by chapter.

  Args:
    filename_tuple: pair of (filename, other_filenames), where other_filenames
      lists books whose chapters serve as the negative-sampling pool.

  Returns:
    A list of (document, other_documents) pairs, one per chapter of filename.
  """
  filename, other_filenames = filename_tuple
  all_documents = _chapters_from_file(filename)

  # Accumulate the chapters of every negative-sample file. BUG FIX: the
  # accumulator was previously reinitialized on each loop iteration, so only
  # the chapters of the last file in other_filenames survived.
  other_documents = []
  for otherfilename in other_filenames:
    other_documents.extend(_chapters_from_file(otherfilename))

  # Pair each chapter of this book with the shared negative-sample pool.
  return [(doc, other_documents) for doc in all_documents]
def split_line_by_sentences(line):
  """Split a line of raw text into sentences via NLTK's sent_tokenize."""
  return sent_tokenize(line)
def _build_paragraphs(lines, tokenizer):
  """Tokenize lines into sentence token lists grouped into paragraphs.

  Sentences on the same line stay together; a paragraph is emitted once it
  holds more than 10 sentences at the end of a line. A trailing partial
  paragraph (10 or fewer sentences) is dropped, matching the original
  behavior.
  """
  paragraphs = []
  paragraph = []
  for line in lines:
    line = tokenization.convert_to_unicode(line)
    # Normalize curly quotes to ASCII apostrophes.
    line = line.replace(u"\u2018", "'").replace(u"\u2019", "'")
    for sent in split_line_by_sentences(line):
      tokens = tokenizer.tokenize(sent)
      if tokens:
        paragraph.append(tokens)
    if len(paragraph) > 10:
      paragraphs.append(paragraph)
      paragraph = []
  # In case of any empty paragraphs, remove them.
  return [x for x in paragraphs if x]


def preproc_doc(document_tuple):
  """Convert document to list of TF Examples for binary order classification.

  Args:
    document_tuple: pair of (document, other_docs), where document is a
      chapter from one book as a list of lines and other_docs is a list of
      documents used for negative sampling.

  Returns:
    A list of tfexamples of binary orderings of pairs of sentences in the
    document. The tfexamples are serialized to string to be written directly
    to TFRecord.
  """
  document, other_docs = document_tuple

  # Each document is a list of lines.
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

  # Seed the RNG from the hash of the first line so each document gets a
  # different but reproducible permutation. BUG FIX: the line must be encoded
  # first — hashlib.md5 requires bytes, and hashing a str raises TypeError on
  # Python 3.
  hash_object = hashlib.md5(document[0].encode("utf-8"))
  rng = random.Random(int(hash_object.hexdigest(), 16) % (10**8))

  # The utility functions below expect the document to be split by paragraphs.
  list_of_paragraphs = _build_paragraphs(document, tokenizer)

  # Flatten the negative-sample documents and group them the same way; the
  # flattening preserves the original behavior of carrying a partially built
  # paragraph across document boundaries.
  other_lines = [line for doc in other_docs for line in doc]
  list_of_para_other_docs = _build_paragraphs(other_lines, tokenizer)

  # We need to be able to sample from multiple, sufficiently large paragraphs.
  if len(list_of_para_other_docs) == 1 or \
      sum([len(x) for x in list_of_para_other_docs]) < 35:
    return []

  # Convert the list of paragraphs into TrainingInstance objects
  # (see preprocessing_utils.py for the definition).
  instances = create_instances_from_document(list_of_paragraphs,
                                             FLAGS.max_seq_length, rng,
                                             list_of_para_other_docs,
                                             FLAGS.format)

  # Convert token lists into ids, add BERT special tokens and padding, then
  # serialize each TFExample for writing to file.
  tf_examples = [
      convert_instance_to_tf_example(tokenizer, instance,
                                     FLAGS.max_seq_length)[0]
      for instance in instances
  ]
  return [example.SerializeToString() for example in tf_examples]
def books_pipeline():
  """Read Books Corpus filenames and create Beam pipeline.

  Returns:
    A function that, given a pipeline root, wires up the train/dev/test
    preprocessing branches.
  """
  # Seed an RNG so that the train/dev/test split is reproducible.
  rng = random.Random(FLAGS.random_seed)
  # BooksCorpus is organized into directories of genre and files of books.
  # adventure-all.txt seems to contain all the adventure books in 1 file;
  # romance-all.txt is the same. None of the other directories have this,
  # so we will skip it to not double count those books.
  file_name_set = set()
  input_files_by_genre = collections.defaultdict(list)
  for path, _, fnames in tf.gfile.Walk(FLAGS.input_file):
    genre = path.split("/")[-1]
    for fname in fnames:
      if fname == "adventure-all.txt" or fname == "romance-all.txt":
        continue
      # A book can appear under several genres; keep only the first copy.
      if fname in file_name_set:
        continue
      file_name_set.add(fname)
      input_files_by_genre[genre].append(path + "/" + fname)

  # Sort genres and iterate in order for reproducability.
  train_files, dev_files, test_files = [], [], []
  for genre, file_list in sorted(input_files_by_genre.items()):
    rng.shuffle(file_list)
    genre_size = len(file_list)
    test_size = int(FLAGS.test_size * genre_size)
    dev_size = int(FLAGS.dev_size * genre_size)
    test_files.extend(file_list[:test_size])
    dev_files.extend(file_list[test_size:test_size + dev_size])
    train_files.extend(file_list[test_size + dev_size:])
    # Sanity check: the three slices partition the genre exactly.
    assert len(file_list[:test_size]) + \
        len(file_list[test_size:test_size+dev_size]) + \
        len(file_list[test_size+dev_size:]) == len(file_list)

  # Make sure there is no test/train overlap.
  for filename in train_files:
    assert filename not in test_files
    assert filename not in dev_files
  for filename in dev_files:
    assert filename not in test_files

  # Pair every file with 3 books from the same split for negative sampling.
  # BUG FIX: use the seeded rng, not the module-level random, so the pairing
  # is reproducible as intended by the seeding above.
  train_files = [(f, rng.sample(train_files, 3)) for f in train_files]
  dev_files = [(f, rng.sample(dev_files, 3)) for f in dev_files]
  test_files = [(f, rng.sample(test_files, 3)) for f in test_files]
  rng.shuffle(train_files)
  rng.shuffle(dev_files)
  rng.shuffle(test_files)

  def pipeline(root):
    """Beam pipeline for converting Books Corpus files to TF Examples."""
    _ = (
        root | "Create test files" >> beam.Create(test_files)
        | "Read test files" >> beam.FlatMap(read_file)
        | "test Shuffle" >> beam.Reshuffle()
        | "Preproc test docs" >> beam.FlatMap(preproc_doc)
        | "record test Shuffle" >> beam.Reshuffle()
        | "Write to test tfrecord" >> beam.io.WriteToTFRecord(
            FLAGS.output_file + ".cpc." + FLAGS.format + ".test.tfrecord",
            num_shards=100))
    _ = (
        root | "Create dev files" >> beam.Create(dev_files)
        | "Read dev files" >> beam.FlatMap(read_file)
        | "dev Shuffle" >> beam.Reshuffle()
        | "Preproc dev docs" >> beam.FlatMap(preproc_doc)
        | "record dev Shuffle" >> beam.Reshuffle()
        | "Write to dev tfrecord" >> beam.io.WriteToTFRecord(
            FLAGS.output_file + ".cpc." + FLAGS.format + ".dev.tfrecord",
            num_shards=100))
    _ = (
        root | "Create train files" >> beam.Create(train_files)
        | "Read train files" >> beam.FlatMap(read_file)
        | "train Shuffle" >> beam.Reshuffle()
        | "Preproc train docs" >> beam.FlatMap(preproc_doc)
        | "record train Shuffle" >> beam.Reshuffle()
        | "Write to train tfrecord" >> beam.io.WriteToTFRecord(
            FLAGS.output_file + ".cpc." + FLAGS.format + ".train.tfrecord",
            num_shards=500))

  return pipeline
def main(_):
  # If using Apache BEAM, execute runner here, e.g.:
  #   with beam.Pipeline(options=...) as root:
  #     books_pipeline()(root)
  # BUG FIX: the released body contained only a comment, which is a
  # SyntaxError in Python; `pass` makes this a valid no-op entry point.
  pass


if __name__ == "__main__":
  app.run(main)
|
<reponame>tlb-lab/credoscript<filename>credoscript/models/structure.py
from sqlalchemy import Integer, select
from sqlalchemy.sql.expression import and_, cast
from sqlalchemy.orm import backref, deferred, relationship
from sqlalchemy.orm.collections import attribute_mapped_collection
from credoscript import Base, schema, Session, citations
class Structure(Base):
    """
    Represents a PDB Structure entity from CREDO.

    Attributes
    ----------
    structure_id : int
        Primary key.
    pdb : string
        PDB 4-letter code.
    title : string
        A title for the data block. The author should attempt to convey the
        essence of the structure archived in the CIF in the title, and to
        distinguish this structural result from others (struct.title).
    authors : string
        Concatenated string of author names.
    exptl : string
        The method used in the experiment (exptl.method).
    deposition :
        Date the entry first entered the PDB database in the form yyyy-mm-dd.
        Taken from the PDB HEADER record (database_PDB_rev.date_original).
    release :
        Date the PDB revision took place. Taken from the REVDAT record
        (database_PDB_rev.date).
    resolution : float
        The smallest value for the interplanar spacings for the reflection data
        used in the refinement in angstroms. This is called the highest
        resolution (refine.ls_d_res_high).
    r_factor : float
        Residual factor R for reflections* that satisfy the
        reflns.observed_criterion were included in the refinement (when the
        refinement included the calculation of a 'free' R factor)
        (refine.ls_R_factor_R_work).
    r_free : float
        Residual factor R for reflections that satisfy the
        reflns.observed_criterion that were used as test reflections (i.e. were
        excluded from the refinement) (refine.ls_R_factor_R_free)
    pH : float
        The pH at which the crystal was grown (exptl_crystal_grow.pH).
    dpi: float
        Diffraction-Component Precision Index.
    dpi_theoretical_min: float
        The theoretical minimal value of the DPI.
    num_biomolecules : int
        Number of biomolecules generated for this Structure.

    Mapped Attributes
    -----------------
    biomolecules : Query
        All biomolecules that have this `Structure` as parent. '0' means that a
        stable prediction was not found in PISA.
    biomolecule_map : dict
        Dictionary in the form {biomolecule: Biomolecule} containing all biomolecules
        that have this `Structure` as parent. '0' means that a stable prediction
        was not found in PISA.
    xrefs : Query
        CREDO XRef objects that are associated with this Structure Entity.
    abstracts :
        Abstracts of the journal articles associated with this PDB entry
        (fetched on access; see the `abstracts` property).

    See Also
    --------
    StructureAdaptor : Fetch Structures from the database.

    Notes
    -----
    - The __getitem__ method is overloaded so that Structure[1] will return
      the first Biomolecule (biological assembly) of this structure
    """
    __tablename__ = '%s.structures' % schema['credo']

    # One-to-many: biological assemblies derived from this asymmetric unit,
    # exposed as a dynamic query.
    Biomolecules = relationship("Biomolecule",
                                primaryjoin="Biomolecule.structure_id==Structure.structure_id",
                                foreign_keys="[Biomolecule.structure_id]",
                                uselist=True, innerjoin=True, lazy='dynamic',
                                backref=backref('Structure', uselist=False, innerjoin=True))

    # map biomolecules as dictionary in the form {<assembly serial>: biomolecule}
    BiomoleculeMap = relationship("Biomolecule",
                                  collection_class=attribute_mapped_collection("assembly_serial"),
                                  primaryjoin="Biomolecule.structure_id==Structure.structure_id",
                                  foreign_keys="[Biomolecule.structure_id]",
                                  uselist=True, innerjoin=True)

    # Polymorphic cross-references: rows in XRef whose entity_type marks them
    # as belonging to a Structure.
    XRefs = relationship("XRef",
                         primaryjoin="and_(XRef.entity_type=='Structure', XRef.entity_id==Structure.structure_id)",
                         foreign_keys="[XRef.entity_type, XRef.entity_id]",
                         uselist=True, innerjoin=True, lazy='dynamic')

    def __repr__(self):
        """Return a debug representation containing the PDB code."""
        return "<Structure({self.pdb})>".format(self=self)

    def __getitem__(self, assembly_serial):
        """
        Returns the Biomolecule with the specified biomolecule serial number or
        None.

        Parameters
        ----------
        assembly_serial : int
            Serial number of the biological assembly derived from this Structure.

        Returns
        -------
        biomolecule : Biomolecule
        """
        # dict.get: returns None rather than raising KeyError for unknown serials.
        return self.BiomoleculeMap.get(assembly_serial)

    def __iter__(self):
        """
        Returns the Biomolecules of this Structure.

        Returns
        -------
        biomolecules : list
            Biological assemblies derived from this asymmetric unit.
        """
        return iter(self.Biomolecules.all())

    @property
    def abstracts(self):
        """
        Returns the abstract(s) of the journal articles that are associated with
        this PDB entry.
        """
        # Runs a fresh query each time joining the citations table against the
        # PubMed cross-references of this structure.
        session = Session()

        statement = select([citations],
                           and_(citations.c.pubmed_id==cast(XRef.xref, Integer),
                                XRef.source=='PubMed', XRef.entity_type=='Structure',
                                XRef.entity_id==self.structure_id))

        return session.execute(statement).fetchall()
from ..models.xref import XRef
|
<reponame>kadenP/TheRoleOfBuildingsInAChangingEnvironmentalEra_PleweMSThesis
'''
<NAME>
3/5/2019
Optimization Model for SEB Single Thermal Zone Building
This will define an optimization problem based on the small office EnergyPlus model. It will be passed into
the optimization algorithm directly.
idf location: C:\Users\Owner\OneDrive\Research\Masters Thesis\Open Studio\Building Models
idd location: C:\EnergyPlusV8-5-0\Energy+.idd
eppy location: C:\Users\Owner\Anaconda3\Lib\site-packages\eppy
'''
'''import libraries'''
from SmallOfficeModules import configuresmalloffice, smallofficeoutputs
from eppy.modeleditor import IDF
import eppy.json_functions as json_functions
import os
import json
import csv
from collections import defaultdict
import numpy as np
from platypus import Problem, Real
import random
# NOTE(review): commented-out globals apparently kept from an earlier
# experiment; not used by the current problem classes.
# OptCS = "global"; OptCS = []; OptHS = "global"; OptHS = []

'''parameter set used to apply uncertainty'''
# with open('jsonOUTPUT_PMVOpt10.txt') as jsonParams:
#     paramSet = json.load(jsonParams)
# Default: no uncertainty parameters are loaded.
paramSet = {'input': []}

'''optimization problem for hour 1 of 24'''
class SO1(Problem):
    """Platypus optimization problem for the small-office EnergyPlus model.

    Decision variables: 48 hourly thermostat setpoints — 24 cooling
    (23.5-30 C) followed by 24 heating (15.5-23 C).
    Objectives (3): weighted HVAC electric demand, weighted PMV during
    occupied hours, and a setpoint-change cost.
    Constraints (48): |PMV| <= 1 for occupied hours (off-hours rows are
    zeroed, so their constraints are trivially satisfied).
    """

    def __init__(self, Begin_Month, Begin_Day_of_Month, End_Month, End_Day_of_Month):
        """Configure the problem and prepare the EnergyPlus IDF model.

        Args:
            Begin_Month, Begin_Day_of_Month, End_Month, End_Day_of_Month:
                simulation run period written into the IDF RUNPERIOD object.
        """
        # 48 decision variables, 3 objectives, 48 constraints.
        # (The original docstring claimed 30 variables / 2 objectives; the
        # code has always used 48 / 3.)
        super(SO1, self).__init__(48, 3, 48)

        # Hours 1-24: cooling setpoints; hours 25-48: heating setpoints.
        # Distinct Real instances per variable, as in the original.
        self.types[:] = ([Real(23.5, 30) for _ in range(24)] +
                         [Real(15.5, 23) for _ in range(24)])

        # All constraints are of the form g(x) <= 0.
        self.constraints[:] = "<=0"

        # Files for the building simulation. NOTE(review): these Windows paths
        # rely on '\E' not being a recognized escape sequence; raw strings
        # would be safer but the runtime values are unchanged here.
        self.iddfile = "C:\EnergyPlusV8-5-0\Energy+.idd"
        self.fname = "SmallOffice.idf"
        self.weatherfile = "USA_MI_Lansing-Capital.City.AP.725390_TMY3.epw"

        # Initialize the idf device.
        IDF.setiddname(self.iddfile)
        self.idfdevice = IDF(self.fname, self.weatherfile)

        # Update the run period fields.
        for obj in self.idfdevice.idfobjects['RUNPERIOD']:
            obj.Begin_Month = Begin_Month
            obj.Begin_Day_of_Month = Begin_Day_of_Month
            obj.End_Month = End_Month
            obj.End_Day_of_Month = End_Day_of_Month

        # Update the simulation control variables.
        for obj in self.idfdevice.idfobjects['SIMULATIONCONTROL']:
            obj.Do_Zone_Sizing_Calculation = 'Yes'
            obj.Do_System_Sizing_Calculation = 'Yes'
            obj.Do_Plant_Sizing_Calculation = 'Yes'
            obj.Run_Simulation_for_Sizing_Periods = 'No'
            obj.Run_Simulation_for_Weather_File_Run_Periods = 'Yes'
        print('=== Sumulation Control Parameters Changed ===')

        # Add the Fanger thermal comfort model to all people objects.
        for obj in self.idfdevice.idfobjects['PEOPLE']:
            obj.Surface_NameAngle_Factor_List_Name = ''
            obj.Work_Efficiency_Schedule_Name = 'WORK_EFF_SCH'
            obj.Clothing_Insulation_Schedule_Name = 'CLOTHING_SCH'
            obj.Air_Velocity_Schedule_Name = 'AIR_VELO_SCH'
            obj.Thermal_Comfort_Model_1_Type = 'Fanger'

        # Request hourly reporting for every output consumed by evaluate():
        # Fanger PMV/PPD (zone average), purchased electric energy [J],
        # HVAC demand [W], thermostat setpoints and air temperature [deg C].
        for variable_name in ('Zone Thermal Comfort Fanger Model PMV',
                              'Zone Thermal Comfort Fanger Model PPD',
                              'Facility Total Purchased Electric Energy',
                              'Facility Total HVAC Electric Demand Power',
                              'Zone Thermostat Cooling Setpoint Temperature',
                              'Zone Thermostat Heating Setpoint Temperature',
                              'Zone Thermostat Air Temperature'):
            self._add_hourly_output(variable_name)

    def _add_hourly_output(self, variable_name):
        """Request hourly reporting of one EnergyPlus output variable."""
        self.idfdevice.newidfobject('OUTPUT:VARIABLE')
        output = self.idfdevice.idfobjects['OUTPUT:VARIABLE'][-1]
        output.Variable_Name = variable_name
        output.Reporting_Frequency = 'Hourly'

    def evaluate(self, solution):
        """Simulate a candidate setpoint schedule and score it.

        Populates solution.objectives (3 values) and solution.constraints
        (48 values).
        """
        # Unpack the decision vector: variables 0-23 are cooling setpoints,
        # 24-47 are heating setpoints. The individual CSPx/HSPx attributes
        # are still set for backward compatibility with external readers.
        cooling_setpoints = [solution.variables[i] for i in range(24)]
        heating_setpoints = [solution.variables[i] for i in range(24, 48)]
        for hour, value in enumerate(cooling_setpoints, start=1):
            setattr(self, 'CSP%d' % hour, value)
        for hour, value in enumerate(heating_setpoints, start=1):
            setattr(self, 'HSP%d' % hour, value)

        # Run the EnergyPlus simulation for this schedule.
        self.results = buildingSim(self.idfdevice, cooling_setpoints, heating_setpoints)
        print('=== hvacPower_ave = %f ===' % self.results.hvacPower_ave)
        print('=== allPMV_max = %f ===' % self.results.allPMV_max)
        print('=== allPMV_min = %f ===' % self.results.allPMV_min)

        # Selector (pmvI) and weight (pmvA) matrices that keep only the PMV
        # entries for occupied (working) hours; off-hour diagonals are zeroed.
        pmvI = np.identity(48)
        pmvA = np.identity(48)*5
        offHours = [0, 1, 2, 3, 4, 5, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 42, 43, 44, 45, 46, 47]
        for i in offHours:
            pmvI[i, i] = 0
            pmvA[i, i] = 0

        # Weight matrix for the HVAC power objective.
        hvacA = np.identity(48)*0.0000001

        # Super-diagonal shift matrix for the setpoint-change (derivative)
        # term; the 0 at index 23 avoids coupling the last cooling hour to
        # the first heating hour.
        diagonal = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
        D = np.diag(diagonal, 1)
        setpoints = np.array(cooling_setpoints + heating_setpoints)

        # Weight matrix for the cost of changing setpoints between hours.
        constDownA = np.identity(48)*0.05
        constrainDown = setpoints.T - D@setpoints.T
        print(constrainDown)
        print('objective 1: %f' % (self.results.hvacPower[0:48]@hvacA@self.results.hvacPower[0:48].T))
        print('objective 2: %f' % (self.results.allPMV_mean1[0, 0:48]@pmvA@self.results.allPMV_mean1[0, 0:48].T))
        print('objective 3: %f' % (constrainDown@constDownA@constrainDown.T))

        # HVAC power demand, predicted mean vote and setpoint-change objectives.
        solution.objectives[0] = np.sqrt(self.results.hvacPower[0:48]@hvacA@self.results.hvacPower[0:48].T)
        solution.objectives[1] = np.sqrt(self.results.allPMV_mean1[0, 0:48]@pmvA@self.results.allPMV_mean1[0, 0:48].T)
        # BUG FIX: this expression was corrupted in the source; reconstructed
        # to mirror the 'objective 3' print above.
        solution.objectives[2] = np.sqrt(constrainDown@constDownA@constrainDown.T)

        # Thermal comfort constraints: |PMV| - 1 <= 0 during occupied hours.
        solution.constraints[:] = abs(pmvI@self.results.allPMV_mean1[0, 0:48].T) - 1
'''optimization problem for a single set point temperature (for simplicity)'''
class SO2(Problem):
    """Single-setpoint thermostat optimization problem (simplified variant).

    Decision variables: 2 (one constant cooling setpoint and one constant
    heating setpoint applied to every hour). Objectives: 2 (weighted HVAC
    electric demand, weighted occupied-hour PMV). Constraints: 48 hourly
    comfort constraints, |zone-mean PMV| <= 1, enforced only during working
    hours over the 2-day (48 h) horizon.
    """

    # EnergyPlus variables requested at hourly resolution for post-processing.
    _HOURLY_OUTPUTS = (
        'Zone Thermal Comfort Fanger Model PMV',
        'Zone Thermal Comfort Fanger Model PPD',
        'Facility Total Purchased Electric Energy',
        'Facility Total HVAC Electric Demand Power',
        'Zone Thermostat Cooling Setpoint Temperature',
        'Zone Thermostat Heating Setpoint Temperature',
        'Zone Thermostat Air Temperature',
    )

    def __init__(self, Begin_Month, Begin_Day_of_Month, End_Month, End_Day_of_Month):
        '''Build the optimization problem and prepare the EnergyPlus model for
        the requested run period (arguments map to EnergyPlus RUNPERIOD fields).'''
        # 2 decision variables, 2 objectives, 48 constraints.
        super(SO2, self).__init__(2, 2, 48)
        # Cooling setpoint in [23.5, 30] C; heating setpoint in [15.5, 23] C.
        self.types[:] = [Real(23.5, 30), Real(15.5, 23)]
        self.constraints[:] = "<=0"
        # Simulation input files (Windows-specific absolute IDD path).
        self.iddfile = r"C:\EnergyPlusV8-5-0\Energy+.idd"
        self.fname = "SmallOffice.idf"
        self.weatherfile = "USA_MI_Lansing-Capital.City.AP.725390_TMY3.epw"
        IDF.setiddname(self.iddfile)
        self.idfdevice = IDF(self.fname, self.weatherfile)
        # Restrict the simulation to the requested run period.
        for idf_obj in self.idfdevice.idfobjects['RUNPERIOD']:
            idf_obj.Begin_Month = Begin_Month
            idf_obj.Begin_Day_of_Month = Begin_Day_of_Month
            idf_obj.End_Month = End_Month
            idf_obj.End_Day_of_Month = End_Day_of_Month
        # Size equipment but simulate only the weather-file run period.
        for idf_obj in self.idfdevice.idfobjects['SIMULATIONCONTROL']:
            idf_obj.Do_Zone_Sizing_Calculation = 'Yes'
            idf_obj.Do_System_Sizing_Calculation = 'Yes'
            idf_obj.Do_Plant_Sizing_Calculation = 'Yes'
            idf_obj.Run_Simulation_for_Sizing_Periods = 'No'
            idf_obj.Run_Simulation_for_Weather_File_Run_Periods = 'Yes'
        print('=== Simulation Control Parameters Changed ===')
        # Attach the Fanger thermal comfort model to every PEOPLE object.
        for idf_obj in self.idfdevice.idfobjects['PEOPLE']:
            # NOTE(review): this field name looks like two eppy fields fused
            # ('Surface_Name' / 'Angle_Factor_List_Name') -- confirm vs. IDD.
            idf_obj.Surface_NameAngle_Factor_List_Name = ''
            idf_obj.Work_Efficiency_Schedule_Name = 'WORK_EFF_SCH'
            idf_obj.Clothing_Insulation_Schedule_Name = 'CLOTHING_SCH'
            idf_obj.Air_Velocity_Schedule_Name = 'AIR_VELO_SCH'
            idf_obj.Thermal_Comfort_Model_1_Type = 'Fanger'
        # Request every hourly output variable in one pass.
        for variable_name in self._HOURLY_OUTPUTS:
            self._add_hourly_output(variable_name)

    def _add_hourly_output(self, variable_name):
        """Append one hourly OUTPUT:VARIABLE request to the IDF."""
        self.idfdevice.newidfobject('OUTPUT:VARIABLE')
        output = self.idfdevice.idfobjects['OUTPUT:VARIABLE'][-1]
        output.Variable_Name = variable_name
        output.Reporting_Frequency = 'Hourly'

    def evaluate(self, solution):
        """Run one EnergyPlus simulation for the candidate setpoints and fill
        in *solution*'s objectives and comfort constraints."""
        self.CSP1 = solution.variables[0]
        self.HSP1 = solution.variables[1]
        self.results = buildingSim(self.idfdevice, [self.CSP1], [self.HSP1])
        print('=== hvacPower_ave = %f ===' % self.results.hvacPower_ave)
        print('=== allPMV_max = %f ===' % self.results.allPMV_max)
        print('=== allPMV_min = %f ===' % self.results.allPMV_min)
        # pmvI selects occupied hours; pmvA additionally weights them (x5).
        pmvI = np.identity(48)
        pmvA = np.identity(48) * 5
        # Hours (within the 48 h horizon) with no occupancy: comfort unscored.
        off_hours = [0, 1, 2, 3, 4, 5, 18, 19, 20, 21, 22, 23,
                     24, 25, 26, 27, 28, 29, 42, 43, 44, 45, 46, 47]
        for hour in off_hours:
            pmvI[hour, hour] = 0
            pmvA[hour, hour] = 0
        # Tiny weight on the (large-magnitude) HVAC power term so the two
        # objectives end up on comparable scales.
        hvacA = np.identity(48) * 0.0000001
        hvac_power = self.results.hvacPower[0:48]
        pmv_mean = self.results.allPMV_mean1[0, 0:48]
        print('objective 1: %f' % (hvac_power @ hvacA @ hvac_power.T))
        print('objective 2: %f' % (pmv_mean @ pmvA @ pmv_mean.T))
        # Weighted 2-norms of HVAC demand and occupied-hour PMV.
        solution.objectives[0] = np.sqrt(hvac_power @ hvacA @ hvac_power.T)
        solution.objectives[1] = np.sqrt(pmv_mean @ pmvA @ pmv_mean.T)
        # |PMV| <= 1 during occupied hours (off hours contribute -1 <= 0).
        solution.constraints[:] = abs(pmvI @ pmv_mean.T) - 1
class buildingSim:
    """Run one EnergyPlus simulation for the given constant setpoints and parse
    the hourly CSV output into numpy arrays exposed as attributes.

    Attributes produced (hourly series are float32):
      DateTime, outdoorT, hvacPower (+ _ave/_max) and, for each of the five
      zones (core, zn1..zn4) and each series tag PMV/CS/HS/T, <zone><tag> plus
      <zone><tag>_mean/_max/_min and all<tag>(+_mean1/_mean2/_max/_min).
    """

    # Setpoint written for weekends/holidays/design days/custom days (deg C).
    # NOTE(review): the same 29.44 value is used for BOTH the cooling and the
    # heating schedule; as a heating setpoint it looks suspicious -- confirm.
    _OFF_DAY_SETPOINT = 29.44

    # (attribute prefix, CSV column prefix) for every thermal zone.
    _ZONES = (('core', 'CORE_ZN'),
              ('zn1', 'PERIMETER_ZN_1'),
              ('zn2', 'PERIMETER_ZN_2'),
              ('zn3', 'PERIMETER_ZN_3'),
              ('zn4', 'PERIMETER_ZN_4'))

    def __init__(self, idfdevice, CLG_SETPOINT, HTG_SETPOINT):
        '''Update setpoints, run EnergyPlus, and parse the simulation output.'''
        # Draw one random sample for every uncertain building parameter.
        # NOTE(review): <paramSet> and <json_functions> are module globals
        # defined elsewhere in this file.
        runJSON = {}
        for spec in paramSet['input']:
            samples = spec['Sample Values']
            runJSON[spec['eppy json string']] = samples[random.randint(0, len(samples) - 1)]
        json_functions.updateidf(idfdevice, runJSON)
        # Write the candidate setpoints into both SCHEDULE:COMPACT objects
        # (previously a 136-entry hand-written dict; now generated).
        self.runJSON = dict(self._schedule_fields('CLGSETP_SCH_YES_OPTIMUM', CLG_SETPOINT[0]))
        self.runJSON.update(self._schedule_fields('HTGSETP_SCH_YES_OPTIMUM', HTG_SETPOINT[0]))
        json_functions.updateidf(idfdevice, self.runJSON)
        # Run the simulation quietly, then post-process with the CustomCSV tool.
        idfdevice.run(verbose='q')
        os.system(r'CD E:\Masters Thesis\EnergyPlus MPC\Simulations\Baseline')
        os.system('CustomCSV SO OUTPUT')
        # Read the hourly CSV into column-name -> list-of-strings.
        newEntry = defaultdict(list)
        with open('SO_OUTPUT_hourly.csv', newline='') as newFile:
            for row in csv.DictReader(newFile):
                for key, value in row.items():
                    newEntry[key].append(value)
        self.DateTime = np.asarray(newEntry['Date/Time'], dtype=str)
        self.outdoorT = np.asarray(
            newEntry['Environment:Site Outdoor Air Drybulb Temperature [C](Hourly)'],
            dtype=np.float32)
        # Per-zone series and aggregates. PMV historically averaged over axis 0
        # (across zones) while CS/HS/T averaged over axis 1 (the singleton
        # axis); both behaviors are preserved as-is.
        # NOTE(review): the differing axes look unintentional -- verify.
        self._load_zone_series(newEntry, 'PMV',
                               'Zone Thermal Comfort Fanger Model PMV [](Hourly)', 0)
        self.hvacPower = np.asarray(
            newEntry['Whole Building:Facility Total HVAC Electric Demand Power [W](Hourly)'],
            dtype=np.float32)
        self.hvacPower_ave = np.mean(self.hvacPower)
        self.hvacPower_max = np.max(self.hvacPower)
        self._load_zone_series(newEntry, 'CS',
                               'Zone Thermostat Cooling Setpoint Temperature [C](Hourly)', 1)
        self._load_zone_series(newEntry, 'HS',
                               'Zone Thermostat Heating Setpoint Temperature [C](Hourly)', 1)
        self._load_zone_series(newEntry, 'T',
                               'Zone Thermostat Air Temperature [C](Hourly)', 1)

    @classmethod
    def _schedule_fields(cls, schedule_name, setpoint):
        """Return the eppy field updates for one SCHEDULE:COMPACT object:
        *setpoint* for every weekday hour, the off-day value for weekends,
        holidays, design days and custom days (Field_1 .. Field_68)."""
        prefix = 'idf.SCHEDULE:COMPACT.%s.Field_' % schedule_name
        fields = {prefix + '1': 'Through: %s/%s' % ('12', '31'),
                  prefix + '2': 'For: Weekday'}
        field_no = 3
        for hour in range(1, 25):
            fields[prefix + str(field_no)] = 'Until: %d:00' % hour
            fields[prefix + str(field_no + 1)] = str(setpoint)
            field_no += 2
        for day_type in ('Weekend', 'Holiday', 'WinterDesignDay',
                         'SummerDesignDay', 'CustomDay1', 'CustomDay2'):
            fields[prefix + str(field_no)] = 'For: ' + day_type
            fields[prefix + str(field_no + 1)] = 'Until: 24:00'
            fields[prefix + str(field_no + 2)] = str(cls._OFF_DAY_SETPOINT)
            field_no += 3
        return fields

    def _load_zone_series(self, entries, tag, column_suffix, mean_axis):
        """Attach <zone><tag> (+ _mean/_max/_min) for every zone, plus the
        all<tag>, all<tag>_mean1/_mean2/_max/_min aggregates."""
        per_zone = []
        for attr_prefix, column_prefix in self._ZONES:
            series = np.asarray(entries['%s:%s' % (column_prefix, column_suffix)],
                                dtype=np.float32)
            setattr(self, attr_prefix + tag, series)
            setattr(self, attr_prefix + tag + '_mean', np.mean(series))
            setattr(self, attr_prefix + tag + '_max', np.max(series))
            setattr(self, attr_prefix + tag + '_min', np.min(series))
            per_zone.append([series])  # keep the historical (5, 1, N) shape
        stacked = np.asarray(per_zone)
        mean1 = np.mean(stacked, mean_axis)
        setattr(self, 'all' + tag, stacked)
        setattr(self, 'all' + tag + '_mean1', mean1)
        setattr(self, 'all' + tag + '_mean2', np.mean(mean1))
        setattr(self, 'all' + tag + '_max', np.max(stacked))
        setattr(self, 'all' + tag + '_min', np.min(stacked))
|
<filename>src/nlplib/general/unittest.py
''' This module handles unit testing for the package. If this module is ran as a script, it will run all of the tests
for the entire package. These tests are denoted by the <__test__> module level functions. '''
import importlib.util
import pkgutil
import sys
import unittest
import warnings
__all__ = ['UnitTest', 'mock', 'test_everything']
_log_switch = {None : lambda output : output,
'silent' : lambda output : output,
False : lambda output : output,
'print' : lambda output : print(output),
True : lambda output : print(output)}
def _logging_function (mode) : # This is used in the <nlplib.general.timing> decorator too.
return _log_switch.get(mode, mode)
class UnitTest :
    ''' A thin wrapper around <unittest.TestCase> assertions that can narrate
    each check through a configurable logging function. '''

    def __init__ (self, log='silent') :
        self._test = unittest.TestCase()
        self._logging_function = _logging_function(log)

    def log (self, output) :
        ''' Feed *output* to the logging function (when one is configured) and
        hand it back to the caller. '''
        logging_function = self._logging_function
        if callable(logging_function) :
            logging_function(output)
        return output

    def assert_true (self, value) :
        self.log('The value is {bool}'.format(bool=bool(value)))
        self._test.assertTrue(value)

    def assert_equal (self, value_0, value_1) :
        outcome = str(value_0 == value_1).lower()
        self.log('({value_0} == {value_1}) evaluates to {comparison}'.format(value_0=value_0,
                                                                             value_1=value_1,
                                                                             comparison=outcome))
        self._test.assertEqual(value_0, value_1)

    def assert_raises (self, function, exc) :
        ''' Assert that calling <function> raises <exc> (or a subclass). '''
        self._test.assertRaises(exc, function)

    def assert_doesnt_raise (self, function, exc) :
        ''' This asserts that a function doesn't raise a *specific* exception type. Exceptions that <exc> inherits from
        and other exceptions will be treated as passing (they are also thrown silently). '''
        try :
            function()
        except BaseException as raised :
            if type(raised) is not exc :
                return
            msg = ("The function <{function_name}> is throwing the exception <{exc_name}>. It probably shouldn't "
                   'be doing this.').format(function_name=function.__name__, exc_name=exc.__name__)
            raise self._test.failureException(msg)
def mock (**attrs) :
    ''' This can be used to make a mock class. Every keyword argument becomes an
    attribute on both the freshly created class and the returned instance. '''
    mock_class = type('Mock', (), {})
    instance = mock_class()
    for name, value in attrs.items() :
        setattr(mock_class, name, value)
        setattr(instance, name, value)
    return instance
def _import_everything_from (pkg) :
for loader, name, is_pkg in pkgutil.walk_packages(pkg.__path__, onerror=lambda module : None) :
try :
module = loader.find_module(name).load_module(name)
except NotImplementedError :
...
except ImportError :
msg = "<{}> couldn't be imported.".format(name)
warnings.warn(Warning(msg))
print()
else :
yield module
def test_everything (pkg, log='print', test_function_log='silent', log_non_implemented_tests=False,
                     raise_not_implemented_error=False, test_function_name='__test__') :
    ''' This calls all module level functions named <__test__> within a package, in order to expedite package wide unit
    testing. '''
    # Logger for the runner itself; each module's test gets its own instance
    # (with its own, usually quieter, logging mode).
    ut = UnitTest(log)
    test_function_name = str(test_function_name)
    for module in _import_everything_from(pkg) :
        full_module_name = pkg.__name__ + '.' + module.__name__
        try :
            test_function = getattr(module, test_function_name)
        except AttributeError :
            # The module defines no test hook; mention it only when asked to.
            if log_non_implemented_tests :
                ut.log('The test for module <%s> is not implemented.' % full_module_name)
        else :
            ut.log('Testing module <%s>' % full_module_name)
            try :
                test_function(UnitTest(test_function_log))
            except TypeError :
                # NOTE: as the message admits, this also masks TypeErrors
                # raised *inside* the test function itself.
                raise TypeError('The test function named <%s> in module <%s> must accept an instance of <UnitTest> as '
                                'the first argument. Alternatively, there is a chance that the test function is '
                                'throwing TypeErrors when called.' % (test_function.__name__, full_module_name))
            except NotImplementedError :
                msg = ('Something in the function named <%s> in the module <%s> is not '
                       'implemented.' % (test_function.__name__, full_module_name))
                if raise_not_implemented_error :
                    raise NotImplementedError(msg)
                else :
                    ut.log(msg)
            else :
                ut.log('Module <%s> passed all tests!' % full_module_name)
            ut.log('') # Prints a newline
    ut.log('Done testing everything, hooray!')
def __test__ (ut) :
    ''' Self-test for this module's <mock> helper and for the exception
    assertions on <UnitTest>. '''
    fake = mock(foo='foo',
                bar=lambda : 'bar',
                baz=lambda suffix : 'baz' + suffix)
    ut.assert_equal(fake.foo, 'foo')
    ut.assert_true(callable(fake.bar))
    ut.assert_equal(fake.bar(), 'bar')
    ut.assert_equal(fake.baz('baz'), 'bazbaz')

    class Unrelated (Exception) :
        ...
    class Raised (Exception) :
        ...
    class RaisedChild (Raised) :
        ...

    def raise_it () :
        raise Raised

    # The exact raised type must trip <assert_doesnt_raise>...
    ut.assert_raises(lambda : ut.assert_doesnt_raise(raise_it, Raised), AssertionError)
    # ...while parent, child and unrelated exception types are all tolerated.
    ut.assert_doesnt_raise(raise_it, Exception)
    ut.assert_doesnt_raise(raise_it, Unrelated)
    ut.assert_doesnt_raise(raise_it, RaisedChild)
if __name__ == '__main__' :
    # Running this module directly unit-tests the whole <nlplib> package.
    import nlplib
    test_everything(nlplib, log='print')
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
import math
import sklearn.preprocessing as sk
import seaborn as sns
from sklearn import metrics
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
#import utils
#from myutils import AllTripletSelector,HardestNegativeTripletSelector, RandomNegativeTripletSelector, SemihardNegativeTripletSelector # Strategies for selecting triplets within a minibatch
#from mymetrics import AverageNonzeroTripletsMetric
#from utils import RandomNegativeTripletSelector
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
import random
from random import randint
from sklearn.model_selection import StratifiedKFold,RepeatedKFold
from sklearn.metrics import roc_curve, auc
from scipy import interp
import numpy as np
import warnings
import math
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn import preprocessing
# Silence all library warnings for the duration of the run.
warnings.filterwarnings("ignore")
#save_results_to = '/home/winston/MOLI/res/all_6index/nofs/'
#save_results_to = 'F:/MOLI/result/'
# Output directory for result files (Windows path).
save_results_to = 'F:/MOLI/result/res2/'
#torch.manual_seed(42)
# Fix the Python RNG for reproducible hyperparameter draws.
# NOTE(review): the numpy and torch seeds are NOT fixed (torch.manual_seed is
# commented out), so runs are not fully reproducible -- confirm intent.
random.seed(42)
max_iter = 1  # number of random hyperparameter draws to evaluate
print('AVSB********************************************************************************************')
# Load the three BRCA omics matrices; row 0 of each file holds the per-sample
# subtype label and the remaining rows hold features x samples.
TCGAEx_cnv = pd.read_csv("brca_cnv_subtype.csv",
                         index_col=0, header=None)
TCGAEx_rna = pd.read_csv("brca_rna_subtype.csv",
                         index_col=0, header=None)
TCGAEx_meth = pd.read_csv("brca_meth_subtype.csv",
                          index_col=0, header=None)
# Split each matrix at column 277 and immediately re-join the two halves.
# NOTE(review): hstack of columns 0:277 and 277:317 reproduces columns 0:317
# unchanged -- presumably a leftover from selecting two subtype groups; verify.
TCGAEx_cnv=TCGAEx_cnv.values
TCGAEx_cnv1=TCGAEx_cnv[:,0:277]
TCGAEx_cnv2=TCGAEx_cnv[:,277:317]
TCGAEx1=np.hstack((TCGAEx_cnv1,TCGAEx_cnv2))
TCGAEx_rna=TCGAEx_rna.values
TCGAEx_rna1=TCGAEx_rna[:,0:277]
TCGAEx_rna2=TCGAEx_rna[:,277:317]
TCGAEx2=np.hstack((TCGAEx_rna1,TCGAEx_rna2))
TCGAEx_meth=TCGAEx_meth.values
TCGAEx_meth1=TCGAEx_meth[:,0:277]
TCGAEx_meth2=TCGAEx_meth[:,277:317]
TCGAEx3=np.hstack((TCGAEx_meth1,TCGAEx_meth2))
#print(TCGAEx)
# First row = labels, remaining rows = features (transposed below so that
# samples become rows).
label=TCGAEx1[0,:]
data1=TCGAEx1[1:,:]
data2=TCGAEx2[1:,:]
data3=TCGAEx3[1:,:]
#print(label)
# Binarize the two subtypes of interest.
# NOTE(review): 'lminalA'/'lminalB' look like misspellings of 'luminalA'/
# 'luminalB' -- confirm they match the label strings actually in the CSVs,
# otherwise no sample is relabeled here.
label[np.where(label=='lminalA')]=1
label[np.where(label=='lminalB')]=0
#label[np.where(label=='TNBC')]=1
#label[np.where(label=='ERBB2')]=1
#label[np.where(label=='normal')]=0
#TCGAE = pd.DataFrame.transpose(TCGAE)
#print(label)
data1=data1.T
data2=data2.T
data3=data3.T
Y=np.array(label.astype(int))
# Scale each omics block to [0, 1], then keep the 5000 features most
# associated with the label according to a chi-squared test.
min_max_scaler = preprocessing.MinMaxScaler()
x1 = min_max_scaler.fit_transform(data1)
x2 = min_max_scaler.fit_transform(data2)
x3 = min_max_scaler.fit_transform(data3)
TCGAE1 = SelectKBest(chi2, k=5000).fit_transform(x1, Y)
TCGAE2 = SelectKBest(chi2, k=5000).fit_transform(x2, Y)
TCGAE3 = SelectKBest(chi2, k=5000).fit_transform(x3, Y)
#print(Y)
# Full hyperparameter search space, kept for reference but disabled. (A stray
# lone quote character previously sat here and made the whole script fail with
# a SyntaxError; the grid is now plain comments instead of a string literal.)
# ls_mb_size = [13, 36, 64]
# ls_h_dim = [1024, 256, 128, 512, 64, 16]
# #ls_h_dim = [32, 16, 8, 4]
# ls_marg = [0.5, 1, 1.5, 2, 2.5, 3]
# ls_lr = [0.5, 0.1, 0.05, 0.01, 0.001, 0.005, 0.0005, 0.0001,0.00005, 0.00001]
# ls_epoch = [20, 50, 90, 100]
# ls_rate = [0.3, 0.4, 0.5]
# ls_wd = [0.1, 0.001, 0.0001]
# ls_lam = [0.1, 0.5, 0.01, 0.05, 0.001, 0.005]
ls_mb_size = [64]
ls_h_dim = [512]
ls_lr = [0.01]
ls_epoch = [100]
ls_rate = [0.5]
ls_wd = [0.001]
skf = StratifiedKFold(n_splits=5, random_state=None,shuffle=True)
rkf = RepeatedKFold(n_splits=5, n_repeats=10, random_state=0)  # NOTE(review): defined but the loops below use skf
# NOTE(review): indentation below is reconstructed from context; nesting is
# hyper-parameter draw (iters) -> CV repeat -> CV fold.
for iters in range(max_iter):
    #print('iters:',iters)
    k = 0  # running fold counter across all repeats (1..50)
    # Sample one hyper-parameter setting for this outer iteration.
    mbs = random.choice(ls_mb_size)
    hdm = random.choice(ls_h_dim)
    #mrg = random.choice(ls_marg)
    lre = random.choice(ls_lr)
    lrCL = random.choice(ls_lr)
    epch = random.choice(ls_epoch)
    rate = random.choice(ls_rate)
    wd = random.choice(ls_wd)
    #lam = random.choice(ls_lam)
    # Per-fold history lists: train/test loss, AUC, accuracy (plain and
    # balanced), sensitivity and specificity.
    costtr_all=[]
    costts_all=[]
    auctr_all=[]
    aucts_all=[]
    acctr1_all=[]
    accts1_all=[]
    acctr2_all=[]
    accts2_all=[]
    sentr_all=[]
    sents_all=[]
    spetr_all=[]
    spets_all=[]
    # Accumulators for the averaged ROC curve.
    mean_tpr = 0.0  # running sum of TPRs interpolated onto mean_fpr
    mean_fpr = np.linspace(0, 1, 100)
    #cnt = 0
    for repeat in range(10):
        for train_index, test_index in skf.split(TCGAE1, Y.astype('int')):
            #if(k%5==0):
            k = k + 1
            # Split each omics view with the same fold indices.
            X_trainE1 = TCGAE1[train_index,:]
            X_trainE2 = TCGAE2[train_index,:]
            X_trainE3 = TCGAE3[train_index,:]
            X_testE1 = TCGAE1[test_index,:]
            X_testE2 = TCGAE2[test_index,:]
            X_testE3 = TCGAE3[test_index,:]
            y_trainE = Y[train_index]
            y_testE = Y[test_index]
            TX_testE1 = torch.FloatTensor(X_testE1)
            TX_testE2 = torch.FloatTensor(X_testE2)
            TX_testE3 = torch.FloatTensor(X_testE3)
            ty_testE = torch.FloatTensor(y_testE.astype(int))
            #Train
            # Class-balanced sampling: weight each sample inversely to its
            # class frequency.
            class_sample_count = np.array([len(np.where(y_trainE==t)[0]) for t in np.unique(y_trainE)])
            #print(class_sample_count)
            weight = 1. / class_sample_count
            samples_weight = np.array([weight[t] for t in y_trainE])
            samples_weight = torch.from_numpy(samples_weight)
            sampler = WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), len(samples_weight), replacement=True)
            mb_size = mbs
            trainDataset = torch.utils.data.TensorDataset(torch.FloatTensor(X_trainE1),torch.FloatTensor(X_trainE2),
                                                          torch.FloatTensor(X_trainE3),torch.FloatTensor(y_trainE.astype(int)))
            # shuffle must be False when a sampler is supplied.
            trainLoader = torch.utils.data.DataLoader(dataset = trainDataset, batch_size=mb_size, shuffle=False, num_workers=0, sampler = sampler)
            n_sampE1, IE1_dim = X_trainE1.shape
            n_sampE2, IE2_dim = X_trainE2.shape
            n_sampE3, IE3_dim = X_trainE3.shape
            # All three encoders share the same hidden width.
            h_dim1 = hdm
            h_dim2 = hdm
            h_dim3 = hdm
            Z_in = h_dim1+h_dim2+h_dim3  # classifier input = concatenated codes
            #marg = mrg
            lrE = lre
            epoch = epch
            # Per-epoch histories for this fold.
            costtr = []
            auctr = []
            costts = []
            aucts = []
            acctr1=[]
            accts1=[]
            acctr2=[]
            accts2=[]
            acc0tr=[]
            acc0ts=[]
            sentr=[]
            sents=[]
            spetr=[]
            spets=[]
class AEE(nn.Module):
    """Encoder for the first omics view: Linear -> BatchNorm -> ReLU -> Dropout.

    IE1_dim, h_dim1 and rate are read from the enclosing training scope at
    instantiation time.
    """

    def __init__(self):
        super(AEE, self).__init__()
        self.EnE = nn.Sequential(
            nn.Linear(IE1_dim, h_dim1),
            nn.BatchNorm1d(h_dim1),
            nn.ReLU(),
            nn.Dropout(rate),
        )

    def forward(self, x):
        """Encode a batch of view-1 features into the latent space."""
        return self.EnE(x)
class AEM(nn.Module):
    """Encoder for the second omics view: Linear -> BatchNorm -> ReLU -> Dropout.

    IE2_dim, h_dim2 and rate are read from the enclosing training scope at
    instantiation time.
    """

    def __init__(self):
        super(AEM, self).__init__()
        self.EnM = nn.Sequential(
            nn.Linear(IE2_dim, h_dim2),
            nn.BatchNorm1d(h_dim2),
            nn.ReLU(),
            nn.Dropout(rate),
        )

    def forward(self, x):
        """Encode a batch of view-2 features into the latent space."""
        return self.EnM(x)
class AEC(nn.Module):
    """Encoder for the third omics view: Linear -> BatchNorm -> ReLU -> Dropout.

    IE3_dim, h_dim3 and rate are read from the enclosing training scope at
    instantiation time.
    """

    def __init__(self):
        super(AEC, self).__init__()
        self.EnC = nn.Sequential(
            nn.Linear(IE3_dim, h_dim3),
            nn.BatchNorm1d(h_dim3),
            nn.ReLU(),
            nn.Dropout(rate),
        )

    def forward(self, x):
        """Encode a batch of view-3 features into the latent space."""
        return self.EnC(x)
class OnlineTriplet(nn.Module):
    """Thin wrapper that delegates online triplet mining to a selector.

    The margin is stored but not used here; it is kept for interface
    compatibility with the loss that consumes the mined triplets.
    """

    def __init__(self, marg, triplet_selector):
        super(OnlineTriplet, self).__init__()
        self.marg = marg
        self.triplet_selector = triplet_selector

    def forward(self, embeddings, target):
        """Return the triplets mined from *embeddings* for labels *target*."""
        return self.triplet_selector.get_triplets(embeddings, target)
class OnlineTestTriplet(nn.Module):
    """Test-time counterpart of OnlineTriplet.

    Identical behaviour: delegates triplet mining to the selector; the
    margin is stored only for interface compatibility.
    """

    def __init__(self, marg, triplet_selector):
        super(OnlineTestTriplet, self).__init__()
        self.marg = marg
        self.triplet_selector = triplet_selector

    def forward(self, embeddings, target):
        """Return the triplets mined from *embeddings* for labels *target*."""
        return self.triplet_selector.get_triplets(embeddings, target)
class Classifier(nn.Module):
    """Binary classifier head over the concatenated latent codes.

    Z_in and rate are read from the enclosing training scope at
    instantiation time.  Layer order (Linear -> Dropout -> Sigmoid) matches
    the original: dropout is applied to the logit, before the sigmoid.
    """

    def __init__(self):
        super(Classifier, self).__init__()
        self.FC = nn.Sequential(
            nn.Linear(Z_in, 1),
            nn.Dropout(rate),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return the predicted positive-class probability for each row."""
        return self.FC(x)
            # NOTE(review): indentation in this span is reconstructed; the
            # nesting below is fold -> epoch -> minibatch.
            # ---- per-fold model construction ----
            torch.cuda.manual_seed_all(42)
            AutoencoderE1 = AEE()
            AutoencoderE2 = AEM()
            AutoencoderE3 = AEC()
            solverE1 = optim.Adagrad(AutoencoderE1.parameters(), lr=lrE)
            solverE2 = optim.Adagrad(AutoencoderE2.parameters(), lr=lrE)
            solverE3 = optim.Adagrad(AutoencoderE3.parameters(), lr=lrE)
            Clas = Classifier()
            SolverClass = optim.Adagrad(Clas.parameters(), lr=lrCL, weight_decay = wd)
            C_loss = torch.nn.BCELoss()
            print('epoch_all',epoch)
            for it in range(epoch):
                #print('epoch:',it)
                epoch_cost4 = []  # per-minibatch training losses
                epoch_cost3 = []  # per-minibatch training AUCs
                # Per-epoch confusion bookkeeping (train: p_*/n_*; test: *_t).
                p_real=[]
                p_pred=[]
                n_real=[]
                n_pred=[]
                p_realt=[]
                p_predt=[]
                n_realt=[]
                n_predt=[]
                num_minibatches = int(n_sampE1 / mb_size)
                for i, (dataE1,dataE2,dataE3, target) in enumerate(trainLoader):
                    flag = 0
                    AutoencoderE1.train()
                    AutoencoderE2.train()
                    AutoencoderE3.train()
                    Clas.train()
                    # Skip single-class minibatches (AUC is undefined there).
                    if torch.mean(target)!=0. and torch.mean(target)!=1.:
                        # Encode each view and classify the concatenation.
                        ZEX1 = AutoencoderE1(dataE1)
                        ZEX2 = AutoencoderE2(dataE2)
                        ZEX3 = AutoencoderE3(dataE3)
                        ZT = torch.cat((ZEX1, ZEX2, ZEX3), 1)
                        ZT = F.normalize(ZT, p=2, dim=0)
                        Pred = Clas(ZT)
                        loss=C_loss(Pred,target.view(-1,1))
                        y_true = target.view(-1,1)
                        y_pred = Pred
                        #AUC
                        AUC = roc_auc_score(y_true.detach().numpy(),y_pred.detach().numpy())
                        #print('AUC:',AUC)
                        #print('LOSS:',loss)
                        #acc
                        # Threshold at 0.5 and bucket by true class.
                        mask = y_pred.ge(0.5).float()
                        for i in range(len(y_true)):
                            if(y_true[i]==1):
                                p_real.append(y_true[:,0][i])
                                p_pred.append(mask[:,0][i])
                            else:
                                n_real.append(y_true[:,0][i])
                                n_pred.append(mask[:,0][i])
                        # Joint optimisation step over all four modules.
                        solverE1.zero_grad()
                        solverE2.zero_grad()
                        solverE3.zero_grad()
                        SolverClass.zero_grad()
                        loss.backward()
                        solverE1.step()
                        solverE2.step()
                        solverE3.step()
                        SolverClass.step()
                        epoch_cost4.append(loss)
                        epoch_cost3.append(AUC)
                        flag = 1
                # Record epoch-level train metrics only if at least one
                # usable (two-class) minibatch was seen.
                if flag == 1:
                    costtr.append(torch.mean(torch.FloatTensor(epoch_cost4)))
                    auctr.append(np.mean(epoch_cost3))
                    #print('Iter-{}; Total loss: {:.4}'.format(it, loss))
                    p_pred=torch.FloatTensor(p_pred)
                    p_real=torch.FloatTensor(p_real)
                    n_pred=torch.FloatTensor(n_pred)
                    n_real=torch.FloatTensor(n_real)
                    # Confusion counts from the thresholded predictions.
                    tp=(p_pred==p_real).sum().float()
                    fn=len(p_real)-tp
                    tn=(n_pred==n_real).sum().float()
                    fp=len(n_real)-tn
                    acc1=(tp+tn)/(len(p_real)+len(n_real))  # plain accuracy
                    sen=tp/len(p_real)
                    spe=tn/len(n_real)
                    acc2=(sen+spe)/2  # balanced accuracy
                    acctr1.append(acc1)
                    acctr2.append(acc2)
                    sentr.append(sen)
                    spetr.append(spe)
                # ---- evaluation on the held-out fold (every epoch) ----
                with torch.no_grad():
                    AutoencoderE1.eval()
                    AutoencoderE2.eval()
                    AutoencoderE3.eval()
                    Clas.eval()
                    #ZET = AutoencoderE(TX_testE)
                    ZET1 = AutoencoderE1(TX_testE1)
                    ZET2 = AutoencoderE2(TX_testE2)
                    ZET3 = AutoencoderE3(TX_testE3)
                    ZTT = torch.cat((ZET1, ZET2, ZET3), 1)
                    ZTT = F.normalize(ZTT, p=2, dim=0)
                    PredT = Clas(ZTT)
                    lossT=C_loss(PredT,ty_testE.view(-1,1))
                    y_truet = ty_testE.view(-1,1)
                    y_predt = PredT
                    AUCt = roc_auc_score(y_truet.detach().numpy(),y_predt.detach().numpy())
                    #acc
                    maskt = y_predt.ge(0.5).float()
                    for i in range(len(y_truet)):
                        if(y_truet[i]==1):
                            p_realt.append(y_truet[:,0][i])
                            p_predt.append(maskt[:,0][i])
                        else:
                            n_realt.append(y_truet[:,0][i])
                            n_predt.append(maskt[:,0][i])
                    p_predt=torch.FloatTensor(p_predt)
                    p_realt=torch.FloatTensor(p_realt)
                    n_predt=torch.FloatTensor(n_predt)
                    n_realt=torch.FloatTensor(n_realt)
                    tp=(p_predt==p_realt).sum().float()
                    fn=len(p_realt)-tp
                    tn=(n_predt==n_realt).sum().float()
                    fp=len(n_realt)-tn
                    acc1=(tp+tn)/len(y_truet)
                    sen=tp/len(p_realt)
                    spe=tn/len(n_realt)
                    acc2=(sen+spe)/2
                    accts1.append(acc1)
                    accts2.append(acc2)
                    sents.append(sen)
                    spets.append(spe)
                    costts.append(lossT)
                    aucts.append(AUCt)
                    # Final epoch of the fold: report the test metrics and
                    # accumulate this fold's ROC curve.
                    if(it==epoch-1):
                        #print('y_true:',y_true)
                        #print('mask:',mask)
                        #print('p_real:',p_real)
                        #print('p_pred:',p_pred)
                        #print('n_real:',n_real)
                        #print('n_pred:',n_pred)
                        #print('p_realt:',p_realt)
                        #print('p_predt:',p_predt)
                        #print('n_realt:',n_realt)
                        #print('n_predt:',n_predt)
                        print('acctest1:',acc1)
                        print('acctest2:',acc2)
                        print('sentest:',sen)
                        print('spetest:',spe)
                        #ROC
                        fpr, tpr, thresholds = roc_curve(y_truet, y_predt)
                        # Accumulate the TPR interpolated onto the shared
                        # FPR grid for the averaged ROC curve.
                        mean_tpr += np.interp(mean_fpr, fpr, tpr)
                        # Every 5 folds (= one full CV repeat), plot the
                        # running mean ROC curve so far.
                        if(k%5==0):
                            temp_mean_tpr=mean_tpr/k
                            # NOTE(review): this zeroes the first bin of the
                            # ACCUMULATOR, not of temp_mean_tpr -- probably
                            # meant temp_mean_tpr[0]; confirm.
                            mean_tpr[0] = 0.0
                            #roc_auc = auc(fpr, tpr)  # per-fold AUC (disabled)
                            mean_auc = auc(mean_fpr, temp_mean_tpr)
                            plt.plot(mean_fpr, temp_mean_tpr, label='Mean ROC repeats {0:.1f} (area = {1:.2f})'.format(k//5,mean_auc), lw=1)
                        #plt.plot(fpr, tpr, lw=1, label='ROC fold {0:.2f} (area = {1:.2f})'.format(k, roc_auc))  # per-fold ROC (disabled)
            # ---- end of epochs: record this fold's histories ----
            costtr_all.append(costtr)
            auctr_all.append(auctr)
            costts_all.append(costts)
            aucts_all.append(aucts)
            acctr1_all.append(acctr1)
            accts1_all.append(accts1)
            acctr2_all.append(acctr2)
            accts2_all.append(accts2)
            sentr_all.append(sentr)
            sents_all.append(sents)
            spetr_all.append(spetr)
            spets_all.append(spets)
            # Per-fold learning curves (train = red, test = blue).
            plt.plot(np.squeeze(costtr), '-r',np.squeeze(costts), '-b')
            plt.ylabel('Total cost')
            plt.xlabel('iterations (per tens)')
            title = 'Cost iter = {}, fold = {}, mb_size = {}, h_dim = {} , lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}'.\
                format(iters, k, mbs, hdm, lre, epch, rate, wd, lrCL)
            #title = 'Cost iter = {}, fold = {}, mb_size = {}, h_dim = {}, marg = {}, lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}, lam = {}'.\
            #    format(iters, k, mbs, hdm, mrg, lre, epch, rate, wd, lrCL, lam)
            plt.suptitle(title)
            plt.savefig(save_results_to + title + '.png', dpi = 150)
            plt.close()
            plt.plot(np.squeeze(auctr), '-r',np.squeeze(aucts), '-b')
            plt.ylabel('AUC')
            plt.xlabel('iterations (per tens)')
            title = 'AUC iter = {}, fold = {}, mb_size = {}, h_dim = {}, lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}, '.\
                format(iters, k, mbs, hdm, lre, epch, rate, wd, lrCL)
            plt.suptitle(title)
            plt.savefig(save_results_to + title + '.png', dpi = 150)
            plt.close()
            # After the last fold of the last repeat (5 folds x 10 repeats),
            # finish and save the averaged ROC curve.
            if k%50==0:
                #ROC-mean
                plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')  # chance diagonal
                mean_tpr /= k  # average the accumulated TPR over all folds
                mean_tpr[-1] = 1.0  # force the curve to end at (1, 1)
                mean_auc = auc(mean_fpr, mean_tpr)
                plt.plot(mean_fpr, mean_tpr, 'k--',label='Mean ROC (area = {0:.2f})'.format(mean_auc), lw=2)
                plt.xlim([-0.05, 1.05])  # widen limits so the curve is not clipped by the axes
                plt.ylim([-0.05, 1.05])
                plt.xlabel('False Positive Rate')
                plt.ylabel('True Positive Rate')
                plt.title('Receiver operating characteristic example')
                plt.legend(loc="lower right")
                title='all-ROC(A=1,B=0)-skf10 '
                plt.suptitle(title)
                plt.savefig(save_results_to + title + '.png', dpi = 150)
                plt.close()
costtr_all=np.array(costtr_all)
auctr_all=np.array(auctr_all)
costts_all=np.array(costts_all)
aucts_all=np.array(aucts_all)
acctr1_all=np.array(acctr1_all)
accts1_all=np.array(accts1_all)
acctr2_all=np.array(acctr2_all)
accts2_all=np.array(accts2_all)
sentr_all=np.array(sentr_all)
sents_all=np.array(sents_all)
spetr_all=np.array(spetr_all)
spets_all=np.array(spets_all)
#ctr=sum(costtr_all)/5
#atr=sum(auctr_all)/5
#cts=sum(costts_all)/5
#ats=sum(aucts_all)/5
print(costtr_all)
costtr5=sum(costtr_all)/k
costts5=sum(costts_all)/k
auctr5=sum(auctr_all)/k
aucts5=sum(aucts_all)/k
acctr15=sum(acctr1_all)/k
accts15=sum(accts1_all)/k
acctr25=sum(acctr2_all)/k
accts25=sum(accts2_all)/k
sentr5=sum(sentr_all)/k
sents5=sum(sents_all)/k
spetr5=sum(spetr_all)/k
spets5=sum(spets_all)/k
print('acc1-200:',accts15[-1])
print('acc1max:',max(accts15))
print('acc2-200:',accts25[-1])
print('acc2max:',max(accts25))
print('cost200:',costts5[-1])
print('costmin:',min(costts5))
print('auc200:',aucts5[-1])
print('aucmax:',max(aucts5))
print('acc1:',accts15)
print('acc2:',accts25)
print('cost:',costts5)
print('auc:',aucts5)
#ctr=ctr/5
#atr=atr/5
#cts=cts/5
#ats=ats/5
plt.plot(np.squeeze(costtr5), '-r',np.squeeze(costts5), '-b')
plt.ylabel('Total cost')
plt.xlabel('epoch')
title = 'all_ Cost(A=1,B=0)-skf10 mb_size = {}, h_dim = {} , lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}'.\
format( mbs, hdm, lre, epch, rate, wd, lrCL)
#title = 'Cost iter = {}, fold = {}, mb_size = {}, h_dim = {}, marg = {}, lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}, lam = {}'.\
# format(iters, k, mbs, hdm, mrg, lre, epch, rate, wd, lrCL, lam)
plt.suptitle(title)
plt.savefig(save_results_to + title + '.png', dpi = 150)
plt.close()
plt.plot(np.squeeze(auctr5), '-r',np.squeeze(aucts5), '-b')
plt.ylabel('AUC')
plt.xlabel('epoch')
title = 'all_ AUC(A=1,B=0)-skf10 mb_size = {}, h_dim = {}, lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}, '.\
format( mbs, hdm, lre, epch, rate, wd, lrCL)
plt.suptitle(title)
plt.savefig(save_results_to + title + '.png', dpi = 150)
plt.close()
plt.plot(np.squeeze(acctr5), '-r',np.squeeze(accts5), '-b')
plt.ylabel('Accuracy')
plt.xlabel('epoch')
title = 'all_ Accuracy(A=1,B=0)-skf10 mb_size = {}, h_dim = {}, lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}, '.\
format( mbs, hdm, lre, epch, rate, wd, lrCL)
plt.suptitle(title)
plt.savefig(save_results_to + title + '.png', dpi = 150)
plt.close()
plt.plot(np.squeeze(sentr5), '-r',np.squeeze(sents5), '-b')
plt.ylabel('sensitivity')
plt.xlabel('epoch')
title = 'all_ sensitivity(A=1,B=0)-skf10 mb_size = {}, h_dim = {}, lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}, '.\
format( mbs, hdm, lre, epch, rate, wd, lrCL)
plt.suptitle(title)
plt.savefig(save_results_to + title + '.png', dpi = 150)
plt.close()
plt.plot(np.squeeze(spetr5), '-r',np.squeeze(spets5), '-b')
plt.ylabel('specificity')
plt.xlabel('epoch')
title = 'all_ specificity(A=1,B=0)-skf10 mb_size = {}, h_dim = {}, lrE = {}, epoch = {}, rate = {}, wd = {}, lrCL = {}, '.\
format( mbs, hdm, lre, epch, rate, wd, lrCL)
plt.suptitle(title)
plt.savefig(save_results_to + title + '.png', dpi = 150)
plt.close()
|
<reponame>openvax/isovar<filename>test/test_variant_reads_with_dummy_samfile.py
from __future__ import print_function, division, absolute_import
from nose.tools import eq_
from varcode import Variant
from isovar.allele_read import AlleleRead
from isovar.read_collector import ReadCollector
from mock_objects import MockAlignmentFile, make_pysam_read
from genomes_for_testing import grch38
def test_partitioned_read_sequences_snv():
    """
    test_partitioned_read_sequences_snv : Test that read gets correctly
    partitioned for chr1:4 T>G where the sequence for chr1 is assumed
    to be "ACCTTG"
    """
    # chr1_seq = "ACCTTG"
    chromosome = "1"
    variant = Variant(
        chromosome,
        4,       # locus of the substituted base
        "T",     # reference allele
        "G",     # alternate allele
        grch38,
        normalize_contig_names=False)
    # One fully-matching read carrying the alternate base at the locus.
    read = make_pysam_read(seq="ACCGTG", cigar="6M", mdtag="3G2")
    samfile = MockAlignmentFile(
        references=(chromosome,),
        reads=[read])
    collector = ReadCollector()
    variant_reads = collector.allele_reads_supporting_variant(
        variant=variant,
        alignment_file=samfile)
    print(variant_reads)
    assert len(variant_reads) == 1
    # The read should be split into prefix / allele / suffix around chr1:4.
    eq_(variant_reads[0],
        AlleleRead(
            name=read.qname,
            prefix="ACC",
            allele="G",
            suffix="TG"))
def test_partitioned_read_sequences_insertion():
    """
    test_partitioned_read_sequences_insertion : Test that read gets correctly
    partitioned for chr1:4 T>TG
    where the sequence for chr1 is assumed to be "ACCTTG"
    and the variant sequence is "ACCTGTG"
    """
    # chr1_seq = "ACCTTG"
    chromosome = "1"
    location = 4
    ref = "T"
    alt = "TG"
    variant = Variant(
        chromosome, location, ref, alt, grch38, normalize_contig_names=False)
    # FIX: pass the read sequence as str, not bytes, for consistency with
    # the sibling SNV/deletion tests (pysam query sequences are str).
    read = make_pysam_read(
        seq="ACCTGTG",
        cigar="4M1I2M",
        mdtag="6")
    samfile = MockAlignmentFile(
        references=(chromosome,),
        reads=[read])
    read_creator = ReadCollector()
    variant_reads = read_creator.allele_reads_supporting_variant(
        alignment_file=samfile,
        variant=variant)
    print(variant_reads)
    assert len(variant_reads) == 1
    variant_read = variant_reads[0]
    # The inserted "G" is the allele; the flanking bases form prefix/suffix.
    expected = AlleleRead(
        name=read.qname,
        prefix="ACCT",
        allele="G",
        suffix="TG")
    eq_(variant_read, expected)
def test_partitioned_read_sequences_deletion():
    """
    test_partitioned_read_sequences_deletion : Test that read gets correctly
    partitioned for chr1:4 TT>T where the sequence for chr1 is assumed to
    be "ACCTTG"
    """
    # chr1_seq = "ACCTTG"
    chromosome = "1"
    variant = Variant(
        chromosome,
        4,       # locus
        "TT",    # reference allele
        "T",     # alternate allele (one T deleted)
        grch38,
        normalize_contig_names=False)
    # A read spanning the single-base deletion.
    read = make_pysam_read(
        seq="ACCTG",
        cigar="4M1D1M",
        mdtag="4^T1")
    samfile = MockAlignmentFile(
        references=(chromosome,),
        reads=[read])
    collector = ReadCollector()
    variant_reads = collector.allele_reads_supporting_variant(
        alignment_file=samfile,
        variant=variant)
    print(variant_reads)
    assert len(variant_reads) == 1
    # A deletion has an empty allele between prefix and suffix.
    eq_(variant_reads[0],
        AlleleRead(
            name=read.qname,
            prefix="ACCT",
            allele="",
            suffix="G"))
|
<reponame>chapuzzo/quicktracer<filename>quicktracer/displays.py
from collections import deque
import pyqtgraph as pg
# Protocol constants (duplicated because of import problems)
KEY = 'k'      # message field: trace key / name
VALUE = 'v'    # message field: traced value
TIME = 't'     # message field: timestamp
CUSTOM_DISPLAY = 'custom_display'
# Cap on the number of points retained per data series.
DEFAULT_MAX_DATA_SERIES_LENGTH = 1000
# view_box_id -> shared plot, so multiple displays can share one view box.
view_boxes = {}
class Display():
    """Base class for quicktracer displays.

    Subclasses override accepts_value / add_value / init_view_box / render;
    render_with_init lazily creates (or re-uses, via the shared view_boxes
    registry) a pyqtgraph view box on first render.
    """

    def __init__(self):
        self.title = None
        self.view_box = None
        self.view_box_id = None

    # --- hooks for subclasses ------------------------------------------
    @classmethod
    def accepts_value(cls, value):
        """Whether this display type can visualise *value*."""
        return True

    def add_value(self, message):
        pass

    def init_view_box(self, view_box):
        pass

    def render(self):
        pass

    # --- shared plumbing (no need to override) -------------------------
    def set_title(self, title):
        self.title = title

    def set_view_box_id(self, view_box_id):
        self.view_box_id = view_box_id

    def render_with_init(self, win):
        if not self.view_box:
            global view_boxes
            if self.view_box_id:
                # Re-use a shared view box when one exists for this id,
                # otherwise create it and register it for later displays.
                if self.view_box_id in view_boxes:
                    self.view_box = view_boxes[self.view_box_id]
                    self.view_box.setTitle(self.view_box_id)
                else:
                    win.nextRow()
                    self.view_box = win.addPlot(title=self.title)
                    view_boxes[self.view_box_id] = self.view_box
            else:
                win.nextRow()
                self.view_box = win.addPlot(title=self.title)
            self.init_view_box(self.view_box)
        self.render()
# Built-in stuff below.

# Number of plots added to the window so far; used to hand out row ids.
num_plots_in_window = 0


def new_row_id():
    """Return a fresh 1-based row id, incrementing the global counter.

    Fix: removed a leftover debug print ("aa ...") that spammed stdout on
    every call.
    """
    global num_plots_in_window
    num_plots_in_window += 1
    return num_plots_in_window
class TimeseriesPlot(Display):
    """Scrolling line plot of a scalar value against time."""

    def __init__(self):
        super().__init__()
        # Bounded histories so memory stays constant for long traces.
        cap = DEFAULT_MAX_DATA_SERIES_LENGTH
        self.value_data = deque([], maxlen=cap)
        self.time_data = deque([], maxlen=cap)

    @classmethod
    def accepts_value(cls, value):
        """Accept anything convertible to a float."""
        return is_number(value)

    def add_value(self, message):
        """Append one (time, value) sample from a protocol message."""
        self.time_data.append(message[TIME])
        self.value_data.append(float(message[VALUE]))

    def init_view_box(self, view_box):
        # Show the value axis on the right-hand side only.
        view_box.showAxis('left', False)
        view_box.showAxis('right', True)
        self.curve = view_box.plot()

    def render(self):
        self.curve.setData(self.time_data, self.value_data)
class XYPlot(Display):
    """2-D trajectory plot for vector-valued traces.

    The newest point is marked with an arrow and a "[x, y, ...]" text label.
    """
    def __init__(self):
        super().__init__()
        # Bounded histories of the two plotted coordinates, plus the raw
        # vectors (kept separately because the label shows all components,
        # not just the first two).
        self.x_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)
        self.y_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)
        self.vector_data = deque([], maxlen=DEFAULT_MAX_DATA_SERIES_LENGTH)
        self.curve = None
        self.curve_point = None
    @classmethod
    def accepts_value(cls, value):
        return is_vector(value)
    def add_value(self, message):
        vector = message[VALUE]
        assert is_vector(vector)
        self.x_data.append(vector[0])
        self.y_data.append(vector[1])
        self.vector_data.append(vector)
    def init_view_box(self, view_box):
        self.curve = view_box.plot()
        self.curve.setData(self.x_data, self.y_data)
        # Marker that tracks a position along the curve; the label and the
        # arrow are parented to it so they follow the newest point.
        self.curve_point = pg.CurvePoint(self.curve)
        view_box.addItem(self.curve_point)
        self.point_label = pg.TextItem('[?, ?]', anchor=(0.5, -1.0))
        self.point_label.setParentItem(self.curve_point)
        arrow2 = pg.ArrowItem(angle=90)
        arrow2.setParentItem(self.curve_point)
    def render(self):
        index = min(len(self.x_data), len(self.y_data))-1
        self.curve.setData(self.x_data, self.y_data)
        self.curve_point.setIndex(0) # Force a redraw if the length doesn't change
        self.curve_point.setIndex(index)
        # Label the newest point with all vector components, 1 decimal each.
        self.point_label.setText('[{}]'.format(
            ', '.join([ '{:0.1f}'.format(val) for val in self.vector_data[index] ])
        ))
def is_number(s):
    """Return True if *s* can be converted to a float."""
    try:
        float(s)
    except (TypeError, ValueError):
        # Narrowed from bare Exception: float() signals non-numeric input
        # with TypeError/ValueError; anything else is a real bug that
        # should propagate, not be silently treated as "not a number".
        return False
    return True


def is_vector(vector):
    """Return True if *vector* has >= 2 components and the first two are numeric.

    Rewritten without assert-based control flow: under ``python -O`` asserts
    are stripped, which would have made the original accept any input.
    """
    try:
        return (len(vector) >= 2
                and is_number(vector[0])
                and is_number(vector[1]))
    except (TypeError, KeyError):
        # No len()/indexing support (or a mapping without 0/1 keys).
        return False
# Display types tried in this order when auto-selecting a renderer for an
# incoming value (presumably the first class whose accepts_value() returns
# True wins -- confirm against the dispatcher that consumes this list).
default_display_classes = [
    TimeseriesPlot,
    XYPlot,
]
|
<gh_stars>0
#!/usr/bin/env python
"""
update_dreqs_0194.py
Create an issue for EC-Earth3P-HR highresSST-future r1i1p1f1 v20190514 about
a metadata issue.
"""
import argparse
import logging.config
import os
import sys
from cf_units import date2num, CALENDAR_GREGORIAN
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.utils.replace_file import replace_files
from pdata_app.models import DataFile, DataIssue
from pdata_app.utils.common import delete_drs_dir
__version__ = '0.1.0b1'

# Logging defaults used by the __main__ configuration block below.
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'

logger = logging.getLogger(__name__)


def delete_files(query_set):
    """
    Delete any files online from the specified queryset

    Each file that is removed from disk is marked offline in the database;
    any directory left empty as a result is removed from the DRS tree.
    """
    touched_dirs = []
    for data_file in query_set.filter(online=True):
        try:
            os.remove(os.path.join(data_file.directory, data_file.name))
        except OSError as exc:
            # Leave the database record untouched if the file could not be
            # removed; just record the failure and carry on.
            logger.error(str(exc))
        else:
            if data_file.directory not in touched_dirs:
                touched_dirs.append(data_file.directory)
            data_file.online = False
            data_file.directory = None
            data_file.save()
    # Clean up any directories that are now empty.
    for directory in touched_dirs:
        if not os.listdir(directory):
            delete_drs_dir(directory)
    logger.debug('{} directories removed'.format(len(touched_dirs)))
def parse_args():
    """
    Parse command-line arguments

    Returns:
        argparse.Namespace with attribute `log_level` (str or None).
    """
    parser = argparse.ArgumentParser(
        description='Add additional data requests')
    parser.add_argument(
        '-l', '--log-level',
        help='set logging level to one of debug, info, warn (the default), '
             'or error')
    parser.add_argument(
        '--version', action='version',
        version='%(prog)s {}'.format(__version__))
    return parser.parse_args()
def main(args):
    """
    Main entry point

    Creates (or fetches) a DataIssue describing the wrong parent_source_id
    metadata and attaches it to every affected EC-Earth3P-HR
    highresSST-future r1i1p1f1 v20190514 file.
    """
    # Issue reporter; assumes this user already exists in the database.
    gijs = User.objects.get(username='gvdoord')
    hist_txt = (
        'The parent_source_id attribute in these files should be EC-Earth3P-'
        'HR. This is a purely a metadata issue and the data itself is '
        'not affected. This will be corrected before publication to ESGF.'
    )
    # get_or_create keeps the script idempotent if it is re-run.
    hist_issue, _created = DataIssue.objects.get_or_create(issue=hist_txt,
                                                           reporter=gijs)
    affected_files = DataFile.objects.filter(
        climate_model__short_name='EC-Earth3P-HR',
        experiment__short_name='highresSST-future',
        rip_code='r1i1p1f1',
        version='v20190514',
    )
    logger.debug('{} affected files found'.format(affected_files.count()))
    # Link the issue to all affected files (many-to-many add).
    hist_issue.data_file.add(*affected_files)
if __name__ == "__main__":
    cmd_args = parse_args()
    # determine the log level
    if cmd_args.log_level:
        try:
            # Map e.g. "debug" -> logging.DEBUG; unknown names raise
            # AttributeError and abort with a usage error.
            log_level = getattr(logging, cmd_args.log_level.upper())
        except AttributeError:
            logger.setLevel(logging.WARNING)
            logger.error('log-level must be one of: debug, info, warn or error')
            sys.exit(1)
    else:
        log_level = DEFAULT_LOG_LEVEL
    # configure the logger (single stderr handler on the root logger)
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': DEFAULT_LOG_FORMAT,
            },
        },
        'handlers': {
            'default': {
                'level': log_level,
                'class': 'logging.StreamHandler',
                'formatter': 'standard'
            },
        },
        'loggers': {
            '': {
                'handlers': ['default'],
                'level': log_level,
                'propagate': True
            }
        }
    })
    # run the code
    main(cmd_args)
|
from atlas_helper_methods import AtlasHelper
import requests
from requests.exceptions import ConnectionError, HTTPError, Timeout
import collections
from collections import defaultdict
import sys
import json
from json import loads
import memcache
import threading
from threading import Thread, Lock
from aws_helper import AwsHelper
from aws_module import AwsModule
class GraphiteHelper():
    """Builds Graphite render queries for monitored subnets and caches the
    resulting per-stack attribute values in memcache.

    NOTE(review): this is Python 2 code (dict.iteritems()); indentation in
    this block is reconstructed from context.
    """
    def __init__(self, request=None, environment=None):
        self.module = 'graphite_module'
        self.ah_obj = AtlasHelper()
        self.aws_helperobj = AwsHelper()
        self.module_config_data = self.ah_obj.get_atlas_configuration_data(self.module)
        # Query-building state; populated by get_query_parameters().
        self.graphite_url = " "
        self.framework = ""
        self.parameters_list = []
        self.time_interval = 0.0
        # NOTE(review): initialised as `server_monitored` but read/written
        # as `servers_monitored` elsewhere in this class -- likely a typo.
        self.server_monitored = []
        self.format = ""
        self.from_time = ""
        self.to_time = ""
        self.memcache_var = memcache.Client([self.ah_obj.get_atlas_config_data("global_config_data",'memcache_server_location')], debug=0)
        if environment is not None:
            self.aws_moduleobj = AwsModule(request=request,environment=environment)
    def get_subnet_list(self, environment):
        """
        Get the subnets for environment which has instances and decide if an attribute should be displayed on a subnet.

        Returns None (implicitly) for the 'uncategorized' environment.
        """
        if environment != 'uncategorized':
            subnets_with_instances = self.aws_moduleobj.get_information(environment, subnets_with_instances='true')
            subnet_list = []
            # Keep a subnet if any configured attribute applies to all
            # stacks, or its stack set is contained in the subnet's stacks.
            for subnet, stack_list in subnets_with_instances.iteritems():
                for attribute, attr_details in self.module_config_data['stack_attributes'].iteritems():
                    if attr_details['stack'] == 'all' or set(attr_details['stack']).issubset(set(stack_list)):
                        if subnet not in subnet_list: subnet_list.append(subnet)
            return subnet_list
    def get_query_parameters(self):
        """Get the query parameters from atlas config yaml"""
        self.graphite_url = self.module_config_data['others']['graphite_url']+"render/?"
        self.framework = self.module_config_data['others']['framework']
        self.servers_monitored = self.module_config_data['others']['server_name']
        self.database = self.module_config_data['others']['database']
        self.time_interval = self.module_config_data['others']['time_duration']
        # NOTE(review): from_time/to_time start as "" (never None), so the
        # `is None` branches below only trigger after an explicit reset.
        if 'from' in self.time_interval: self.from_time = self.time_interval['from']
        if 'to' in self.time_interval: self.to_time = self.time_interval['to']
        if self.to_time is not None and self.from_time is not None:
            self.time_string = "&from="+str(self.from_time)+"&to="+str(self.to_time)
        if self.from_time is None:
            self.time_string = "&to="+str(self.to_time)
        if self.to_time is None:
            self.time_string = "&from="+str(self.from_time)
        self.parameters_list = self.module_config_data['others']['parameters']
        self.format = self.module_config_data['others']['format']
    def queries_for_graphite(self, subnet_list):
        """Construct queries for grahite"""
        # {subnet: {parameter: full render URL}}
        query_dict = collections.defaultdict(dict)
        self.get_query_parameters()
        for subnet in subnet_list:
            for server in self.servers_monitored:
                for parameter in self.parameters_list:
                    target = self.framework+"."+subnet+".ms."+server+"."+self.database+"."+parameter
                    query_dict[subnet][parameter] = self.graphite_url+"target="+target+self.time_string+"&format="+self.format
        return dict(query_dict)
    def generate_report(self, query):
        """Retrieve query results from the graphite server.

        Returns the decoded JSON payload, or {} on any request failure.
        """
        try:
            report_json = {}
            response = requests.get(query)
            if response.status_code == 200:
                report_json = json.loads(response.text) #convert the json into a python dictionary
            return report_json
        except ConnectionError as exp_object:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            self.ah_obj.print_exception("graphite_helper.py", "generate_report()", exp_object, exc_type, exc_obj, exc_tb)
        except HTTPError as exp_object:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            self.ah_obj.print_exception("graphite_helper.py", "generate_report()", exp_object, exc_type, exc_obj, exc_tb)
        except Exception as exp_object:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            self.ah_obj.print_exception("graphite_helper.py", "generate_report()", exp_object, exc_type, exc_obj, exc_tb)
        return {}
    def get_stack_attributes(self, environment):
        """Get all stack attributes.

        Returns a ((display_name, editable) list, {display_name: details})
        pair built from the module configuration.
        """
        stack_attribute_list, stack_attribute_dict = [], {}
        for attribute, details in self.module_config_data['stack_attributes'].iteritems():
            stack_attribute_list.append((details['display_name'], details['editable']))
            stack_attribute_dict[details['display_name']] = details
        return(stack_attribute_list, stack_attribute_dict)
    def get_stack_attribute_values(self, environment):
        """Get stack attribute values from cache; fall back to the global cache."""
        stack_attribute_values = self.memcache_var.get(str(environment+"graphite_stack_attributes"))
        if not stack_attribute_values:
            # NOTE(review): assigns `stack_attributes_values` (extra "s") --
            # the value fetched from the global cache is never used, and the
            # None-check below still tests the old variable.  Likely a bug.
            stack_attributes_values = self.memcache_var.get(str(environment+"global_graphite_stack_attributes"))
            if stack_attribute_values is not None:
                self.memcache_var.set(str(environment+"graphite_stack_attributes"), stack_attribute_values, 10*60)
            # NOTE(review): `with threading.Lock():` locks a brand-new lock,
            # which synchronises nothing; it only scopes the thread start.
            with threading.Lock():
                thread = threading.Thread(target=self.cache_stack_attribute_values, args=[environment])
                thread.start()
        return stack_attribute_values
    def cache_stack_attribute_values(self, environment):
        """Cache stack attribute values."""
        try:
            stack_attribute_values = self.stack_attribute_values(environment)
            # NOTE(review): the cache is populated before the None-check
            # below, so a None value is cached and then reported.
            self.memcache_var.set(str(environment+"graphite_stack_attributes"), stack_attribute_values, 10*60)
            if stack_attribute_values is None:
                raise Exception("The graphite attribute values for environment "+environment+" has not been fetched. Please make sure the cache is populated !!!")
            if stack_attribute_values is not None:
                self.memcache_var.set(str(environment+"global_graphite_stack_attributes"),stack_attribute_values, 15*60)
            self.memcache_var.disconnect_all()
        except Exception as exp_object:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            self.ah_obj.print_exception("graphite_helper.py", "cache_stack_attribute_values()", exp_object, exc_type, exc_obj, exc_tb)
        return {}
    def stack_attribute_values(self, environment):
        """get stack attribute values from graphite server and parse it.

        Builds {region: {vpc: {subnet: {stack: {display_name: value}}}}},
        with "null" for attributes that had no data points.
        """
        if environment != 'uncategorized':
            stack_attribute_dict = self.ah_obj.create_nested_defaultdict()
            organization_list = self.aws_helperobj.get_organizations()
            region_list = self.aws_helperobj.get_regions()
            stack_attributes_from_config = self.module_config_data['stack_attributes']
            attributes_list = stack_attributes_from_config.keys()
            subnet_list = self.get_subnet_list(environment)
            graphite_query_dict = self.queries_for_graphite(subnet_list)
            for organization in organization_list:
                for region in region_list:
                    vpc_list = self.aws_helperobj.get_vpc_in_region(region)
                    if vpc_list:
                        for vpc in vpc_list:
                            for subnet in subnet_list:
                                for attribute in stack_attributes_from_config:
                                    stack_list = stack_attributes_from_config[attribute]['stack']
                                    attribute_value=""
                                    suffix=""
                                    if 'suffix' in stack_attributes_from_config[attribute]:
                                        suffix = stack_attributes_from_config[attribute]['suffix']
                                    display_name= ""
                                    if 'display_name' in stack_attributes_from_config[attribute]:
                                        display_name = stack_attributes_from_config[attribute]['display_name']
                                    report = self.generate_report(graphite_query_dict[subnet][attribute])
                                    if report:
                                        target = self.ah_obj.split_string(report[0]['target'], ('.'))
                                        if subnet in target and attribute in target:
                                            # Walk data points newest-first and
                                            # take the most recent non-null one.
                                            for index in range(len(report[0]['datapoints'])-1, 0, -1):
                                                if report and report[0]['datapoints'][index][0] is not None:
                                                    attribute_value = str(int(report[0]['datapoints'][index][0]))+" "+suffix
                                                    break
                                                else: attribute_value = "null"
                                        else:attribute_value = "null"
                                    for stack in stack_list:
                                        stack_attribute_dict[region][vpc][subnet][stack][display_name] = attribute_value
            return self.ah_obj.defaultdict_to_dict(stack_attribute_dict)
|
<filename>notebooks/Python/2 Statistical Learning/2.3 Lab - Introduction to Python.py
# coding: utf-8
# ## 2.3 Lab: Introduction to Python
# ### 2.3.1 Basic Commands
# In[1]:
import numpy as np # for calculation purpose, let use np.array
import random # for the random
# In[2]:
x = np.array([1, 3, 2, 5])
# In[3]:
print(x)
# In[4]:
x = np.array([1, 6, 2])
# In[5]:
print(x)
# In[6]:
y = np.array([1, 4, 3])
# In[7]:
len(x)
# In[8]:
len(y)
# In[9]:
print(x + y)
# In[10]:
get_ipython().run_line_magic('whos', '')
# In[11]:
del x # reset_selective x
# In[12]:
get_ipython().run_line_magic('whos', '')
# In[13]:
get_ipython().run_line_magic('pinfo', 'reset')
# In[14]:
x = np.array([1, 2, 3, 4])
x = np.reshape(x, (2, 2), order='F')
# In[15]:
print(x)
# In[16]:
x = np.array([1, 2, 3, 4])
x = np.reshape(x, (2, 2))
# In[17]:
print(x)
# In[18]:
x = np.matrix([[1, 2], [3, 4]])
# In[19]:
print(x)
# In[20]:
print(np.sqrt(x))
# In[21]:
print(x**2)
# In[22]:
print(np.square(x))
# In[23]:
mu, sigma = 0, 1 # mean and standard deviation
# In[24]:
x = np.random.normal(mu, sigma, 50)
# In[25]:
y = x + np.random.normal(50, 0.1, 50)
# In[26]:
print(np.corrcoef(x, y))
# In[27]:
np.random.seed(1303)
# In[28]:
print(np.random.normal(mu, sigma, 50)) # after set up the seed, this should genernate the same result
# In[29]:
np.random.seed(1303)
y = np.random.normal(mu, sigma, 100)
# In[30]:
print(np.mean(y))
# In[31]:
print(np.var(y))
# In[32]:
print(np.sqrt(np.var(y)))
# In[33]:
print(np.std(y))
# ### 2.3.2 Graphics
# In[34]:
# In Python, matplotlib is the most widely used plotting library;
# matplotlib.pyplot is a collection of command-style functions that make matplotlib work like MATLAB.
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[35]:
x = np.random.normal(0, 1, 100)
y = np.random.normal(0, 1, 100)
# In[36]:
plt.plot(x, y, 'bo') # use plt.plot? to look at more options
plt.ylabel("this is the y-axis")
plt.xlabel("this is the x-axis")
plt.title("Plot of X vs Y")
plt.savefig('../../../output/Figure.pdf') # use plt.savefig to save images
plt.show()
# In[37]:
x = np.arange(1, 11) # note that arange excludes the right end of the range
# In[38]:
print(x)
# In[39]:
# in order to use pi, the math module needs to be loaded first
import math
x = np.linspace(-math.pi, math.pi, num = 50)
# In[40]:
print(x)
# In[41]:
y = x
X, Y = np.meshgrid(x,y)
# In[42]:
f = np.cos(Y)/(1 + np.square(X))
CS = plt.contour(X, Y, f)
plt.show()
# In[43]:
fa = (f - f.T)/2 # f.T is the transpose of f
plt.imshow(fa, extent=(x[0], x[-1], y[0], y[-1]))
plt.show()
# In[44]:
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.plot_wireframe(X, Y, fa)
plt.show()
# ### 2.3.3 Indexing Data
# In[45]:
A = np.arange(1,17,1).reshape(4, 4).transpose()
# In[46]:
print(A)
# In[47]:
# R starts the index from 1, but Python starts the index from 0.
# To select the same number (10) as the book did, we need to reduce the index by 1
print(A[1, 2])
# In[48]:
# broadcast row selector [[0],[2]] against column selector [1,3]: a 2x2 submatrix
print(A[[[0],[2]], [1,3]])
# In[49]:
print(A[0:3, 1:4])
# In[50]:
print(A[0:2, :])
# In[51]:
print(A[:, 0:2])
# In[52]:
print(A[0,:])
# In[53]:
# the minus sign has a different meaning in Python:
# it indexes from the end; -1 means the last element
print(A[-1, -1])
# In[54]:
A.shape
# ### 2.3.4 Loading Data
# In[55]:
# In Python, pandas is the commonly used module to read a file into a data frame.
import pandas as pd
Auto = pd.read_csv('../../../data/Auto.csv', header=0, na_values='?') # '?' entries become NaN
# In[56]:
Auto.head()
# In[57]:
Auto.shape
# In[58]:
Auto.iloc[32]
# In[59]:
Auto.iloc[:4, :2]
# In[60]:
Auto.columns
# In[61]:
list(Auto)
# In[62]:
# Use .isnull and .sum to find out how many NaNs are in each variable
Auto.isnull().sum()
# In[63]:
# There are 397 rows in the data and only 5 with missing values.
# We can just drop the ones with missing values.
Auto = Auto.dropna()
# In[64]:
Auto.shape
# ### 2.3.5 Additional Graphical and Numerical Summaries
# In[65]:
plt.plot(Auto.cylinders, Auto.mpg, 'ro')
plt.show()
# In[66]:
Auto.hist(column = ['cylinders', 'mpg'])
plt.show()
# In[67]:
import seaborn as sns
# In[68]:
sns.pairplot(Auto)
plt.show()
# In[69]:
sns.pairplot(Auto, vars = ['mpg', 'displacement', 'horsepower', 'weight', 'acceleration'])
plt.show()
# In[70]:
Auto.describe()
# In[71]:
Auto.describe(include = 'all')
# In[72]:
# Change cylinders into a categorical variable
Auto['cylinders'] = Auto['cylinders'].astype('category')
# In[73]:
Auto.describe(include= 'all')
|
<reponame>Itskaleem/NetworkSimulator<gh_stars>0
from scipy.constants import Planck, pi, c
from scipy.special import erfcinv
import numpy as np
import pandas as pd
import json
from random import shuffle
import matplotlib.pyplot as plt
import itertools as it
import copy
class Lightpath(object):
    """A signal traveling on one channel along a node path.

    Accumulates noise, latency and a combined SNR while it is propagated
    line by line; ``path`` is consumed from the front as the lightpath
    advances (see ``next``).
    """
    def __init__(self, path: str, channel=0, rs=32e9, df=50e9, transceiver ='shannon'):
        self._signal_power = None
        self._path = path              # remaining node labels, e.g. 'ABD'
        self._channel = channel        # channel index used on every traversed line
        self._rs = rs                  # symbol rate [baud]
        self._df = df                  # channel spacing [Hz]
        self._noise_power = 0
        self._snr = None               # linear SNR, combined across lines in update_snr
        self._latency = 0
        self._optimized_powers = {}    # line label -> optimal launch power (set by Node.optimize)
        self._transceiver = transceiver
        self._bitrate = None           # set by Network.calculate_bitrate [Gbps]
    @property
    def transceiver(self):
        return self._transceiver
    @transceiver.setter
    def transceiver(self, transceiver):
        self._transceiver = transceiver
    @property
    def bitrate(self):
        return self._bitrate
    @bitrate.setter
    def bitrate(self, bitrate):
        self._bitrate = bitrate
    @property
    def signal_power(self):
        return self._signal_power
    @property
    def optimized_powers(self):
        return self._optimized_powers
    @optimized_powers.setter
    def optimized_powers(self, optimized_powers):
        self._optimized_powers = optimized_powers
    @property
    def snr(self):
        return self._snr
    @snr.setter
    def snr(self, snr):
        self._snr = snr
    def update_snr(self, snr):
        """Fold a per-line SNR into the running total.

        Uses the inverse sum 1/SNR_tot = 1/SNR_old + 1/SNR_line (noise
        contributions add up along the path).
        """
        if self.snr is None:
            self.snr = snr
        else:
            self.snr = 1 / (1 / self.snr + 1 / snr)
    @signal_power.setter
    def signal_power(self, signal_power):
        self._signal_power = signal_power
    @property
    def rs(self):
        return self._rs
    @property
    def df(self):
        return self._df
    @property
    def path(self):
        return self._path
    @path.setter
    def path(self, path):
        self._path = path
    @property
    def channel(self):
        return self._channel
    @property
    def noise_power(self):
        return self._noise_power
    @noise_power.setter
    def noise_power(self, noise):
        self._noise_power = noise
    @property
    def latency(self):
        return self._latency
    @latency.setter
    def latency(self, latency):
        self._latency = latency
    def add_noise(self, noise):
        """Accumulate line noise power onto the lightpath."""
        self.noise_power += noise
    def add_latency(self, latency):
        """Accumulate line propagation latency onto the lightpath."""
        self.latency += latency
    def next(self):
        """Advance past the current node by dropping the first label from the path."""
        self.path = self.path[1:]
class Node(object):
    """A network node: label, position and the outgoing lines (``successive``)."""
    def __init__(self, node_dict):
        self._label = node_dict['label']
        self._position = node_dict['position']
        self._connected_nodes = node_dict['connected_nodes']
        # line label (e.g. 'AB') -> Line object; populated by Network.connect()
        self._successive = {}
    @property
    def label(self):
        return self._label
    @property
    def position(self):
        return self._position
    @property
    def connected_nodes(self):
        return self._connected_nodes
    @property
    def successive(self):
        return self._successive
    @successive.setter
    def successive(self, successive):
        self._successive = successive
    def propagate(self, lightpath, occupation=False):
        """Forward the lightpath onto the next line of its path.

        Mutual recursion with Line.propagate walks the whole path; stops
        when only the destination node label remains.
        """
        path = lightpath.path
        if len(path) > 1:
            line_label = path[:2]
            line = self.successive[line_label]
            lightpath.next()
            # Launch at the per-line optimal power precomputed by optimize().
            lightpath.signal_power = lightpath.optimized_powers[line_label]
            lightpath = line.propagate(lightpath, occupation)
        return lightpath
    def optimize(self, lightpath):
        """Walk the path storing each line's optimal launch power.

        The optimum (ase / (2*eta))**(1/3) balances the line's ASE noise
        against its NLI, where eta is the NLI produced at unit power.
        NOTE: consumes lightpath.path; the caller restores it afterwards
        (see Network.optimization).
        """
        path = lightpath.path
        if len(path) > 1:
            line_label = path[:2]
            line = self.successive[line_label]
            ase = line.ase_generation()
            eta = line.nli_generation(1, lightpath.rs, lightpath.df)  # NLI at unit power
            p_opt = (ase / (2 * eta)) ** (1 / 3)  # calculate optimum signal power
            lightpath.optimized_powers.update({line_label: p_opt})
            lightpath.next()
            node = line.successive[lightpath.path[0]]
            lightpath = node.optimize(lightpath)
        return lightpath
class Line(object):
    """An amplified fiber line between two nodes.

    Models per-span amplification (one amplifier every <= 80 km, gain set
    to transparency), ASE and NLI noise generation, latency and per-channel
    occupation state.
    """
    def __init__(self, line_dict):
        self._label = line_dict['label']
        self._length = line_dict['length'] * 1e3          # km -> m
        self._amplifiers = int(np.ceil(self._length / 80e3))  # one amp per <= 80 km span
        self._span_length = self._length / self._amplifiers
        # Amplifier noise figure [dB]; gain is set to transparency below.
        self._noise_figure = 7
        # Physical Parameters of the Fiber
        self._alpha = 4.6e-5    # attenuation coefficient [1/m]
        self._beta = 6.27e-27   # dispersion parameter
        self._gamma = 1.27e-3   # nonlinear coefficient
        self._Nch = line_dict['Nch']
        self._state = ['free'] * self._Nch   # one 'free'/'occupied' flag per channel
        self._successive = {}                # destination node label -> Node
        self._gain = self.transparency()
    @property
    def label(self):
        return self._label
    @property
    def length(self):
        return self._length
    @property
    def state(self):
        return self._state
    @property
    def amplifiers(self):
        return self._amplifiers
    @property
    def span_length(self):
        return self._span_length
    @property
    def gain(self):
        return self._gain
    @property
    def Nch(self):
        return self._Nch
    @gain.setter
    def gain(self, gain):
        self._gain = gain
    @property
    def noise_figure(self):
        return self._noise_figure
    @noise_figure.setter
    def noise_figure(self, noise_figure):
        self._noise_figure = noise_figure
    @property
    def alpha(self):
        return self._alpha
    @property
    def beta(self):
        return self._beta
    @property
    def gamma(self):
        return self._gamma
    @state.setter
    def state(self, state):
        # Normalize and validate; on invalid input the state is left unchanged.
        state = [s.lower().strip() for s in state]
        if set(state).issubset(set(['free', 'occupied'])):
            self._state = state
        else:
            print('ERROR: line state not recognized.Value:', set(state) - set(['free', 'occupied']))
    @property
    def successive(self):
        return self._successive
    @successive.setter
    def successive(self, successive):
        self._successive = successive
    def transparency(self):
        """Gain [dB] that exactly compensates one span's fiber loss."""
        gain = 10 * np.log10(np.exp(self.alpha * self.span_length))
        return gain
    def latency_generation(self):
        """Propagation delay over the line (light travels at ~2/3 c in fiber)."""
        latency = self.length / (c * 2 / 3)
        return latency
    def noise_generation(self, lightpath):
        """Total noise added to the lightpath on this line: ASE + NLI."""
        noise = self.ase_generation() + self.nli_generation(lightpath.signal_power, lightpath.rs, lightpath.df)
        return noise
    def ase_generation(self):
        """ASE noise power over all amplifiers: N * h * f * Bn * NF * (G - 1)."""
        gain_lin = 10 ** (self._gain / 10)
        noise_figure_lin = 10 ** (self._noise_figure / 10)
        N = self._amplifiers
        f = 193.4e12   # reference optical frequency [Hz]
        h = Planck
        Bn = 12.5e9    # noise bandwidth [Hz]
        ase_noise = N * h * f * Bn * noise_figure_lin * (gain_lin - 1)
        return ase_noise
    def nli_generation(self, signal_power, Rs, df,Bn = 12.5e9):
        """Nonlinear-interference noise for a per-channel power.

        NOTE(review): eta looks like a GN-model-style NLI coefficient, but
        the exact formula should be confirmed against the reference it was
        taken from.
        """
        Pch = signal_power
        loss = np.exp(- self.alpha * self.span_length) # single-span loss (linear)
        gain_lin = 10 ** (self.gain / 10)
        N_spans = self.amplifiers
        eta = 16 / (27 * pi) *self.gamma ** 2 / (4 * self.alpha * self.beta * Rs ** 3) * np.log(pi ** 2 * self.beta * Rs ** 2 * self.Nch **(2 * Rs / df) / (2 * self . alpha ))
        nli_noise = N_spans * (Pch ** 3 * loss * gain_lin * eta * Bn)
        return nli_noise
    def propagate(self, lightpath, occupation=False):
        """Add this line's latency/noise/SNR to the lightpath and recurse.

        When ``occupation`` is True, the lightpath's channel is marked
        'occupied' on this line.
        """
        # Update latency
        latency = self.latency_generation()
        lightpath.add_latency(latency)
        # Update noise
        signal_power = lightpath.signal_power
        noise = self.noise_generation(lightpath)
        lightpath.add_noise(noise)
        # Update SNR
        snr = lightpath.signal_power / noise
        lightpath.update_snr(snr)
        # Update line state
        if occupation:
            channel = lightpath.channel
            new_state = self.state.copy()
            new_state[channel] = 'occupied'
            self.state = new_state
        node = self.successive[lightpath.path[0]]
        lightpath = node.propagate(lightpath, occupation)
        return lightpath
class Network(object):
    """Optical network built from a JSON topology file.

    Owns the Node and Line objects, enumerates and characterizes all paths
    (``weighted_paths``), tracks per-path/per-channel availability
    (``route_space``) and allocates connections (``stream``).
    """
    def __init__(self, json_path, nch=10, upgrade_line =''):
        node_json = json.load(open(json_path, 'r'))
        self._nodes = {}
        self._lines = {}
        self._connected = False
        self._weighted_paths = None   # DataFrame: path, latency, noise, snr
        self._route_space = None      # DataFrame: path + one 'free'/'occupied' column per channel
        self._Nch = nch
        self._upgrade_line = upgrade_line
        for node_label in node_json:
            # Create the node instance
            node_dict = node_json[node_label]
            node_dict['label'] = node_label
            node = Node(node_dict)
            self._nodes[node_label] = node
            # Create the line instances
            for connected_node_label in node_dict['connected_nodes']:
                line_dict = {}
                line_label = node_label + connected_node_label
                line_dict['label'] = line_label
                node_position = np.array(node_json[node_label]['position'])
                connected_node_position = np.array(node_json[connected_node_label]['position'])
                line_dict['length'] = np.sqrt(np.sum((node_position - connected_node_position) ** 2))
                line_dict['Nch'] = self.Nch
                line = Line(line_dict)
                self._lines[line_label] = line
        # Optionally "upgrade" one line with a 3 dB lower amplifier noise figure.
        if not upgrade_line == '':
            self.lines[self._upgrade_line].noise_figure = self.lines[upgrade_line].noise_figure - 3
    @property
    def Nch(self):
        return self._Nch
    @property
    def nodes(self):
        return self._nodes
    @property
    def lines(self):
        return self._lines
    @property
    def connected(self):
        return self._connected
    @property
    def weighted_paths(self):
        return self._weighted_paths
    @property
    def route_space(self):
        return self._route_space
    def draw(self):
        """Plot the topology: nodes as green dots, lines as blue segments."""
        nodes = self.nodes
        for node_label in nodes:
            n0 = nodes[node_label]
            x0 = n0.position[0]
            y0 = n0.position[1]
            plt.plot(x0, y0, 'go', markersize=10)
            plt.text(x0 + 20, y0 + 20, node_label)
            for connected_node_label in n0.connected_nodes:
                n1 = nodes[connected_node_label]
                x1 = n1.position[0]
                y1 = n1.position[1]
                plt.plot([x0, x1], [y0, y1], 'b')
        plt.title('Network')
        plt.show()
    def find_paths(self, label1, label2):
        """Enumerate all loop-free paths from label1 to label2.

        Grows paths one intermediate node at a time (inner_paths keyed by
        hop count), keeping only extensions that follow an existing line
        and do not revisit a node.
        """
        cross_nodes = [key for key in self.nodes.keys() if ((key != label1) & (key != label2))]
        cross_lines = self.lines.keys()
        inner_paths = {}
        inner_paths['0'] = label1
        for i in range(len(cross_nodes) + 1):
            inner_paths[str(i + 1)] = []
            for inner_path in inner_paths[str(i)]:
                inner_paths[str(i + 1)] += [inner_path + cross_node
                                            for cross_node in cross_nodes
                                            if ((inner_path[-1] + cross_node in cross_lines) &
                                                (cross_node not in inner_path))]
        paths = []
        for i in range(len(cross_nodes) + 1):
            for path in inner_paths[str(i)]:
                if path[-1] + label2 in cross_lines:
                    paths.append(path + label2)
        return paths
    def connect(self):
        """Wire up the ``successive`` dictionaries of every node and line."""
        nodes_dict = self.nodes
        lines_dict = self.lines
        for node_label in nodes_dict:
            node = nodes_dict[node_label]
            for connected_node in node.connected_nodes:
                line_label = node_label + connected_node
                line = lines_dict[line_label]
                line.successive[connected_node] = nodes_dict[connected_node]
                node.successive[line_label] = lines_dict[line_label]
        self._connected = True
    def propagate(self, lightpath, occupation=False):
        """Propagate the lightpath from its first node to its destination."""
        path = lightpath.path
        start_node = self.nodes[path[0]]
        propagated_lightpath = start_node.propagate(lightpath, occupation)
        return propagated_lightpath
    def optimization(self, lightpath):
        # Sets the lightpath's per-line launch powers to the optimum
        # calculated at each line (the node beginning the line), then
        # restores the path consumed by the walk.
        path = lightpath.path
        start_node = self.nodes[path[0]]
        optimized_lightpath = start_node.optimize(lightpath)
        optimized_lightpath.path = path
        return optimized_lightpath
    def set_weighted_paths(self):
        """Characterize every node-pair path (latency/noise/SNR) and init route_space."""
        if not self.connected:
            self.connect()
        node_labels = self.nodes.keys()
        pairs = []
        for label1 in node_labels:
            for label2 in node_labels:
                if label1 != label2:
                    pairs.append(label1 + label2)
        df = pd.DataFrame()
        paths = []
        latencies = []
        noises = []
        snrs = []
        for pair in pairs:
            for path in self.find_paths(pair[0], pair[1]):
                path_string = ''
                for node in path:
                    path_string += node + '->'
                paths.append(path_string[:-2])
                # Probe propagation with a default lightpath (channel 0).
                lightpath = Lightpath(path)
                lightpath = self.optimization(lightpath)
                lightpath = self.propagate(lightpath, occupation=False)
                latencies.append(lightpath.latency)
                noises.append(lightpath.noise_power)
                snrs.append(
                    10 * np.log10(lightpath.signal_power / lightpath.noise_power)
                )
        df['path'] = paths
        df['latency'] = latencies
        df['noise'] = noises
        df['snr'] = snrs
        self._weighted_paths = df
        # All channels of all paths start out free.
        route_space = pd.DataFrame()
        route_space['path'] = paths
        for i in range(self.Nch):
            route_space[str(i)] = ['free'] * len(paths)
        self._route_space = route_space
    def available_paths(self, input_node, output_node):
        """Paths between the node pair that still have at least one free channel."""
        if self.weighted_paths is None:
            self.set_weighted_paths()
        all_paths = [path for path in self.weighted_paths.path.values
                     if ((path[0] == input_node) and (path[-1] == output_node))]
        available_paths = []
        for path in all_paths:
            # route_space row: one 'free'/'occupied' entry per channel (skip the path column).
            path_occupancy = self.route_space.loc[self.route_space.path == path].T.values[
                             1:]
            if 'free' in path_occupancy:  # exclude the path if all channels are occupied
                available_paths.append(path)
        return available_paths
    def find_best_snr(self, input_node, output_node):
        """Best available path by SNR, or None when no path has a free channel."""
        available_paths = self.available_paths(input_node, output_node)
        if available_paths:
            inout_df = self.weighted_paths.loc[self.weighted_paths.path.isin(available_paths)]
            best_snr = np.max(inout_df.snr.values)
            best_path = inout_df.loc[inout_df.snr == best_snr].path.values[0]
        else:
            best_path = None
        return best_path
    def find_best_latency(self, input_node, output_node):
        """Best available path by latency, or None when no path has a free channel."""
        available_paths = self.available_paths(input_node, output_node)
        if available_paths:
            inout_df = self.weighted_paths.loc[self.weighted_paths.path.isin(available_paths)]
            best_latency = np.min(inout_df.latency.values)
            best_path = inout_df.loc[inout_df.latency == best_latency].path.values[0]
        else:
            best_path = None
        return best_path
    def stream(self, connections, best='latency', transceiver='shannon'):
        """Allocate each connection on its best path/first free channel.

        A connection whose rate request is not satisfied by one lightpath
        is streamed again recursively (multiple lightpaths per connection).
        NOTE(review): in the zero-bitrate branch ``lp.path`` has already
        been consumed by propagation, so the route-space rollback may not
        free the intended rows -- verify.
        """
        streamed_connections = []
        for connection in connections:
            input_node = connection.input_node
            output_node = connection.output_node
            signal_power = connection.signal_power  # NOTE: currently unused
            if best == 'latency':
                path = self.find_best_latency(input_node, output_node)
            elif best == 'snr':
                path = self.find_best_snr(input_node, output_node)
            else:
                print('ERROR: best input not recognized. Value:', best)
                continue
            if path:
                path_occupancy = self.route_space.loc[self.route_space.path == path].T.values[1:]
                channel = [i for i in range(len(path_occupancy)) if path_occupancy[i] == 'free'][0]  # first free channel
                path = path.replace('->', '')
                in_lightpath = Lightpath(path, channel, transceiver=transceiver)
                in_lightpath = self.optimization(in_lightpath)
                out_lightpath = self.propagate(in_lightpath, occupation=True)
                self.calculate_bitrate(out_lightpath)
                if out_lightpath.bitrate == 0.0:
                    # Roll back everything occupied so far for this connection.
                    [self.update_route_space(lp.path, lp.channel,'free') for lp in connection.lightpaths]
                    connection.block_connection()
                else:
                    connection.set_connection(out_lightpath)
                    self.update_route_space(path, out_lightpath.channel, 'occupied')
                    if connection.residual_rate_request > 0:
                        # Request not fully served: allocate another lightpath.
                        self.stream([connection], best, transceiver)
            else:
                # No path with a free channel: block the connection.
                connection.block_connection()
            streamed_connections.append(connection)
        return streamed_connections
    @staticmethod
    def path_to_line_set(path):
        """'A->B->C' (or 'ABC') -> {'AB', 'BC'}: the set of lines the path uses."""
        path = path.replace('->', '')
        return set([path[i] + path[i + 1] for i in range(len(path) - 1)])
    def update_route_space(self, path, channel, state):
        """Set ``state`` on ``channel`` for every path sharing a line with ``path``."""
        all_paths = [self.path_to_line_set(p) for p in self.route_space.path.values]
        states = self.route_space[str(channel)]
        lines = self.path_to_line_set(path)
        for i in range(len(all_paths)):
            line_set = all_paths[i]
            if lines.intersection(line_set):
                states[i] = state
        self.route_space[str(channel)] = states
    def calculate_bitrate(self, lightpath, bert=1e-3):
        """Compute and store the lightpath bitrate [Gbps] for its transceiver.

        fixed-rate: 100 Gbps above an SNR threshold, else 0;
        flex-rate: 0/100/200/400 Gbps by threshold band;
        shannon: continuous Shannon-capacity rate.
        NOTE(review): an unknown transceiver string leaves ``rb`` undefined
        and raises NameError -- confirm callers always pass a valid value.
        """
        snr = lightpath.snr
        Bn = 12.5e9
        Rs = lightpath.rs
        if lightpath.transceiver.lower() == 'fixed-rate':
            snrt = 2 * erfcinv(2 * bert) * (Rs / Bn)
            rb = np.piecewise(snr, [snr < snrt, snr >= snrt], [0, 100])
        elif lightpath.transceiver.lower() == 'flex-rate':
            snrt1 = 2 * erfcinv(2 * bert) ** 2 * (Rs / Bn)
            snrt2 = (14 / 3) * erfcinv(3 / 2 * bert) ** 2 * (Rs / Bn)
            snrt3 = 10 * erfcinv(8 / 3 * bert) ** 2 * (Rs / Bn)
            cond1 = (snr < snrt1)
            cond2 = (snr >= snrt1 and snr < snrt2)
            cond3 = (snr >= snrt2 and snr < snrt3)
            cond4 = (snr >= snrt3)
            rb = np.piecewise(snr, \
                              [cond1, cond2, cond3, cond4], [0, 100, 200, 400])
        elif lightpath.transceiver.lower() == 'shannon':
            rb = 2 * Rs * np.log2(1 + snr * (Rs / Bn)) * 1e-9
        lightpath.bitrate = float(rb)  # set bitrate in lightpath
        return float(rb)
class Connection(object):
    """A traffic request between two nodes, possibly served by several lightpaths.

    NOTE: the ``snr`` and ``lightpaths`` property *setters* append to a
    list rather than replace it -- assignment accumulates history. This is
    relied on by Network.stream.
    """
    def __init__(self, input_node, output_node, rate_request =0):
        self._input_node = input_node
        self._output_node = output_node
        self._signal_power = None
        self._latency = 0
        self._snr = []                 # one SNR [dB] per allocated lightpath
        self._rate_request = float(rate_request)       # requested rate [Gbps]
        self._residual_rate_request = float(rate_request)  # still unserved rate
        self._lightpaths = []
        self._bitrate = None           # total capacity, set by calculate_capacity
    @property
    def input_node(self):
        return self._input_node
    @property
    def rate_request(self):
        return self._rate_request
    @property
    def residual_rate_request(self):
        return self._residual_rate_request
    @property
    def bitrate(self):
        return self._bitrate
    @bitrate.setter
    def bitrate(self, bitrate):
        self._bitrate = bitrate
    def calculate_capacity(self):
        """Total capacity: sum of the bitrates of all allocated lightpaths."""
        self.bitrate = sum([lightpath.bitrate for lightpath in self.lightpaths])
        return self.bitrate
    def set_connection(self, lightpath):
        """Record a successfully allocated lightpath and reduce the residual request."""
        self.signal_power = lightpath.signal_power
        self.latency = max(self.latency, lightpath.latency)
        self.snr = 10 * np.log10(lightpath.snr)   # appends (see setter)
        self.lightpaths = lightpath               # appends (see setter)
        self._residual_rate_request = self._residual_rate_request - lightpath.bitrate
        return self
    def block_connection(self):
        """Mark the connection as blocked and drop any partial allocation."""
        self.latency = None
        self.snr = 0          # appends a 0 dB entry (see setter)
        self.bitrate = 0
        self.clear_lightpaths()
        return self
    @property
    def output_node(self):
        return self._output_node
    @property
    def signal_power(self):
        return self._signal_power
    @property
    def latency(self):
        return self._latency
    @latency.setter
    def latency(self, latency):
        self._latency = latency
    @signal_power.setter
    def signal_power(self, signal_power):
        self._signal_power = signal_power
    @property
    def snr(self):
        return self._snr
    @property
    def lightpaths(self):
        return self._lightpaths
    @lightpaths.setter
    def lightpaths(self, lightpath):
        # Intentionally appends: each assignment adds one lightpath.
        self._lightpaths.append(lightpath)
    @snr.setter
    def snr(self, snr):
        # Intentionally appends: each assignment adds one SNR sample.
        self._snr.append(snr)
    def clear_lightpaths(self):
        self._lightpaths = []
def create_traffic_matrix(nodes, rate, multiplier=5):
    """Build a square traffic matrix over *nodes*.

    Every off-diagonal entry is ``rate * multiplier`` (Gbps requested per
    node pair); the diagonal (a node to itself) is 0.0.
    """
    n = len(nodes)
    values = np.full((n, n), float(rate * multiplier))
    np.fill_diagonal(values, 0.0)
    return pd.DataFrame(values, index=nodes, columns=nodes)
def plot3Dbars(t):
    """Show matrix *t* as a 3-D bar chart (one bar per entry, height = value)."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Grid of (col, row) coordinates, flattened to one bar per matrix cell.
    x_data, y_data = np.meshgrid(np.arange(t.shape[1]), np.arange(t.shape[0]))
    x_data = x_data.flatten()
    y_data = y_data.flatten()
    z_data = t.flatten()
    ax.bar3d(x_data, y_data, np.zeros(len(z_data)), 1, 1, z_data)
    plt.show()
def main():
    """Run NMC Monte-Carlo realizations of traffic allocation and report stats.

    Side effects: reads 'nodes_9.json', shows matplotlib figures, and prints
    aggregate congestion / bitrate / SNR statistics.
    """
    NMC = 2  # number of Monte-Carlo realizations
    node_pairs_realizations = []
    stream_conn_list = []
    lines_state_list = []
    for i in range(NMC):
        print('Monte - Carlo Realization #{:d}'.format(i + 1))
        # Build the network (10 channels, line 'DB' upgraded) and wire it up.
        network = Network('nodes_9.json', nch=10, upgrade_line='DB')
        network.connect()
        # network.draw()
        node_labels = list(network.nodes.keys())
        T = create_traffic_matrix(node_labels, 600, multiplier=5)
        # Allocation-request sequence: every ordered node pair, shuffled.
        node_pairs = list(filter(lambda x: x[0] != x[1], list(it.product(node_labels, node_labels))))
        shuffle(node_pairs)
        node_pairs_realizations.append(copy.deepcopy(node_pairs))
        connections = []
        for node_pair in node_pairs:
            connection = Connection(node_pair[0], node_pair[-1], float(T.loc[node_pair[0], node_pair[-1]]))
            connections.append(connection)
        streamed_connections = network.stream(connections, best='snr', transceiver='shannon')
        stream_conn_list.append(streamed_connections)
        lines_state_list.append(network.lines)  # line occupation state of this realization
    # Collect per-realization SNR samples, per-lightpath bitrates and
    # per-connection capacities.
    snr_conns = []
    rbl_conns = []
    rbc_conns = []
    for streamed_conn in stream_conn_list:
        snrs = []
        rbl = []
        for connection in streamed_conn:
            snrs.extend(connection.snr)
            for lightpath in connection.lightpaths:
                rbl.append(lightpath.bitrate)
        rbc = [connection.calculate_capacity() for connection in streamed_conn]
        snr_conns.append(snrs)
        rbl_conns.append(rbl)
        rbc_conns.append(rbc)
    # Average congestion per line (fraction of occupied channels) across realizations.
    lines_labels = list(lines_state_list[0].keys())
    congestions = {label: [] for label in lines_labels}
    for line_state in lines_state_list:
        for line_label, line in line_state.items():
            cong = line.state.count('occupied') / len(line.state)
            congestions[line_label].append(cong)
    avg_congestion = {label: np.mean(cong) for label, cong in congestions.items()}
    plt.bar(range(len(avg_congestion)), list(avg_congestion.values()), align='center')
    plt.xticks(range(len(avg_congestion)), list(avg_congestion.keys()))
    plt.show()
    traffic_list = [np.sum(rbl_list) for rbl_list in rbl_conns]
    avg_rbl_list = [np.mean(rbl_list) for rbl_list in rbl_conns]
    avg_snr_list = [np.mean(list(filter(lambda x: x != 0, snr_list))) for snr_list in snr_conns]
    print('\n')
    # BUG FIX: the original passed the '{}' template and format(value) as two
    # separate print arguments (and '{:.2 f}' is an invalid format spec), so
    # the placeholders were printed literally. Use str.format properly.
    print('Line to upgrade: {}'.format(max(avg_congestion, key=avg_congestion.get)))
    print('Avg Total Traffic: {:.2f} Tbps'.format(np.mean(traffic_list) * 1e-3))
    print('Avg Lighpath Bitrate: {:.2f} Gbps'.format(np.mean(avg_rbl_list)))
    print('Avg Lighpath SNR: {:.2f} dB'.format(np.mean(avg_snr_list)))
    for id_mcr in range(NMC):
        plt.hist(snr_conns[id_mcr], bins=10)
        plt.title('SNR Distribution [dB]')
        # plt.show()
        # plt.hist(rbl_conns[id_mcr], bins=10)
        # plt.title('Lightpath Capacity Distribution [Gbps]')
        # plt.show()
main()
<filename>NewServer.py<gh_stars>0
#!/usr/bin/python
# Flask server exposing gridded-dataset query endpoints.
# NOTE: this is a Python 2 codebase (execfile and print statements below).
import datetime
import sys
import os
import json
# Pulls searchDB / getStats / searchDBReturnRows / convertToRectangles (and
# presumably convertToString) into this module's globals -- TODO confirm.
execfile('newSearch.py')
from config import port
import math
from flask import Flask
from flask import request
from flask.ext.cors import CORS, cross_origin
from loadManager import DataManager
# Application setup: a CORS-enabled Flask app and one shared data manager.
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
dataManager = DataManager()
def getField(request, requestResult, fieldName, convertFunction, checkFunction):
    """Extract, convert and validate one request field into *requestResult*.

    ``convertFunction`` takes the raw string and returns the typed value,
    raising ValueError/TypeError on failure; ``checkFunction`` returns True
    when the converted value is valid. On success the value is stored under
    ``fieldName``; on any failure ``requestResult['error']`` is set and a
    message is appended. Designed to be called repeatedly with the same
    requestResult, so ``error``, once True, is never reset to False.
    """
    value = request.args.get(fieldName)
    if (not value):
        requestResult['error'] = True
        requestResult['message'] += 'fieldName %s missing. ' % fieldName
        # BUG FIX: previously fell through and also reported a spurious
        # "conversion function failed ... not None" error for the same field.
        return
    try:
        val = convertFunction(value)
        if (checkFunction(val)):
            requestResult[fieldName] = val
        else:
            requestResult['error'] = True
            requestResult['message'] += 'validity check failed for field %s, value %s' % (fieldName, value)
    except (ValueError, TypeError):
        requestResult['error'] = True
        requestResult['message'] += 'conversion function failed for fieldName %s, not %s. ' % (fieldName, value)
#
# Parse a request against a list of field specifications. Each spec is a
# (fieldName, convertFunction, checkFunction) tuple handed to getField,
# which accumulates values and errors into one result dict.
#
def parseRequest(request, fields):
    """Return {'error', 'message', <field>: value, ...} for the given specs."""
    parsed = {'error': False, 'message': ''}
    for spec in fields:
        getField(request, parsed, *spec)
    return parsed
# Field specifications for parseRequest: (name, converter, validity check).
# basicParseFields covers the dataset selector; fullParseFields adds the
# bounding box (NW / SE corners in degrees).
basicParseFields = [('year', int, lambda x: x in range(1997, 2016)),
                    ('month', int, lambda x: x in range(1, 13)),
                    ('res', int, lambda x: x in [1, 2, 4, 10])
                    ]
fullParseFields = basicParseFields + [
    ('nwLat', float, lambda x: x <= 90.0 and x >= -90.0),
    ('seLat', float, lambda x: x <= 90.0 and x >= -90.0),
    ('nwLon', float, lambda x: x <= 180.0 and x >= -180.0),
    ('seLon', float, lambda x: x <= 180.0 and x >= -180.0),
]
#
# Debug endpoints: parse the request fields and echo the parsed structure.
#
@app.route('/test')
def test_basic():
    """Debug endpoint: parse only year/month/res and echo the parsed dict."""
    result = parseRequest(request, basicParseFields)
    if result['error']:
        return 'Error in request ' + result['message']
    else:
        return json.dumps(result)
@app.route('/test_full')
def test_full():
    """Debug endpoint: parse the full field set (incl. bounding box) and echo it."""
    result = parseRequest(request, fullParseFields)
    if result['error']:
        return 'Error in request ' + result['message']
    else:
        return json.dumps(result)
def parseAndCheck(request):
    """Parse the full field set and verify the requested dataset is loadable."""
    query = parseRequest(request, fullParseFields)
    if query['error']:
        query['message'] = 'Error in request ' + query['message']
        return query
    # NOTE(review): convertToString is not defined in this file -- presumably
    # supplied by execfile('newSearch.py') at the top; confirm, otherwise this
    # error path raises NameError.
    if (not dataManager.checkLoadable(query['year'], query['month'], query['res'])):
        query['error'] = True
        query['message'] = "Dataset %s is not loaded" % convertToString(query['year'], query['month'], query['res'])
    else:
        query['error'] = False
    return query
# Request fields that arrive in degrees and get converted to tenths of degrees.
degreeFields = ['nwLat', 'nwLon', 'seLat', 'seLon']
def dumpQuery(query):
    """Debug helper: print every key/value of *query* on one line and flush stdout."""
    # Renamed the local from ``str`` -- it shadowed the builtin.
    parts = ['query[%s] = %s' % (key, query[key]) for key in query]
    # Parenthesized single-argument print behaves the same on Python 2 and 3.
    print(', '.join(parts))
    sys.stdout.flush()
def convertDegreesToTenthsOfDegrees(query, fields):
    """In place, turn each listed degree field into integer tenths of a degree."""
    for name in fields:
        scaled = query[name] * 10
        query[name] = int(math.floor(scaled))
@app.route('/get_time')
def get_times():
    """Return query statistics as JSON (``getStats`` comes from newSearch.py).

    NOTE(review): the route is /get_time (singular) although the help text
    elsewhere refers to /get_times.
    """
    query = parseAndCheck(request)
    if (query['error']):
        return query['message']
    convertDegreesToTenthsOfDegrees(query, degreeFields)
    stats = getStats(dataManager, query['year'], query['month'], query['res'],
                     query['nwLat'], query['seLat'], query['nwLon'], query['seLon'])
    return json.dumps(stats)
@app.route('/show_inventory')
def get_inventory():
    """Report loadable data sets, currently loaded data sets and total bytes loaded."""
    # Loop variable renamed from ``tuple`` -- it shadowed the builtin.
    loadable = '\n'.join(["year = %d, month=%d, res=%d" % key for key in dataManager.getLoadableKeys()])
    print(loadable)
    # BUG FIX: the original wrapped this list in a call to an undefined
    # ``join(...)`` name, which raised NameError whenever the route was hit.
    inventory = '\n'.join(["year = %d, month=%d, res=%d" % key for key in dataManager.getAllLoadedKeys()])
    print(inventory)
    size = '\nTotal Bytes loaded: %dMB\n' % int(round(dataManager.getSize()/1.0E6))
    print(size)
    return loadable + '\nData sets loaded\n' + inventory + size
@app.route('/get_data')
def get_data():
    """Return the requested grid window as a base-64 string plus metadata."""
    query = parseAndCheck(request)
    if (query['error']):
        return query['message']
    convertDegreesToTenthsOfDegrees(query, degreeFields)
    # searchDB is provided by execfile('newSearch.py').
    result = searchDB(dataManager, query['year'], query['month'], query['res'],
                      query['nwLat'], query['seLat'], query['nwLon'], query['seLon'])
    return json.dumps({
        'sw': result['swCorner'], 'ptsPerRow': result['pointsPerRow'],
        'ptsPerDegree': result['pointsPerDegree'], 'base64String': result['base64String']
    })
@app.route('/test_query')
def get_query():
    """Debug endpoint: return the fully parsed and unit-converted query as JSON."""
    query = parseAndCheck(request)
    if (query['error']):
        return query['message']
    convertDegreesToTenthsOfDegrees(query, degreeFields)
    return json.dumps(query)
@app.route('/get_data_readable')
def get_data_readable():
    """Same as /get_data, but the base-64 payload is split into rows for humans."""
    query = parseAndCheck(request)
    if (query['error']):
        return query['message']
    convertDegreesToTenthsOfDegrees(query, degreeFields)
    # searchDBReturnRows is provided by execfile('newSearch.py').
    result = searchDBReturnRows(dataManager, query['year'], query['month'], query['res'],
                                query['nwLat'], query['seLat'], query['nwLon'], query['seLon'], False)
    return json.dumps({
        'sw': result['swCorner'], 'ptsPerRow': result['pointsPerRow'],
        'ptsPerDegree': result['pointsPerDegree'], 'base64String': '\n'.join(result['sequences'])
    })
@app.route('/get_data_rectangle')
def get_data_rectangle():
    """Return the query result as 5-tuple rectangles instead of row strings.

    If ``indicesOnly`` appears in the query string, rectangles use
    row/column indices rather than lat/lon coordinates.
    """
    query = parseAndCheck(request)
    if (query['error']):
        return query['message']
    convertDegreesToTenthsOfDegrees(query, degreeFields)
    searchResult = searchDBReturnRows(dataManager, query['year'], query['month'], query['res'],
                                      query['nwLat'], query['seLat'], query['nwLon'], query['seLon'], False)
    indicesOnly = 'indicesOnly' in request.args
    # BUG FIX: pass the computed boolean; the original passed the literal
    # string 'indicesOnly', which is always truthy.
    result = convertToRectangles(searchResult, indicesOnly)
    return json.dumps({
        'sw': searchResult['swCorner'],
        'ptsPerDegree': searchResult['pointsPerDegree'], 'rectangles': ','.join(result)
    })
@app.route('/help')
def print_help():
    """Return an HTML help string listing every endpoint and its arguments."""
    # Local renamed from ``str`` (it shadowed the builtin). BUG FIX: the help
    # text now matches the actual registered routes: /get_time (not
    # /get_times) and /test_query (not /get_query).
    lines = ['<p>/show_inventory: print loaded data']
    lines.append('<p>/get_data?<args>:get the data as a base-64 string with metadata. See below for argument format')
    lines.append('<p>/get_data_readable?<args>:same as get_data but put the base64 string into rows for human readability')
    lines.append('<p>/get_time?<args>: get the statistics on the query')
    lines.append('<p>/test_query?<args>: parse the query and return the parsed result, used for debugging')
    lines.append('<p>/get_data_rectangle?<args>: get the data as a set of 5-tuple rectangles rather than as a set of strings. In addition to')
    lines.append(' the usual args, if indicesOnly is given as an argument, gives row/column indices rather than lat/lon for coordinates')
    lines.append('<p>/help: print this message\n')
    lines.append('<args>: seLon=<longitude>, nwLon=<longitude>, seLat=<latitude>, nwLat=<latitude>,')
    lines.append('year=<year>, month=<1-12>, res=<1,2,4, or 10>')
    return ''.join(lines)
@app.route('/')
def index():
    """Root endpoint: same output as /help."""
    return print_help()
# NOTE(review): mid-file import; ssl_directive is only needed at serve time.
from config import ssl_directive
if __name__ == '__main__':
    # for fileName in yearFiles:
    #     execfile(fileName)
    # print memory()
    # app.debug = True
    # Fail fast if configured data files are missing before serving requests.
    dataManager.checkExistenceSanityCheck()
    if (ssl_directive['use_ssl']):
        app.run(host='0.0.0.0', port = port, ssl_context = ssl_directive['ssl_context'])
    else:
        app.run(host='0.0.0.0', port=port)
|
<reponame>ewanbarr/mpikat<gh_stars>1-10
import mock
import re
import json
from tornado.gen import coroutine, Return, sleep
from tornado.testing import AsyncTestCase
from katcp.testutils import mock_req, handle_mock_req
from katpoint import Target
from mpikat.meerkat.fbfuse import BaseFbfConfigurationAuthority
from mpikat.meerkat.test.antennas import ANTENNAS
class SensorNotFoundError(Exception):
    """Raised when a sensor lookup references an unknown antenna."""
    pass
class MockFbfConfigurationAuthority(BaseFbfConfigurationAuthority):
    """Test double for the FBFUSE configuration authority.

    Canned schedule-block and target configurations are registered via the
    ``set_*_return_value`` helpers and handed back by the coroutine getters.
    """
    def __init__(self, host, port):
        super(MockFbfConfigurationAuthority, self).__init__(host, port)
        self.sb_return_values = {}      # (proxy_id, sb_id) -> config
        self.target_return_values = {}  # (proxy_id, target name) -> config
    def set_sb_config_return_value(self, proxy_id, sb_id, return_value):
        self.sb_return_values[(proxy_id, sb_id)] = return_value
    def set_target_config_return_value(self, proxy_id, target_string, return_value):
        # Keyed by the parsed katpoint target *name*, not the raw string.
        target = Target(target_string)
        self.target_return_values[(proxy_id, target.name)] = return_value
    @coroutine
    def get_target_config(self, proxy_id, target_string):
        """Return the canned config for this proxy/target (tornado Return)."""
        target = Target(target_string)
        raise Return(self.target_return_values[(proxy_id, target.name)])
    @coroutine
    def get_sb_config(self, proxy_id, sb_id):
        """Return the canned config for this proxy/schedule-block (tornado Return)."""
        raise Return(self.sb_return_values[(proxy_id, sb_id)])
class MockKatportalClientWrapper(mock.Mock):
    """Stand-in for the katportal client, returning canned telescope metadata.

    All getters are tornado coroutines that resolve immediately with fixed
    test values via ``raise Return(...)``.
    """

    @coroutine
    def get_observer_string(self, antenna):
        """Return a katpoint observer string for one antenna named m###/M###."""
        if re.match("^[mM][0-9]{3}$", antenna):
            raise Return("{}, -30:42:39.8, 21:26:38.0, 1035.0, 13.5".format(antenna))
        else:
            raise SensorNotFoundError("No antenna named {}".format(antenna))

    @coroutine
    def get_observer_strings(self, antennas):
        """Return a dict mapping each antenna name to its observer string.

        Bug fix: the accumulator dict was re-created inside the loop, so only
        the last antenna survived; it is now initialised once before the loop.
        """
        observers = {}
        for antenna in antennas:
            if re.match("^[mM][0-9]{3}$", antenna):
                observers[antenna] = "{}, -30:42:39.8, 21:26:38.0, 1035.0, 13.5".format(antenna)
            else:
                raise SensorNotFoundError("No antenna named {}".format(antenna))
        raise Return(observers)

    @coroutine
    def get_antenna_feng_id_map(self, instrument_name, antennas):
        # F-engine ids are simply the enumeration order of the antenna list.
        ant_feng_map = {antenna:ii for ii,antenna in enumerate(antennas)}
        raise Return(ant_feng_map)

    @coroutine
    def get_bandwidth(self, stream):
        raise Return(856e6)

    @coroutine
    def get_cfreq(self, stream):
        raise Return(1.28e9)

    @coroutine
    def get_sideband(self, stream):
        raise Return("upper")

    @coroutine
    def get_sync_epoch(self):
        raise Return(1532530856)

    @coroutine
    def get_itrf_reference(self):
        # Bug fix: tornado's Return accepts a single value; the original
        # passed three positional args, which raises TypeError at call time.
        raise Return((12312312, 123123123, 12312312))

    @coroutine
    def get_proposal_id(self):
        raise Return("USE-TEST-USE")

    @coroutine
    def get_sb_id(self):
        raise Return("20191100-0001")

    @coroutine
    def get_fbfuse_address(self):
        raise Return(("127.0.0.1", 5000))

    @coroutine
    def get_fbfuse_sb_config(self, product_id):
        """Return a fixed FBFUSE schedule-block configuration dict."""
        # NOTE(review): the multicast-group mapping below contains duplicate
        # dict keys (e.g. 'spead://172.16.31.10:7147' appears three times);
        # later entries silently overwrite earlier ones, leaving only three
        # groups despite 'coherent-beam-ngroups': 6. Kept as-is because the
        # intended distinct addresses are unknown — confirm with the authors.
        config = {'phase-reference': "source,radec,00:00:00,00:00:00",
                  'bandwidth': 856000000.0,
                  'centre-frequency': 1280000000.0,
                  'coherent-beam-antennas': '',
                  'coherent-beam-count': 36,
                  'coherent-beam-count-per-group': 6,
                  'coherent-beam-fscrunch': 1,
                  'coherent-beam-heap-size': 8192,
                  'coherent-beam-idx1-step': 1232131123,
                  'coherent-beam-multicast-group-mapping': json.dumps({
                      'spead://172.16.31.10:7147': [
                          'cfbf00000',
                          'cfbf00001',
                          'cfbf00002',
                          'cfbf00003',
                          'cfbf00004',
                          'cfbf00005'],
                      'spead://172.16.17.32:7147': [
                          'cfbf00006',
                          'cfbf00007',
                          'cfbf00008',
                          'cfbf00009',
                          'cfbf00010',
                          'cfbf00011'],
                      'spead://172.16.31.10:7147': [
                          'cfbf00012',
                          'cfbf00013',
                          'cfbf00014',
                          'cfbf00015',
                          'cfbf00016',
                          'cfbf00017'],
                      'spead://172.16.17.32:7147': [
                          'cfbf00018',
                          'cfbf00019',
                          'cfbf00020',
                          'cfbf00021',
                          'cfbf00022',
                          'cfbf00023'],
                      'spead://192.168.127.12:7147': [
                          'cfbf00024',
                          'cfbf00025',
                          'cfbf00026',
                          'cfbf00027',
                          'cfbf00028',
                          'cfbf00029'],
                      'spead://172.16.31.10:7147': [
                          'cfbf00030',
                          'cfbf00031',
                          'cfbf00032',
                          'cfbf00033',
                          'cfbf00034',
                          'cfbf00035']}),
                  'coherent-beam-multicast-groups': 'spead://172.16.31.10+5:7147',
                  'coherent-beam-multicast-groups-data-rate': 600000000.0,
                  'coherent-beam-ngroups': 6,
                  'coherent-beam-subband-nchans': 16,
                  'coherent-beam-time-resolution': 7.9e-05,
                  'coherent-beam-tscrunch': 16,
                  'incoherent-beam-antennas': '',
                  'incoherent-beam-count': 1,
                  'incoherent-beam-fscrunch': 1,
                  'incoherent-beam-heap-size': 8192,
                  'incoherent-beam-idx1-step': 1232131123,
                  'incoherent-beam-multicast-group': 'spead://172.16.58.3:7147',
                  'incoherent-beam-multicast-group-data-rate': 100000000.0,
                  'incoherent-beam-subband-nchans': 16,
                  'incoherent-beam-time-resolution': 7.9e-05,
                  'incoherent-beam-tscrunch': 16,
                  'nchannels': 4096}
        raise Return(config)

    @coroutine
    def get_fbfuse_proxy_id(self):
        raise Return("fbfuse_1")

    @coroutine
    def get_fbfuse_coherent_beam_positions(self, product_id):
        """Return a dict of beam name -> target string for 128 beams."""
        beams = {}
        for ii in range(128):
            beams["cfbf{:05d}".format(ii)] = "source0,radec,00:00:00,00:00:00"
        # Plain return is fine here: the body has no yield, so @coroutine
        # wraps an ordinary function rather than a generator.
        return beams

    def get_sensor_tracker(self, component, sensor_name):
        # A duplicate earlier definition returning mock.Mock() was removed;
        # this definition shadowed it, so effective behavior is unchanged.
        return DummySensorTracker()
class DummySensorTracker(object):
    """No-op sensor tracker used in place of the real katportal tracker."""
    def __init__(self, *args, **kwargs):
        # Accepts and ignores whatever the real tracker's constructor takes.
        pass
    @coroutine
    def start(self):
        # Nothing to start; the coroutine resolves immediately.
        pass
    @coroutine
    def wait_until(self, state, interrupt):
        # Simulate a slow state change; *state* and *interrupt* are ignored.
        yield sleep(10)
|
<reponame>nipunagarwala/cs273b_final_project
import numpy as np
import os
import json
import csv
import argparse
# Absolute data locations on the processing host.
# Input brain volumes (.npy), original and augmented variants:
BRAIN_DIR = os.path.abspath('/data/originalfALFFData')
BRAIN_DIR_AUG_ALL = os.path.abspath('/data/augmented_swap_all')
BRAIN_DIR_AUG_PARTIAL = os.path.abspath('/data/augmented_swap_partial')
# BRAIN_DIR_AUG_PARTIAL = os.path.abspath('/data/augmented_swap_partial_steal_25_split_80_20')
# Per-patient phenotype CSV (first column is patient id):
# PHENOTYPE_FILE = os.path.abspath('/data/processed_imputed_phenotype_data.csv')
PHENOTYPE_FILE = os.path.abspath('/data/normalized_imputed_phenotype_data.csv')
# Output binaries plus JSON lists of produced file paths / train-test splits:
OUTPUT_DIR = os.path.abspath('/data/binaries_new2')
ALL_FILES = os.path.abspath('/data/all2.json')
TRAIN_FILE = os.path.abspath('/data/train2.json')
TEST_FILE = os.path.abspath('/data/test2.json')
OUTPUT_DIR_PARTIAL_RANDOM = os.path.abspath('/data/swap_partial_binaries2')
# OUTPUT_DIR_PARTIAL_RANDOM = os.path.abspath('/data/augmented_swap_partial_steal_25_split_80_20_binaries')
TRAIN_FILE_PARTIAL_RANDOM = os.path.abspath('/data/swap_partial_train2.json')
# TRAIN_FILE_PARTIAL_RANDOM = os.path.abspath('/data/train_20_80_split.json')
OUTPUT_DIR_ALL_RANDOM = os.path.abspath('/data/swap_all_binaries')
TRAIN_FILE_ALL_RANDOM = os.path.abspath('/data/swap_all_train.json')
def _create_feature_binary(X_data, X_image, y_feature, filename):
# Create binary format
X_1 = X_data.flatten().tolist()
X_2 = X_image.flatten().tolist()
y = [y_feature]
# Label first, then 'image'
out = np.array(y + X_1 + X_2, np.float32)
# Save
out.tofile(filename)
def _normalize_brain(brain_data):
# Get mean and variance
mean = brain_data.mean()
std_dev = brain_data.std()
# Normalize
return (brain_data - mean)/brain_data.std()
def create_compressed_binary(phenotype, image, label, output_dir, id, prefix='compressed_'):
    """Write one example to ``<output_dir>/<prefix><id>.bin`` and return its path."""
    destination = os.path.join(output_dir, prefix + id + '.bin')
    _create_feature_binary(phenotype, image, label, destination)
    return destination
def convert_brain_npy(brain_dir=BRAIN_DIR, phenotype_file=PHENOTYPE_FILE, output_dir=OUTPUT_DIR, prefix='original_'):
    """Convert each patient's .npy brain volume plus phenotype row into one
    .bin record (label, phenotype, normalized image) and return the list of
    written binary paths. Python 2 code (print statements)."""
    path_list = []
    with open(phenotype_file, 'r') as csvfile:
        patient_reader = csv.reader(csvfile)
        for patient in patient_reader:
            print patient
            patient_id = patient[0]
            print patient_id
            # Skip header/blank rows that carry no patient id.
            if patient_id == "":
                continue
            # Retrieve data from phenotype CSV
            # assumes column 3 is the diagnosis label and columns 5:16 + 19:
            # are the numeric phenotype features — TODO confirm against CSV.
            patient_label = np.float32(patient[3])
            phenotype_data = patient[5:16] + patient[19:]
            phenotype_data = np.asarray(phenotype_data, dtype=np.float32)
            # Create necessary file names
            # brain_filename = pool_type + patient_id + pool_size
            brain_filename = prefix + patient_id
            npy_filename = brain_filename + '.npy'
            bin_filename = patient_id + '.bin'
            # Create necessary paths
            npy_path = os.path.join(brain_dir, npy_filename)
            bin_path = os.path.join(output_dir, bin_filename)
            # Load brain images from .npy files
            brain_data = np.load(npy_path)
            brain_data = brain_data.astype(np.float32)
            normailized_brain = _normalize_brain(brain_data)
            # Create binaries from all data
            _create_feature_binary(phenotype_data, normailized_brain, patient_label, bin_path)
            path_list.append(bin_path)
    return path_list
def convert_random_brain_npy(brain_dir, output_dir, use_pheno, phenotype_file=PHENOTYPE_FILE):
    """Convert augmented brain volumes to .bin records.

    With use_pheno=True, phenotype features and the label are taken from the
    CSV row matching each file's leading "<patient_id>_" prefix; otherwise
    the label is inferred from "autism" in the filename and the phenotype
    vector is all zeros. Returns the list of written binary paths.
    Python 2 code (print statements)."""
    path_list = []
    aug_file_list = os.listdir(brain_dir)
    if use_pheno:
        print "Using phenotype data."
        with open(phenotype_file, 'r') as csvfile:
            patient_reader = csv.reader(csvfile)
            count = 0
            for patient in patient_reader:
                patient_id = patient[0]
                if patient_id == "":
                    continue
                # All augmented files whose name starts with this patient id.
                patched_brains = [f for f in aug_file_list if patient_id == f.split("_")[0]]
                # Retrieve data from phenotype CSV
                patient_label = np.float32(patient[3])
                phenotype_data = patient[5:16] + patient[19:]
                phenotype_data = np.asarray(phenotype_data, dtype=np.float32)
                for brain_npy in patched_brains:
                    print "Reading NPY file: " + brain_npy
                    brain_file_components = brain_npy.split(".")
                    # label = 1 if brain_npy.split("_")[1] == "autism" else 0
                    # count += int(int(patient_label) != label)
                    # print patient_label
                    # print brain_file_components
                    brain_bin = brain_file_components[0] + ".bin"
                    # Create necessary paths
                    npy_path = os.path.join(brain_dir, brain_npy)
                    bin_path = os.path.join(output_dir, brain_bin)
                    # Load brain images from .npy files
                    brain_data = np.load(npy_path)
                    brain_data = brain_data.astype(np.float32)
                    normailized_brain = _normalize_brain(brain_data)
                    # Create binaries from all data
                    _create_feature_binary(phenotype_data, normailized_brain, patient_label, bin_path)
                    path_list.append(bin_path)
        # NOTE(review): count is only incremented in commented-out code above,
        # so this always prints 0 — confirm whether the check is still wanted.
        print count
    else:
        print "NOT using phenotype data."
        # Iterate though all augmented training files
        for brain_npy in aug_file_list:
            patient_label = 1.0 if "autism" in brain_npy else 0.0
            patient_label = np.float32(patient_label)
            # No phenotype available in this mode; use a zero vector (len 29).
            phenotype_data = np.zeros(29)
            file_components = brain_npy.split(".")
            brain_bin = file_components[0] + ".bin"
            # Create necessary paths
            npy_path = os.path.join(brain_dir, brain_npy)
            bin_path = os.path.join(output_dir, brain_bin)
            # Load brain images from .npy files
            brain_data = np.load(npy_path)
            brain_data = brain_data.astype(np.float32)
            normailized_brain = _normalize_brain(brain_data)
            # Create binaries from all data
            _create_feature_binary(phenotype_data, normailized_brain, patient_label, bin_path)
            path_list.append(bin_path)
    return path_list
def split_brain_binaries(file_list, train_file, test_file, split_fraction=0.9):
    """Randomly split *file_list* and write the two halves as JSON.

    *split_fraction* of the (shuffled) files go to *train_file*, the rest to
    *test_file*. Out-of-range fractions fall back to 0.9.
    """
    # Bug fix: the original assigned a misspelled variable (`split_fration`),
    # so the out-of-range fallback silently never took effect.
    if split_fraction >= 1.0 or split_fraction <= 0.0:
        split_fraction = 0.9
    # Calculate number of files to split
    num_files = len(file_list)
    num_train = int(num_files * split_fraction)
    # Shuffle so the split is random rather than positional.
    perm = np.arange(num_files)
    np.random.shuffle(perm)
    file_list = [file_list[i] for i in perm]
    # Split file list
    train_files = file_list[:num_train]
    test_files = file_list[num_train:]
    with open(train_file, 'w') as outfile1:
        json.dump(train_files, outfile1)
    with open(test_file, 'w') as outfile2:
        json.dump(test_files, outfile2)
def save_and_split(file_list, all_files=ALL_FILES, train_file=TRAIN_FILE, test_file=TEST_FILE):
    """Dump the complete path list to *all_files*, then write train/test splits."""
    with open(all_files, 'w') as handle:
        json.dump(file_list, handle)
    split_brain_binaries(file_list, train_file, test_file)
def main():
    """CLI entry point: build .bin datasets from original or augmented data.

    --aug selects the augmented pipeline; --all_random picks the fully
    swapped variant (and disables phenotype lookup). Python 2 code."""
    parser = argparse.ArgumentParser(description='Evaluation procedure for Salami CNN.')
    data_group = parser.add_mutually_exclusive_group()
    random_group = parser.add_mutually_exclusive_group()
    data_group.add_argument('--aug', action="store_true", help='Choose dataset to create')
    random_group.add_argument('--all_random', action="store_true", help='Training the model')
    args = parser.parse_args()
    if args.aug:
        # Fully-random augmentation has no matching phenotype rows.
        use_pheno = not args.all_random
        brain_dir = BRAIN_DIR_AUG_ALL if args.all_random else BRAIN_DIR_AUG_PARTIAL
        output_dir = OUTPUT_DIR_ALL_RANDOM if args.all_random else OUTPUT_DIR_PARTIAL_RANDOM
        train_file = TRAIN_FILE_ALL_RANDOM if args.all_random else TRAIN_FILE_PARTIAL_RANDOM
        print brain_dir, output_dir, train_file
        file_list = convert_random_brain_npy(brain_dir, output_dir, use_pheno)
        # Augmented data is training-only: no train/test split is made.
        with open(train_file, 'w') as outfile:
            json.dump(file_list, outfile)
    else:
        file_list = convert_brain_npy()
        save_and_split(file_list)
if __name__ == '__main__':
main()
|
import statistics
from django.contrib import admin
from django.db import models
from django.db.models import Count
from django.db.models.functions import Lower
from django.utils.html import format_html
from allianceauth.eveonline.models import EveAllianceInfo, EveCorporationInfo
from allianceauth.services.hooks import get_extension_logger
from app_utils.django import admin_boolean_icon_html
from app_utils.logging import LoggerAddTag
from . import __title__, app_settings, tasks
from .models import (
Notification,
Owner,
OwnerCharacter,
Structure,
StructureService,
StructureTag,
Webhook,
)
logger = LoggerAddTag(get_extension_logger(__name__), __title__)
class RenderableNotificationFilter(admin.SimpleListFilter):
    """Admin sidebar filter: can this notification be rendered/sent?"""

    # Fixed user-facing grammar: was "can be send".
    title = "can be sent"
    parameter_name = "notification_renderable"

    def lookups(self, request, model_admin):
        """Choices shown in the admin sidebar."""
        return (
            ("yes", "Yes"),
            ("no", "No"),
        )

    def queryset(self, request, queryset):
        """Return the filtered queryset"""
        value = self.value()  # fetch once instead of per-branch
        if value == "yes":
            return queryset.annotate_can_be_rendered().filter(can_be_rendered_2=True)
        elif value == "no":
            return queryset.annotate_can_be_rendered().filter(can_be_rendered_2=False)
        else:
            return queryset
class OwnerFilter(admin.SimpleListFilter):
    """Admin sidebar filter: notifications by owning corporation."""

    title = "owner"
    parameter_name = "owner_filter"

    def lookups(self, request, model_admin):
        """Distinct (owner pk, corporation name) pairs, sorted by name."""
        return (
            Notification.objects.values_list(
                "owner__pk", "owner__corporation__corporation_name"
            )
            .distinct()
            .order_by("owner__corporation__corporation_name")
        )

    def queryset(self, request, queryset):
        """Return the filtered queryset"""
        selected = self.value()
        if not selected:
            return queryset
        return queryset.filter(owner__pk=selected)
@admin.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
    """Read-only admin for ESI notifications with bulk send/timer actions."""
    list_display = (
        "notification_id",
        "owner",
        "notif_type",
        "timestamp",
        "created",
        "last_updated",
        "_webhooks",
        "_is_sent",
        "_is_timer_added",
    )
    ordering = ["-timestamp", "-notification_id"]
    list_filter = (
        OwnerFilter,
        # "owner",
        RenderableNotificationFilter,
        "is_sent",
        "notif_type",
    )
    def get_queryset(self, request):
        # Prefetch/select related objects used by the list columns to avoid
        # per-row queries.
        qs = super().get_queryset(request)
        return qs.prefetch_related("owner__webhooks").select_related(
            "owner", "owner__corporation", "sender"
        )
    def _webhooks(self, obj):
        """Column: names of the owner's webhooks subscribed to this type."""
        if not obj.can_be_rendered:
            return format_html("<i>N/A</i>")
        names = sorted(
            [
                webhook.name
                for webhook in obj.owner.webhooks.all()
                if obj.notif_type in webhook.notification_types
            ]
        )
        if names:
            return ", ".join(names)
        else:
            return format_html(
                '<b><span style="color: orange">Not configured</span></b>'
            )
    def _is_sent(self, obj):
        # Shown as N/A (None) for notifications that can not be rendered.
        value = obj.is_sent if obj.can_be_rendered else None
        return admin_boolean_icon_html(value)
    def _is_timer_added(self, obj):
        # Shown as N/A (None) for notifications that can not have a timer.
        value = obj.is_timer_added if obj.can_have_timer else None
        return admin_boolean_icon_html(value)
    actions = (
        "mark_as_sent",
        "mark_as_unsent",
        "send_to_webhooks",
        "process_for_timerboard",
    )
    def mark_as_sent(self, request, queryset):
        """Action: flag selected notifications as already sent."""
        notifications_count = 0
        for obj in queryset:
            obj.is_sent = True
            obj.save()
            notifications_count += 1
        self.message_user(
            request, "{} notifications marked as sent".format(notifications_count)
        )
    mark_as_sent.short_description = "Mark selected notifications as sent"
    def mark_as_unsent(self, request, queryset):
        """Action: flag selected notifications as not yet sent."""
        notifications_count = 0
        for obj in queryset:
            obj.is_sent = False
            obj.save()
            notifications_count += 1
        self.message_user(
            request, "{} notifications marked as unsent".format(notifications_count)
        )
    mark_as_unsent.short_description = "Mark selected notifications as unsent"
    def send_to_webhooks(self, request, queryset):
        """Action: queue renderable notifications for webhook delivery."""
        obj_pks = [obj.pk for obj in queryset if obj.can_be_rendered]
        ignored_count = len([obj for obj in queryset if not obj.can_be_rendered])
        tasks.send_notifications.delay(obj_pks)
        message = (
            f"Initiated sending of {len(obj_pks)} notification(s) to "
            f"configured webhooks."
        )
        if ignored_count:
            message += (
                f" Ignored {ignored_count} notification(s), which can not be rendered."
            )
        self.message_user(request, message)
    send_to_webhooks.short_description = (
        "Send selected notifications to configured webhooks"
    )
    def process_for_timerboard(self, request, queryset):
        """Action: create timerboard timers from timer-related notifications."""
        notifications_count = 0
        ignored_count = 0
        for obj in queryset:
            if obj.process_for_timerboard():
                notifications_count += 1
            else:
                ignored_count += 1
        message = (
            f"Added timers from {notifications_count} notifications to timerboard."
        )
        if ignored_count:
            message += f" Ignored {ignored_count} notification(s), which has no relation to timers."
        self.message_user(request, message)
    process_for_timerboard.short_description = (
        "Process selected notifications for timerboard"
    )
    def has_add_permission(self, request):
        # Notifications come from ESI only; never created manually.
        return False
    def has_change_permission(self, request, obj=None):
        return False
class OwnerSyncStatusFilter(admin.SimpleListFilter):
    """Admin sidebar filter: did all four sync categories succeed?"""

    title = "is sync ok"
    parameter_name = "sync_status__exact"

    def lookups(self, request, model_admin):
        """List of values to allow admin to select"""
        return (
            ("yes", "Yes"),
            ("no", "No"),
        )

    def queryset(self, request, queryset):
        """Return the filtered queryset"""
        # An owner is "in sync" only when every update category succeeded.
        criteria = dict(
            structures_last_update_ok=True,
            notifications_last_update_ok=True,
            forwarding_last_update_ok=True,
            assets_last_update_ok=True,
        )
        choice = self.value()
        if choice == "yes":
            return queryset.filter(**criteria)
        if choice == "no":
            return queryset.exclude(**criteria)
        return queryset
class OwnerCharacterAdminInline(admin.TabularInline):
    """Read-only inline listing the sync characters of an owner."""
    model = OwnerCharacter
    def has_add_permission(self, request, obj=None):
        return False
    def has_change_permission(self, request, obj=None):
        return False
@admin.register(Owner)
class OwnerAdmin(admin.ModelAdmin):
    """Admin page for structure owners (corporations) incl. sync status."""
    list_display = (
        "_corporation",
        "_alliance",
        "_characters",
        "_is_active",
        "_webhooks",
        "_has_default_pings_enabled",
        "_ping_groups",
        "_is_alliance_main",
        "_is_sync_ok",
        "_structures_count",
        "_notifications_count",
    )
    list_filter = (
        ("corporation__alliance", admin.RelatedOnlyFieldListFilter),
        "has_default_pings_enabled",
        "is_active",
        "is_alliance_main",
        OwnerSyncStatusFilter,
    )
    ordering = ["corporation__corporation_name"]
    search_fields = ["corporation__corporation_name"]
    def get_queryset(self, request):
        # Precompute counts and related objects used by the list columns.
        qs = super().get_queryset(request)
        return (
            qs.select_related("corporation", "corporation__alliance")
            .prefetch_related("ping_groups", "webhooks")
            .annotate(notifications_count=Count("notifications", distinct=True))
            .annotate(structures_count=Count("structures", distinct=True))
            .annotate_characters_count()
        )
    def _characters(self, obj) -> int:
        # x_characters_count comes from annotate_characters_count() above.
        return obj.x_characters_count
    _characters.admin_order_field = "x_characters_count"
    def _has_default_pings_enabled(self, obj):
        return obj.has_default_pings_enabled
    _has_default_pings_enabled.short_description = "default pings"
    _has_default_pings_enabled.boolean = True
    def _ping_groups(self, obj):
        return sorted([ping_group.name for ping_group in obj.ping_groups.all()])
    def _corporation(self, obj):
        return obj.corporation.corporation_name
    _corporation.admin_order_field = "corporation__corporation_name"
    def _alliance(self, obj):
        # Not every corporation belongs to an alliance.
        if obj.corporation.alliance:
            return obj.corporation.alliance.alliance_name
        else:
            return None
    _alliance.admin_order_field = "corporation__alliance__alliance_name"
    def _webhooks(self, obj):
        """Column: configured webhook names, or a red error when none."""
        names = sorted([webhook.name for webhook in obj.webhooks.all()])
        if names:
            return names
        else:
            return format_html(
                '<span style="color: red"></i>Error: Notifications can not be sent, '
                "because there is no webhook configured for this owner."
            )
    def _is_active(self, obj):
        return obj.is_active
    _is_active.boolean = True
    _is_active.short_description = "active"
    def _is_alliance_main(self, obj):
        # None renders as N/A instead of a red cross.
        value = True if obj.is_alliance_main else None
        return admin_boolean_icon_html(value)
    _is_alliance_main.short_description = "alliance main"
    def _is_sync_ok(self, obj):
        # Sync status is meaningless for deactivated owners.
        if not obj.is_active:
            return None
        else:
            return obj.are_all_syncs_ok
    _is_sync_ok.boolean = True
    _is_sync_ok.short_description = "sync ok"
    def _notifications_count(self, obj: Owner) -> int:
        return obj.notifications_count
    _notifications_count.short_description = "notifications"
    def _structures_count(self, obj: Owner) -> int:
        return obj.structures_count
    _structures_count.short_description = "structures"
    actions = (
        "update_all",
        "update_structures",
        "fetch_notifications",
        "deactivate_owners",
        "activate_owners",
    )
    def activate_owners(self, request, queryset):
        """Action: enable syncing for the selected owners."""
        queryset.update(is_active=True)
        self.message_user(request, f"Activated {queryset.count()} owners")
    activate_owners.short_description = "Activate selected owners"
    def deactivate_owners(self, request, queryset):
        """Action: disable syncing for the selected owners."""
        queryset.update(is_active=False)
        self.message_user(request, f"Deactivated {queryset.count()} owners")
    deactivate_owners.short_description = "Deactivate selected owners"
    def update_all(self, request, queryset):
        """Action: queue a full structures+notifications update per owner."""
        for obj in queryset:
            tasks.update_all_for_owner.delay(obj.pk, user_pk=request.user.pk)
            text = (
                f"Started updating structures and notifications for {obj}. "
                "You will receive a notification once it is completed."
            )
            self.message_user(request, text)
    update_all.short_description = "Update all from EVE server"
    def update_structures(self, request, queryset):
        """Action: queue a structures-only update per owner."""
        for obj in queryset:
            tasks.update_structures_for_owner.delay(obj.pk, user_pk=request.user.pk)
            text = (
                f"Started updating structures for {obj}. "
                "You will receive a notification once it is completed."
            )
            self.message_user(request, text)
    update_structures.short_description = "Update structures from EVE server"
    def fetch_notifications(self, request, queryset):
        """Action: queue a notifications fetch per owner."""
        for obj in queryset:
            tasks.process_notifications_for_owner.delay(obj.pk, user_pk=request.user.pk)
            text = (
                f"Started fetching notifications for {obj}. "
                "You will receive a notification once it is completed."
            )
            self.message_user(request, text)
    fetch_notifications.short_description = "Fetch notifications from EVE server"
    def has_add_permission(self, request):
        # Owners are created through the app's setup flow, not the admin.
        return False
    def get_readonly_fields(self, request, obj=None):
        # Sync-status fields are informational only once an owner exists.
        if obj:  # editing an existing object
            return self.readonly_fields + (
                "assets_last_update_at",
                "assets_last_update_ok",
                "corporation",
                "forwarding_last_update_at",
                "forwarding_last_update_ok",
                "notifications_last_update_at",
                "notifications_last_update_ok",
                "structures_last_update_at",
                "structures_last_update_ok",
                "_avg_turnaround_time",
                "_are_all_syncs_ok",
                "_structures_last_update_fresh",
                "_notifications_last_update_fresh",
                "_forwarding_last_update_fresh",
                "_assets_last_update_fresh",
            )
        return self.readonly_fields
    inlines = (OwnerCharacterAdminInline,)
    filter_horizontal = ("ping_groups",)
    fieldsets = (
        (
            None,
            {
                "fields": (
                    "corporation",
                    "webhooks",
                    "is_alliance_main",
                    "are_pocos_public",
                    "has_default_pings_enabled",
                    "ping_groups",
                    "is_included_in_service_status",
                    "is_active",
                )
            },
        ),
        (
            "Sync Status",
            {
                "classes": ("collapse",),
                "fields": (
                    "_are_all_syncs_ok",
                    (
                        "structures_last_update_ok",
                        "_structures_last_update_fresh",
                        "structures_last_update_at",
                    ),
                    (
                        "notifications_last_update_ok",
                        "_notifications_last_update_fresh",
                        "notifications_last_update_at",
                        "_avg_turnaround_time",
                    ),
                    (
                        "forwarding_last_update_ok",
                        "_forwarding_last_update_fresh",
                        "forwarding_last_update_at",
                    ),
                    (
                        "assets_last_update_ok",
                        "_assets_last_update_fresh",
                        "assets_last_update_at",
                    ),
                ),
            },
        ),
    )
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        """only show custom tags in dropdown"""
        if db_field.name == "webhooks":
            kwargs["queryset"] = Webhook.objects.filter(is_active=True)
        return super().formfield_for_manytomany(db_field, request, **kwargs)
    def _are_all_syncs_ok(self, obj):
        return obj.are_all_syncs_ok
    _are_all_syncs_ok.boolean = True
    _are_all_syncs_ok.short_description = "All syncs OK"
    def _avg_turnaround_time(self, obj) -> str:
        """Average time between timestamp of notifications an when they are received."""
        def my_format(value) -> str:
            return f"{value:,.0f}" if value else "-"
        max_short = app_settings.STRUCTURES_NOTIFICATION_TURNAROUND_SHORT
        max_medium = app_settings.STRUCTURES_NOTIFICATION_TURNAROUND_MEDIUM
        max_long = app_settings.STRUCTURES_NOTIFICATION_TURNAROUND_LONG
        max_valid = app_settings.STRUCTURES_NOTIFICATION_TURNAROUND_MAX_VALID
        notifications = obj.notifications.filter(created__isnull=False).order_by(
            "-timestamp"
        )
        # Per-notification turnaround in seconds, outliers above max_valid
        # excluded.
        data = [
            (rec[0] - rec[1]).total_seconds()
            for rec in notifications.values_list("created", "timestamp")
            if (rec[0] - rec[1]).total_seconds() < max_valid
        ]
        # Means over the most recent short/medium/long windows; "-" when
        # there is not enough data for a window.
        short = statistics.mean(data[:max_short]) if len(data) >= max_short else None
        medium = statistics.mean(data[:max_medium]) if len(data) >= max_medium else None
        long = statistics.mean(data[:max_long]) if len(data) >= max_long else None
        return f"{my_format(short)} | {my_format(medium)} | {my_format(long)}"
    _avg_turnaround_time.short_description = "Avg. turnaround time"
    def _structures_last_update_fresh(self, obj) -> int:
        return obj.is_structure_sync_fresh
    _structures_last_update_fresh.boolean = True
    _structures_last_update_fresh.short_description = "Last update fresh"
    def _notifications_last_update_fresh(self, obj) -> int:
        return obj.is_notification_sync_fresh
    _notifications_last_update_fresh.boolean = True
    _notifications_last_update_fresh.short_description = "Last update fresh"
    def _forwarding_last_update_fresh(self, obj) -> int:
        return obj.is_forwarding_sync_fresh
    _forwarding_last_update_fresh.boolean = True
    _forwarding_last_update_fresh.short_description = "Last update fresh"
    def _assets_last_update_fresh(self, obj) -> int:
        return obj.is_assets_sync_fresh
    _assets_last_update_fresh.boolean = True
    _assets_last_update_fresh.short_description = "Last update fresh"
    def get_form(self, *args, **kwargs):
        """Add help text to custom field."""
        help_texts = {
            "_avg_turnaround_time": (
                "For last %d | %d | %d notifications"
                % (
                    app_settings.STRUCTURES_NOTIFICATION_TURNAROUND_SHORT,
                    app_settings.STRUCTURES_NOTIFICATION_TURNAROUND_MEDIUM,
                    app_settings.STRUCTURES_NOTIFICATION_TURNAROUND_LONG,
                )
            ),
            "_are_all_syncs_ok": (
                "True when all syncs were successful and not older then "
                "the respective grace period."
            ),
            "_structures_last_update_fresh": (
                "True when last sync within %s minutes."
                % app_settings.STRUCTURES_STRUCTURE_SYNC_GRACE_MINUTES
            ),
            "_notifications_last_update_fresh": (
                "True when last sync within %s minutes."
                % app_settings.STRUCTURES_NOTIFICATION_SYNC_GRACE_MINUTES
            ),
            "_forwarding_last_update_fresh": (
                "True when last sync within %s minutes."
                % app_settings.STRUCTURES_NOTIFICATION_SYNC_GRACE_MINUTES
            ),
            "_assets_last_update_fresh": (
                "True when last sync within %s minutes."
                % app_settings.STRUCTURES_STRUCTURE_SYNC_GRACE_MINUTES
            ),
        }
        kwargs.update({"help_texts": help_texts})
        return super().get_form(*args, **kwargs)
@admin.register(StructureTag)
class StructureTagAdmin(admin.ModelAdmin):
    """Admin for structure tags; generated (non user-managed) tags are locked."""

    list_display = (
        "name",
        "description",
        "order",
        "style",
        "is_default",
        "is_user_managed",
    )
    list_filter = (
        "is_default",
        "style",
        "is_user_managed",
    )
    readonly_fields = ("is_user_managed",)

    def has_delete_permission(self, request, obj=None):
        # Generated tags must not be deleted through the admin.
        if obj and not obj.is_user_managed:
            return False
        return True

    def has_change_permission(self, request, obj=None):
        # Generated tags must not be edited through the admin.
        if obj and not obj.is_user_managed:
            return False
        return True
class StructureAdminInline(admin.TabularInline):
    """Read-only inline listing the services of a structure."""
    model = StructureService
    def has_add_permission(self, request, obj=None):
        return False
    def has_change_permission(self, request, obj=None):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class OwnerCorporationsFilter(admin.SimpleListFilter):
    """Custom filter to filter on corporations from owners only"""

    title = "owner corporation"
    parameter_name = "owner_corporation_id__exact"

    def lookups(self, request, model_admin):
        """(corporation_id, corporation_name) choices, sorted case-insensitively."""
        corporations = (
            EveCorporationInfo.objects.filter(structure_owner__isnull=False)
            .values("corporation_id", "corporation_name")
            .distinct()
            .order_by(Lower("corporation_name"))
        )
        return tuple(
            (row["corporation_id"], row["corporation_name"]) for row in corporations
        )

    def queryset(self, request, qs):
        selected = self.value()
        if selected is None:
            return qs.all()
        return qs.filter(owner__corporation__corporation_id=selected)
class OwnerAllianceFilter(admin.SimpleListFilter):
    """Custom filter to filter on alliances from owners only"""

    title = "owner alliance"
    parameter_name = "owner_alliance_id__exact"

    def lookups(self, request, model_admin):
        """(alliance_id, alliance_name) choices, sorted case-insensitively."""
        alliances = (
            EveAllianceInfo.objects.filter(
                evecorporationinfo__structure_owner__isnull=False
            )
            .values("alliance_id", "alliance_name")
            .distinct()
            .order_by(Lower("alliance_name"))
        )
        return tuple(
            (row["alliance_id"], row["alliance_name"]) for row in alliances
        )

    def queryset(self, request, qs):
        selected = self.value()
        if selected is None:
            return qs.all()
        return qs.filter(owner__corporation__alliance__alliance_id=selected)
@admin.register(Structure)
class StructureAdmin(admin.ModelAdmin):
    """Mostly read-only admin for structures with bulk tag actions."""
    show_full_result_count = True
    list_select_related = (
        "owner",
        "owner__corporation",
        "owner__corporation__alliance",
        "eve_solar_system",
        "eve_solar_system__eve_constellation__eve_region",
        "eve_type",
        "eve_type__eve_group",
        "eve_type__eve_group__eve_category",
        "eve_planet",
        "eve_moon",
    )
    search_fields = [
        "name",
        "owner__corporation__corporation_name",
        "eve_solar_system__name",
    ]
    ordering = ["name"]
    list_display = ("name", "_owner", "_location", "_type", "_power_mode", "_tags")
    list_filter = (
        OwnerCorporationsFilter,
        OwnerAllianceFilter,
        ("eve_solar_system", admin.RelatedOnlyFieldListFilter),
        (
            "eve_solar_system__eve_constellation__eve_region",
            admin.RelatedOnlyFieldListFilter,
        ),
        ("eve_type", admin.RelatedOnlyFieldListFilter),
        ("eve_type__eve_group", admin.RelatedOnlyFieldListFilter),
        ("eve_type__eve_group__eve_category", admin.RelatedOnlyFieldListFilter),
        ("tags", admin.RelatedOnlyFieldListFilter),
    )
    actions = ("add_default_tags", "remove_user_tags", "update_generated_tags")
    def get_queryset(self, request):
        # Tags are shown per row; prefetch to avoid per-row queries.
        qs = super().get_queryset(request)
        return qs.prefetch_related("tags")
    def _owner(self, structure):
        """Column: owning corporation plus alliance (if any)."""
        alliance = structure.owner.corporation.alliance
        return format_html(
            "{}<br>{}",
            structure.owner.corporation,
            alliance if alliance else "",
        )
    def _location(self, structure):
        """Column: most specific location (moon > planet > system) + region."""
        if structure.eve_moon:
            location_name = structure.eve_moon.name
        elif structure.eve_planet:
            location_name = structure.eve_planet.name
        else:
            location_name = structure.eve_solar_system.name
        return format_html(
            "{}<br>{}",
            location_name,
            structure.eve_solar_system.eve_constellation.eve_region,
        )
    def _type(self, structure):
        return format_html("{}<br>{}", structure.eve_type, structure.eve_type.eve_group)
    def _power_mode(self, structure):
        return structure.get_power_mode_display()
    def _tags(self, structure):
        return sorted([tag.name for tag in structure.tags.all()])
    _tags.short_description = "Tags"
    def has_add_permission(self, request):
        # Structures are synced from ESI; never created manually.
        return False
    def add_default_tags(self, request, queryset):
        """Action: attach every default tag to the selected structures."""
        structure_count = 0
        tags = StructureTag.objects.filter(is_default=True)
        for structure in queryset:
            for tag in tags:
                structure.tags.add(tag)
            structure_count += 1
        self.message_user(
            request,
            "Added {:,} default tags to {:,} structures".format(
                tags.count(), structure_count
            ),
        )
    add_default_tags.short_description = "Add default tags to selected structures"
    def remove_user_tags(self, request, queryset):
        """Action: strip all user-managed tags from the selected structures."""
        structure_count = 0
        for structure in queryset:
            for tag in structure.tags.filter(is_user_managed=True):
                structure.tags.remove(tag)
            structure_count += 1
        self.message_user(
            request,
            "Removed all user tags from {:,} structures".format(structure_count),
        )
    remove_user_tags.short_description = "Remove user tags for selected structures"
    def update_generated_tags(self, request, queryset):
        """Action: rebuild auto-generated tags for the selected structures."""
        structure_count = 0
        for structure in queryset:
            structure.update_generated_tags(recreate_tags=True)
            structure_count += 1
        self.message_user(
            request,
            "Updated all generated tags for {:,} structures".format(structure_count),
        )
    update_generated_tags.short_description = (
        "Update generated tags for selected structures"
    )
    # All concrete model fields except "tags" are read-only in the admin.
    readonly_fields = tuple(
        [
            x.name
            for x in Structure._meta.get_fields()
            if isinstance(x, models.fields.Field) and x.name not in ["tags"]
        ]
    )
    fieldsets = (
        (None, {"fields": ("name", "owner", "eve_solar_system", "eve_type", "tags")}),
        (
            "Status",
            {
                "classes": ("collapse",),
                "fields": (
                    "state",
                    (
                        "state_timer_start",
                        "state_timer_end",
                    ),
                    "unanchors_at",
                    "fuel_expires_at",
                    "last_online_at",
                    "has_fitting",
                    "has_core",
                ),
            },
        ),
        (
            "Reinforcement",
            {
                "classes": ("collapse",),
                "fields": (
                    ("reinforce_hour",),
                    (
                        "next_reinforce_hour",
                        "next_reinforce_apply",
                    ),
                ),
            },
        ),
        (
            "Position",
            {
                "classes": ("collapse",),
                "fields": ("position_x", "position_y", "position_z"),
            },
        ),
        (
            None,
            {
                "fields": (
                    (
                        "id",
                        "last_updated_at",
                    )
                )
            },
        ),
    )
    inlines = (StructureAdminInline,)
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        """only show custom tags in dropdown"""
        if db_field.name == "tags":
            kwargs["queryset"] = StructureTag.objects.filter(is_user_managed=True)
        return super().formfield_for_manytomany(db_field, request, **kwargs)
@admin.register(Webhook)
class WebhookAdmin(admin.ModelAdmin):
    """Admin for notification webhooks with test/activate/purge actions."""
    ordering = ["name"]
    list_display = (
        "name",
        "_ping_groups",
        "_owners",
        "is_active",
        "_is_default",
        "_messages_in_queue",
    )
    list_filter = ("is_active",)
    save_as = True
    def get_queryset(self, request):
        qs = super().get_queryset(request)
        return qs.prefetch_related("ping_groups", "owner_set", "owner_set__corporation")
    def _default_pings(self, obj):
        # NOTE(review): not referenced in list_display or fieldsets —
        # confirm whether this column is still needed.
        return obj.has_default_pings_enabled
    _default_pings.boolean = True
    def _ping_groups(self, obj):
        return sorted([ping_group.name for ping_group in obj.ping_groups.all()])
    def _owners(self, obj):
        return sorted([str(owner) for owner in obj.owner_set.all()])
    _owners.short_description = "Enabled for Owners"
    def _is_default(self, obj):
        # None renders as N/A instead of a red cross.
        value = True if obj.is_default else None
        return admin_boolean_icon_html(value)
    def _messages_in_queue(self, obj):
        return obj.queue_size()
    actions = ("test_notification", "activate", "deactivate", "purge_messages")
    def test_notification(self, request, queryset):
        """Action: queue a test notification for each selected webhook."""
        for obj in queryset:
            tasks.send_test_notifications_to_webhook.delay(
                obj.pk, user_pk=request.user.pk
            )
            self.message_user(
                request,
                'Initiated sending test notification to webhook "{}". '
                "You will receive a report on completion.".format(obj),
            )
    test_notification.short_description = "Send test notification to selected webhooks"
    def activate(self, request, queryset):
        """Action: enable the selected webhooks."""
        for obj in queryset:
            obj.is_active = True
            obj.save()
            self.message_user(request, f'You have activated webhook "{obj}"')
    activate.short_description = "Activate selected webhook"
    def deactivate(self, request, queryset):
        """Action: disable the selected webhooks."""
        for obj in queryset:
            obj.is_active = False
            obj.save()
            self.message_user(request, f'You have de-activated webhook "{obj}"')
    deactivate.short_description = "Deactivate selected webhook"
    def purge_messages(self, request, queryset):
        """Action: clear each selected webhook's message queue."""
        actions_count = 0
        # NOTE(review): name appears copied from a killmail app; these are
        # queued webhook messages, not killmails.
        killmails_deleted = 0
        for webhook in queryset:
            killmails_deleted += webhook.clear_queue()
            actions_count += 1
        self.message_user(
            request,
            f"Purged queued messages for {actions_count} webhooks, "
            f"deleting a total of {killmails_deleted} messages.",
        )
    purge_messages.short_description = "Purge queued messages from selected webhooks"
    filter_horizontal = ("ping_groups",)
    fieldsets = (
        (
            None,
            {
                "fields": (
                    "name",
                    "url",
                    "notes",
                    "notification_types",
                    "ping_groups",
                    "is_active",
                    "is_default",
                )
            },
        ),
        (
            "Advanced Options",
            {
                "classes": ("collapse",),
                "fields": (
                    "language_code",
                    "has_default_pings_enabled",
                    "webhook_type",
                ),
            },
        ),
    )
|
#!/usr/bin/env python3
from abc import ABC
from argparse import ArgumentParser
from datetime import datetime
from functools import wraps
from itertools import islice
from multiprocessing import Pool, Value
import time
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.concurrent import execute_concurrent_with_args
import numpy as np
import blocksci
# Stable numeric codes for BlockSci address types; equivalent to
# dict(zip(blocksci.address_type.types,
#          range(1, len(blocksci.address_type.types) + 1)))
# but hard-coded so the codes stay fixed across BlockSci versions.
_ADDRESS_TYPE_NAMES = (
    'nonstandard',
    'pubkey',
    'pubkeyhash',
    'multisig_pubkey',
    'scripthash',
    'multisig',
    'nulldata',
    'witness_pubkeyhash',
    'witness_scripthash',
    'witness_unknown',
)
address_type = {'address_type.%s' % name: code
                for code, name in enumerate(_ADDRESS_TYPE_NAMES, start=1)}
def timing(f):
    """Decorator that prints the wall-clock duration of each call to *f*."""
    @wraps(f)
    def wrapped(*args, **kwargs):
        started = datetime.now()
        result = f(*args, **kwargs)
        elapsed = datetime.now() - started
        print('\n... %s\n' % str(elapsed))
        return result
    return wrapped
class QueryManager(ABC):
    """Fans out Cassandra inserts over a pool of worker processes.

    Subclasses override :meth:`insert` with the per-chunk ingest logic.
    Each worker process gets its own session/prepared statement via the
    pool initializer :meth:`_setup`.
    """
    # chosen to match the default in execute_concurrent_with_args
    concurrency = 100
    # progress counter shared across worker processes ('d' = C double)
    counter = Value('d', 0)
    def __init__(self, cluster, keyspace, chain, cql_str,
                 num_proc=1, num_chunks=None):
        # Default to one chunk per process when no chunk count is given.
        if not num_chunks:
            num_chunks = num_proc
        self.num_proc = num_proc
        self.num_chunks = num_chunks
        self.pool = Pool(processes=num_proc,
                         initializer=self._setup,
                         initargs=(cluster, chain, keyspace, cql_str))
    @classmethod
    def _setup(cls, cluster, chain, keyspace, cql_str):
        # Runs once in every worker process: bind the chain handle and open
        # a dedicated Cassandra session with the statement pre-compiled.
        cls.chain = chain
        cls.session = cluster.connect()
        cls.session.default_timeout = 60
        cls.session.set_keyspace(keyspace)
        cls.prepared_stmt = cls.session.prepare(cql_str)
    def close_pool(self):
        # Stop accepting work and wait for all workers to finish.
        self.pool.close()
        self.pool.join()
    @timing
    def execute(self, fun, params):
        # Split the (start, end) index range into chunks and map them
        # over the worker pool; blocks until every chunk is done.
        self.pool.map(fun, chunk(params, self.num_chunks))
    @classmethod
    def insert(cls, params):
        # Overridden by subclasses with the actual ingest logic.
        pass
class TxQueryManager(QueryManager):
    """Ingests transaction summaries into the `transaction` table."""
    # per-subclass progress counter shared across workers
    counter = Value('d', 0)
    @classmethod
    def insert(cls, params):
        """Ingest all txs with index in the half-open range [idx_start, idx_end)."""
        idx_start, idx_end = params
        param_list = []
        # Process the index range in batches of `concurrency` statements.
        for index in range(idx_start, idx_end, cls.concurrency):
            curr_batch_size = min(cls.concurrency, idx_end - index)
            for i in range(0, curr_batch_size):
                tx = blocksci.Tx(index + i, cls.chain)
                param_list.append(tx_summary(tx))
            results = execute_concurrent_with_args(
                session=cls.session,
                statement=cls.prepared_stmt,
                parameters=param_list,
                concurrency=cls.concurrency)
            # Retry any failed statement synchronously until it succeeds
            # (the exception is printed and the insert re-attempted forever).
            for (i, (success, _)) in enumerate(results):
                if not success:
                    while True:
                        try:
                            tx = blocksci.Tx(index + i, cls.chain)
                            cls.session.execute(cls.prepared_stmt,
                                                tx_summary(tx))
                        except Exception as e:
                            print(e)
                            continue
                        break
            param_list = []
            with cls.counter.get_lock():
                cls.counter.value += curr_batch_size
            print('#tx {:,.0f}'.format(cls.counter.value), end='\r')
class BlockTxQueryManager(QueryManager):
    """Ingests per-block tx lists into the `block_transactions` table."""
    # per-subclass progress counter shared across workers
    counter = Value('d', 0)
    @classmethod
    def insert(cls, params):
        """Ingest all blocks with height in the half-open range [idx_start, idx_end)."""
        idx_start, idx_end = params
        param_list = []
        # Process the height range in batches of `concurrency` statements.
        for index in range(idx_start, idx_end, cls.concurrency):
            curr_batch_size = min(cls.concurrency, idx_end - index)
            for i in range(0, curr_batch_size):
                block = cls.chain[index + i]
                block_tx = [block.height, [tx_stats(x) for x in block.txes]]
                param_list.append(block_tx)
            results = execute_concurrent_with_args(
                session=cls.session,
                statement=cls.prepared_stmt,
                parameters=param_list,
                concurrency=cls.concurrency)
            # Retry any failed statement synchronously until it succeeds.
            for (i, (success, _)) in enumerate(results):
                if not success:
                    while True:
                        try:
                            block = cls.chain[index + i]
                            block_tx = [block.height,
                                        [tx_stats(x) for x in block.txes]]
                            cls.session.execute(cls.prepared_stmt,
                                                block_tx)
                        except Exception as e:
                            print(e)
                            continue
                        break
            param_list = []
            with cls.counter.get_lock():
                cls.counter.value += curr_batch_size
            print('#blocks {:,.0f}'.format(cls.counter.value), end='\r')
@timing
def insert(cluster, keyspace, cql_stmt, generator, concurrency=100):
    """Single-process ingest: drain *generator* into Cassandra in batches.

    Unlike the QueryManager classes this runs in the calling process and
    pulls `concurrency` parameter tuples at a time from the generator.
    """
    session = cluster.connect(keyspace)
    session.default_timeout = 60
    session.default_consistency_level = ConsistencyLevel.LOCAL_ONE
    prepared_stmt = session.prepare(cql_stmt)
    values = take(concurrency, generator)
    count = 0
    while values:
        results = execute_concurrent_with_args(
            session=session,
            statement=prepared_stmt,
            parameters=values,
            concurrency=concurrency)
        # Retry any failed statement synchronously until it succeeds.
        for (i, (success, _)) in enumerate(results):
            if not success:
                while True:
                    try:
                        session.execute(prepared_stmt, values[i])
                    except Exception as e:
                        print(e)
                        continue
                    break
        values = take(concurrency, generator)
        # NOTE(review): count advances by `concurrency` per batch, so this
        # progress line only fires when concurrency divides 1000 evenly.
        if (count % 1e3) == 0:
            print('#blocks {:,.0f}'.format(count), end='\r')
        count += concurrency
def take(n, iterable):
    '''Return first n items of the iterable as a list
    >>> take(0, [1, 2])
    []
    >>> take(1, [1, 2])
    [1]
    >>> take(2, [1, 2])
    [1, 2]
    >>> take(3, [1, 2])
    [1, 2]
    '''
    prefix = islice(iterable, n)
    return [item for item in prefix]
def chunk(val_range, k):
    '''Split the number range val_range=[n1, n2] into k evenly sized chunks
    >>> chunk([0, 1], 1)
    [(0, 1)]
    >>> chunk([0, 4], 4)
    [(0, 1), (1, 2), (2, 3), (3, 4)]
    >>> chunk([0, 5], 4)
    [(0, 2), (2, 3), (3, 4), (4, 5)]
    '''
    lo, hi = val_range
    assert hi > lo
    total = hi - lo
    assert 0 < k <= total
    # The first `n_large` chunks get one extra element each.
    small, n_large = divmod(total, k)
    bounds = []
    start = lo
    for i in range(k):
        size = small + 1 if i < n_large else small
        bounds.append((start, start + size))
        start += size
    return bounds
def addr_str(addr_obj):
    """Return the list of address strings for *addr_obj*.

    Multisig addresses yield one entry per constituent address; types
    without a printable address (nonstandard, nulldata, witness_unknown)
    yield None; all other types yield a single-element list.
    """
    kind = addr_obj.type
    if kind == blocksci.address_type.multisig:
        return [x.address_string for x in addr_obj.addresses]
    if kind in (blocksci.address_type.nonstandard,
                blocksci.address_type.nulldata,
                blocksci.address_type.witness_unknown):
        return None
    return [addr_obj.address_string]
def block_summary(block):
    """Map a block to a (height, hash bytes, timestamp, tx count) row."""
    block_hash = bytearray.fromhex(str(block.hash))
    return (block.height, block_hash, block.timestamp, len(block))
def tx_stats(tx):
    """Map a tx to a (hash bytes, #inputs, #outputs, in value, out value) row."""
    tx_hash = bytearray.fromhex(str(tx.hash))
    no_inputs = len(tx.inputs)
    no_outputs = len(tx.outputs)
    return (tx_hash, no_inputs, no_outputs, tx.input_value, tx.output_value)
def tx_io_summary(x):
    """Summarize one tx input/output as [addresses, value, type code]."""
    type_code = address_type[repr(x.address_type)]
    return [addr_str(x.address), x.value, type_code]
def tx_summary(tx):
    """Build the full parameter tuple for one row of the `transaction` table."""
    hash_str = str(tx.hash)
    inputs = [tx_io_summary(inp) for inp in tx.inputs]
    outputs = [tx_io_summary(out) for out in tx.outputs]
    return (hash_str[:5],                        # tx_prefix (partitioning key)
            bytearray.fromhex(hash_str),         # tx_hash
            tx.index,
            tx.block_height,
            int(tx.block_time.timestamp()),
            tx.is_coinbase,
            tx.input_value,
            tx.output_value,
            inputs,
            outputs,
            blocksci.heuristics.is_coinjoin(tx))
def insert_summary_stats(cluster, keyspace, last_block):
    """Write total block/tx counts and the tip timestamp for *keyspace*."""
    cql_str = '''INSERT INTO summary_statistics
                 (id, timestamp, no_blocks, no_txs)
                 VALUES (%s, %s, %s, %s)'''
    session = cluster.connect(keyspace)
    # Heights and tx indices are zero-based, hence the +1 totals.
    no_blocks = last_block.height + 1
    no_txs = last_block.txes[-1].index + 1
    session.execute(cql_str, (keyspace, last_block.timestamp, no_blocks, no_txs))
def main():
    """Parse CLI options and export BlockSci data to Apache Cassandra.

    Depending on the flags, ingests into the transaction, block_transactions,
    block and summary_statistics tables (all of them when no table flag is set).
    """
    parser = ArgumentParser(description='Export dumped BlockSci data '
                                        'to Apache Cassandra',
                            epilog='GraphSense - http://graphsense.info')
    parser.add_argument('-c', '--config', dest='blocksci_config',
                        required=True,
                        help='BlockSci configuration file')
    parser.add_argument('-d', '--db_nodes', dest='db_nodes', nargs='+',
                        default='localhost', metavar='DB_NODE',
                        help='list of Cassandra nodes; default "localhost")')
    parser.add_argument('-k', '--keyspace', dest='keyspace',
                        required=True,
                        help='Cassandra keyspace')
    parser.add_argument('--processes', dest='num_proc',
                        type=int, default=1,
                        help='number of processes (default 1)')
    parser.add_argument('--chunks', dest='num_chunks',
                        type=int,
                        help='number of chunks to split tx/block range '
                             '(default `NUM_PROC`)')
    parser.add_argument('-p', '--previous_day', dest='prev_day',
                        action='store_true',
                        help='only ingest blocks up to the previous day, '
                             'since currency exchange rates might not be '
                             'available for the current day.')
    parser.add_argument('--start_index', dest='start_index',
                        type=int, default=0,
                        help='start index of the blocks to export '
                             '(default 0)')
    parser.add_argument('--end_index', dest='end_index',
                        type=int, default=-1,
                        help='only blocks with height smaller than '
                             'this value are included; a negative index '
                             'counts back from the end (default -1)')
    parser.add_argument('--blocks', action='store_true',
                        help='ingest only into the blocks table')
    parser.add_argument('--block_tx', action='store_true',
                        help='ingest only into the block_transactions table')
    parser.add_argument('--tx', action='store_true',
                        help='ingest only into the transactions table')
    parser.add_argument('--statistics', action='store_true',
                        help='ingest only into the summary statistics table')
    args = parser.parse_args()
    chain = blocksci.Blockchain(args.blocksci_config)
    print('Last parsed block: %d (%s)' %
          (chain[-1].height, datetime.strftime(chain[-1].time, '%F %T')))
    # NOTE(review): the slice is taken before start_index is validated below.
    block_range = chain[args.start_index:args.end_index]
    if args.start_index >= len(chain):
        print('Error: --start_index argument must be smaller than %d' %
              len(chain))
        raise SystemExit
    if not args.num_chunks:
        args.num_chunks = args.num_proc
    if args.prev_day:
        # Trim the range so only blocks mined before today remain.
        tstamp_today = time.mktime(datetime.today().date().timetuple())
        block_tstamps = block_range.time.astype(datetime)/1e9
        v = np.where(block_tstamps < tstamp_today)[0]
        if len(v):
            last_index = np.max(v)
            last_height = block_range[last_index].height
            if last_height + 1 != chain[args.end_index].height:
                print('Discarding blocks %d ... %d' %
                      (last_height + 1, chain[args.end_index].height))
            block_range = chain[args.start_index:(last_height + 1)]
        else:
            print('No blocks to ingest.')
            raise SystemExit
    num_blocks = len(block_range)
    # Half-open index ranges: [first, last + 1)
    block_index_range = (block_range[0].height, block_range[-1].height + 1)
    tx_index_range = (block_range[0].txes[0].index,
                      block_range[-1].txes[-1].index + 1)
    # NOTE(review): tx_index_range is half-open, so the +1 overcounts the tx
    # total by one; only affects the printed progress figure.
    num_tx = tx_index_range[1] - tx_index_range[0] + 1
    cluster = Cluster(args.db_nodes)
    # No table flag set means "ingest into everything".
    all_tables = not (args.blocks or args.block_tx or
                      args.tx or args.statistics)
    # transactions
    if all_tables or args.tx:
        print('Transactions ({:,.0f} tx)'.format(num_tx))
        print('tx index: {:,.0f} -- {:,.0f}'.format(*tx_index_range))
        cql_str = '''INSERT INTO transaction
                     (tx_prefix, tx_hash, tx_index, height,
                      timestamp, coinbase, total_input, total_output,
                      inputs, outputs, coinjoin)
                     VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
        qm = TxQueryManager(cluster, args.keyspace, chain, cql_str,
                            args.num_proc, args.num_chunks)
        qm.execute(TxQueryManager.insert, tx_index_range)
        qm.close_pool()
    # block transactions
    if all_tables or args.block_tx:
        print('Block transactions ({:,.0f} blocks)'.format(num_blocks))
        print('block index: {:,.0f} -- {:,.0f}'.format(*block_index_range))
        cql_str = '''INSERT INTO block_transactions
                     (height, txs) VALUES (?, ?)'''
        qm = BlockTxQueryManager(cluster, args.keyspace, chain, cql_str,
                                 args.num_proc, args.num_chunks)
        qm.execute(BlockTxQueryManager.insert, block_index_range)
        qm.close_pool()
    # blocks
    if all_tables or args.blocks:
        print('Blocks ({:,.0f} blocks)'.format(num_blocks))
        print('block index: {:,.0f} -- {:,.0f}'.format(*block_index_range))
        cql_str = '''INSERT INTO block
                     (height, block_hash, timestamp, no_transactions)
                     VALUES (?, ?, ?, ?)'''
        generator = (block_summary(x) for x in block_range)
        insert(cluster, args.keyspace, cql_str, generator, 100)
    if all_tables or args.statistics:
        insert_summary_stats(cluster,
                             args.keyspace,
                             chain[block_range[-1].height])
    cluster.shutdown()
|
import DistributedLawnDecor
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.ShowBase import *
from direct.interval.IntervalGlobal import *
import GardenGlobals
from toontown.toonbase import TTLocalizer
from toontown.estate import PlantingGUI
from toontown.estate import PlantTreeGUI
from toontown.estate import ToonStatueSelectionGUI
from toontown.toontowngui import TTDialog
from panda3d.core import Vec4, NodePath
import types
class DistributedGardenPlot(DistributedLawnDecor.DistributedLawnDecor):
    """Client-side garden plot: an empty spot in an estate garden where a
    flower, gag tree or statuary item can be planted.

    Pops up the appropriate planting GUI when the owner uses the shovel on
    the plot, sends the chosen plant to the server via distributed updates,
    and plays the dig/plant movies broadcast back by the server.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGardenPlot')
    def __init__(self, cr):
        DistributedLawnDecor.DistributedLawnDecor.__init__(self, cr)
        self.plantPath = NodePath('plantPath')
        self.plantPath.reparentTo(self)
        self.plotScale = 1.0
        # Event names fired by the planting / statue-selection GUIs when done.
        self.plantingGuiDoneEvent = 'plantingGuiDone'
        self.toonStatueSelectionDoneEvent = 'toonStatueSelectionDone'
        self.defaultModel = 'phase_5.5/models/estate/dirt_mound'
        self.colorScaler = Vec4(1, 1, 1, 1)
        self.plantingGui = None
        return
    def delete(self):
        """Tear down any open planting GUI before base-class cleanup."""
        if self.plantingGui:
            self.plantingGui.destroy()
            self.plantingGui = None
        DistributedLawnDecor.DistributedLawnDecor.delete(self)
        return
    def announceGenerate(self):
        """Configure model, scale and collision size per plot type.

        Relies on self.ownerIndex / self.plot having been set from the
        distributed fields before generation completes.
        """
        self.plotType = GardenGlobals.whatCanBePlanted(self.ownerIndex, self.plot)
        self.stickUp = 0.0
        if self.getOwnerId() != localAvatar.doId:
            # Other toons' empty plots are invisible (no dirt mound model).
            self.defaultModel = None
        elif self.plotType == GardenGlobals.FLOWER_TYPE:
            self.collSphereRadius = 2.0
            self.collSphereOffset = 0.0
            self.plotScale = 0.7
            self.stickUp = 1.1
        elif self.plotType == GardenGlobals.GAG_TREE_TYPE:
            self.collSphereRadius = 3.0
            self.plotScale = 1.5
            self.colorScaler = Vec4(1.0, 1.0, 1.0, 1)
        elif self.plotType == GardenGlobals.STATUARY_TYPE:
            self.collSphereRadius = 3.0
            self.plotScale = 0.075
            self.stickUp = -0.0
            self.defaultModel = 'phase_5.5/models/estate/garden_slab'
        else:
            self.collSphereOffset = 0.0
        self.notify.debug('announceGenerate')
        DistributedLawnDecor.DistributedLawnDecor.announceGenerate(self)
        return
    def loadModel(self):
        """Load and place the plot's dirt-mound/slab model, if any."""
        self.rotateNode = self.plantPath.attachNewNode('rotate')
        self.model = None
        if self.defaultModel:
            self.model = loader.loadModel(self.defaultModel)
            # NOTE(review): types.TupleType is Python-2 only.
            if type(self.plotScale) == types.TupleType:
                self.model.setScale(*self.plotScale)
            else:
                self.model.setScale(self.plotScale)
            self.model.reparentTo(self.rotateNode)
            self.model.setColorScale(self.colorScaler)
        self.stick2Ground()
        return
    def setupShadow(self):
        # Empty plots cast no drop shadow.
        pass
    def getShovelCommand(self):
        # Callback invoked when the owner uses the shovel on this plot.
        return self.plantSomething
    def getShovelAction(self):
        # Label shown on the shovel button.
        return self.getPlantingText()
    def handleEnterPlot(self, entry=None):
        # NOTE(review): `dist` is computed but never used.
        dist = self.getDistance(localAvatar)
        if self.canBePlanted():
            base.localAvatar.addShovelRelatedDoId(self.doId)
    def handleExitPlot(self, entry=None):
        DistributedLawnDecor.DistributedLawnDecor.handleExitPlot(self, entry)
        base.localAvatar.removeShovelRelatedDoId(self.doId)
    def getPlantingText(self):
        """Return the localized shovel-button text for this plot's type."""
        plantText = 'hardcoding'
        if self.canBePlanted():
            whatCanBePlanted = GardenGlobals.whatCanBePlanted(self.ownerIndex, self.plot)
            plantText = TTLocalizer.GardeningPlant
            if whatCanBePlanted == GardenGlobals.INVALID_TYPE:
                self.notify.warning('whatCanBePlanted returned INVALID_TYPE for %d %d' % (self.ownerIndex, self.plot))
            elif whatCanBePlanted == GardenGlobals.FLOWER_TYPE:
                plantText = TTLocalizer.GardeningPlantFlower
            elif whatCanBePlanted == GardenGlobals.GAG_TREE_TYPE:
                plantText = TTLocalizer.GardeningPlantTree
            elif whatCanBePlanted == GardenGlobals.STATUARY_TYPE:
                plantText = TTLocalizer.GardeningPlantItem
        return plantText
    def canBePlanted(self):
        # Only the estate owner may plant in this plot.
        retval = True
        if not base.localAvatar.doId == self.getOwnerId():
            retval = False
        return retval
    def plantSomething(self):
        """Open the planting GUI matching this plot's type."""
        whatCanBePlanted = GardenGlobals.whatCanBePlanted(self.ownerIndex, self.plot)
        if whatCanBePlanted == GardenGlobals.INVALID_TYPE:
            self.notify.warning('whatCanBePlanted returned INVALID_TYPE for %d %d' % (self.ownerIndex, self.plot))
        elif whatCanBePlanted == GardenGlobals.FLOWER_TYPE:
            self.popupFlowerPlantingGui()
            self.startInteraction()
        elif whatCanBePlanted == GardenGlobals.GAG_TREE_TYPE:
            self.popupTreePlantingGui()
            self.startInteraction()
        elif whatCanBePlanted == GardenGlobals.STATUARY_TYPE:
            self.popupItemPlantingGui()
            self.startInteraction()
    def __handleFlowerPlantingDone(self, willPlant=0, recipeStr='', special=-1):
        """Resolve the flower recipe chosen in the GUI and tell the server.

        An unrecognized recipe burns the beans (`plantNothing`) and offers
        a retry dialog.
        """
        self.ignore(self.plantingGuiDoneEvent)
        self.ignore('stoppedAsleep')
        self.plantingGui.destroy()
        self.plantingGui = None
        base.localAvatar.showGardeningGui()
        base.localAvatar.removeShovelRelatedDoId(self.doId)
        successPlanting = False
        if willPlant:
            recipeKey = GardenGlobals.getRecipeKey(recipeStr, special)
            if recipeKey >= 0:
                species, variety = GardenGlobals.getSpeciesVarietyGivenRecipe(recipeKey)
                if species >= 0 and variety >= 0:
                    self.sendUpdate('plantFlower', [species, variety])
                    successPlanting = True
            else:
                self.notify.debug('%s %d is not a valid recipe' % (recipeStr, special))
                burntBeans = len(recipeStr)
                self.sendUpdate('plantNothing', [burntBeans])
        if successPlanting:
            # NOTE(review): stringToShow is computed but never displayed.
            flowerName = GardenGlobals.getFlowerVarietyName(species, variety)
            stringToShow = TTLocalizer.getResultPlantedSomethingSentence(flowerName)
        elif willPlant:
            self.resultDialog = TTDialog.TTDialog(style=TTDialog.Acknowledge, text=TTLocalizer.ResultPlantedNothing, command=self.popupFlowerPlantingGuiAgain)
        else:
            self.finishInteraction()
        return
    def popupFlowerPlantingGui(self):
        base.localAvatar.hideGardeningGui()
        self.acceptOnce(self.plantingGuiDoneEvent, self.__handleFlowerPlantingDone)
        self.plantingGui = PlantingGUI.PlantingGUI(self.plantingGuiDoneEvent)
        # Close the GUI gracefully if the toon falls asleep.
        self.accept('stoppedAsleep', self.__handleFlowerPlantingDone)
    def resultsCallback(self, value):
        self.notify.debug('value=%d' % value)
        self.resultDialog.destroy()
        self.resultDialog = None
        self.finishInteraction()
        return
    def popupFlowerPlantingGuiAgain(self, value):
        # Retry-dialog callback: reopen the flower GUI.
        self.notify.debug('value=%d' % value)
        self.resultDialog.destroy()
        self.resultDialog = None
        self.popupFlowerPlantingGui()
        return
    def popupItemPlantingGuiAgain(self, value):
        # Retry-dialog callback: reopen the statuary GUI.
        self.notify.debug('value=%d' % value)
        self.resultDialog.destroy()
        self.resultDialog = None
        self.popupItemPlantingGui()
        return
    def __handleItemPlantingDone(self, willPlant=0, recipeStr='', selectedSpecial=-1):
        """Resolve the statuary recipe chosen in the GUI and tell the server.

        Toon-statue species (205..208) additionally require picking a toon,
        handled by the statue-selection GUI afterwards.
        """
        self.ignore(self.plantingGuiDoneEvent)
        self.ignore('stoppedAsleep')
        self.plantingGui.destroy()
        self.plantingGui = None
        base.localAvatar.showGardeningGui()
        base.localAvatar.removeShovelRelatedDoId(self.doId)
        gardenSpecials = base.localAvatar.getGardenSpecials()
        special = -1
        if selectedSpecial >= 0:
            # Map GUI list index to the actual special item id.
            special = gardenSpecials[selectedSpecial][0]
        successPlanting = False
        successToonStatue = False
        if willPlant:
            recipeKey = GardenGlobals.getRecipeKey(recipeStr, special)
            if recipeKey >= 0:
                species, variety = GardenGlobals.getSpeciesVarietyGivenRecipe(recipeKey)
                if species >= 0 and variety >= 0:
                    if GardenGlobals.PlantAttributes[species]['plantType'] == GardenGlobals.STATUARY_TYPE:
                        successPlanting = True
                        if species >= 205 and species <= 208:
                            successToonStatue = True
                        else:
                            self.sendUpdate('plantStatuary', [species])
            else:
                self.notify.debug('%s %d is not a valid recipe' % (recipeStr, special))
                burntBeans = len(recipeStr)
                self.sendUpdate('plantNothing', [burntBeans])
        if successPlanting:
            # NOTE(review): stringToShow is computed but never displayed.
            itemName = GardenGlobals.PlantAttributes[species]['name']
            stringToShow = TTLocalizer.getResultPlantedSomethingSentence(itemName)
        elif willPlant:
            self.resultDialog = TTDialog.TTDialog(style=TTDialog.Acknowledge, text=TTLocalizer.ResultPlantedNothing, command=self.popupItemPlantingGuiAgain)
        else:
            self.finishInteraction()
        if successToonStatue:
            self.popupToonStatueSelectionGui(species)
        return
    def popupItemPlantingGui(self):
        base.localAvatar.hideGardeningGui()
        self.acceptOnce(self.plantingGuiDoneEvent, self.__handleItemPlantingDone)
        self.plantingGui = PlantingGUI.PlantingGUI(self.plantingGuiDoneEvent, True)
        self.plantingGui.showFirstSpecial()
        self.accept('stoppedAsleep', self.__handleItemPlantingDone)
    def popupToonStatueSelectionGui(self, species):
        base.localAvatar.hideGardeningGui()
        self.acceptOnce(self.toonStatueSelectionDoneEvent, self.__handleToonStatueSelectionDone, extraArgs=[species])
        self.toonStatueSelectionGui = ToonStatueSelectionGUI.ToonStatueSelectionGUI(self.toonStatueSelectionDoneEvent, True)
        # NOTE(review): unlike the acceptOnce above, this handler is hooked up
        # without extraArgs=[species]; if 'stoppedAsleep' fires, the callback
        # would be missing its required `species` argument — confirm.
        self.accept('stoppedAsleep', self.__handleToonStatueSelectionDone)
    def popupToonStatueSelectionGuiAgain(self, species):
        self.resultDialog.destroy()
        self.resultDialog = None
        self.popupToonStatueSelectionGui(species)
        return
    def __handleToonStatueSelectionDone(self, species, willPlant=0, recipeStr='', dnaCode=-1):
        """Send the chosen toon DNA for a toon statue, or fall back to the
        item-planting GUI when the selection was cancelled."""
        self.ignore(self.toonStatueSelectionDoneEvent)
        self.ignore('stoppedAsleep')
        self.toonStatueSelectionGui.destroy()
        self.toonStatueSelectionGui = None
        base.localAvatar.showGardeningGui()
        base.localAvatar.removeShovelRelatedDoId(self.doId)
        if willPlant:
            self.sendUpdate('plantToonStatuary', [species, dnaCode])
        else:
            self.popupItemPlantingGui()
        return
    def popupTreePlantingGui(self):
        base.localAvatar.hideGardeningGui()
        self.acceptOnce(self.plantingGuiDoneEvent, self.__handleTreePlantingDone)
        self.plantingGui = PlantTreeGUI.PlantTreeGUI(self.plantingGuiDoneEvent)
        self.accept('stoppedAsleep', self.__handleTreePlantingDone)
    def __handleTreePlantingDone(self, willPlant=False, gagTrack=None, gagLevel=None):
        """Send the chosen gag track/level to the server, or end interaction."""
        self.ignore(self.plantingGuiDoneEvent)
        self.ignore('stoppedAsleep')
        self.plantingGui.destroy()
        self.plantingGui = None
        base.localAvatar.showGardeningGui()
        base.localAvatar.removeShovelRelatedDoId(self.doId)
        if willPlant:
            self.sendUpdate('plantGagTree', [gagTrack, gagLevel])
        else:
            self.finishInteraction()
        return
    def setMovie(self, mode, avId):
        """Distributed field handler: play the movie the server requested."""
        if mode == GardenGlobals.MOVIE_PLANT:
            self.doPlaceItemTrack(avId)
        elif mode == GardenGlobals.MOVIE_FINISHREMOVING:
            self.doFinishRemovingTrack(avId)
        elif mode == GardenGlobals.MOVIE_PLANT_REJECTED:
            self.doPlantRejectedTrack(avId)
    def doPlantRejectedTrack(self, avId):
        """Short movie: toon puts the shovel away after a rejected plant."""
        toon = base.cr.doId2do.get(avId)
        if not toon:
            return
        self.finishMovies()
        self.movie = Sequence()
        self.movie.append(Func(toon.detachShovel))
        self.movie.append(Func(toon.loop, 'neutral'))
        if avId == localAvatar.doId:
            self.movie.append(Func(self.finishInteraction))
        self.movie.append(Func(self.movieDone))
        self.movie.start()
    def doFinishRemovingTrack(self, avId):
        """Movie: plot model sinks/shrinks away after an item was removed."""
        toon = base.cr.doId2do.get(avId)
        if not toon:
            return
        self.finishMovies()
        self.movie = Sequence()
        self.movie.append(Func(toon.detachShovel))
        if self.model:
            pos = self.model.getPos()
            pos.setZ(pos[2] - 1)
            animProp = LerpPosInterval(self.model, 3, self.model.getPos(), pos)
            shrinkProp = LerpScaleInterval(self.model, 3, scale=self.plotScale, startScale=0.01)
            objAnimShrink = ParallelEndTogether(animProp, shrinkProp)
            self.movie.append(objAnimShrink)
        self.movie.append(self.stopCamIval(avId))
        self.movie.append(Func(toon.loop, 'neutral'))
        if avId == localAvatar.doId:
            self.movie.append(Func(self.finishInteraction))
        self.movie.append(Func(self.movieDone))
        self.movie.start()
    def doPlaceItemTrack(self, avId, item=None):
        """Movie: toon walks to the plot and digs to plant an item."""
        toon = base.cr.doId2do.get(avId)
        if not toon:
            return
        self.finishMovies()
        if avId == localAvatar.doId:
            self.startInteraction()
        shovel = toon.attachShovel()
        shovel.hide()
        moveTrack = self.generateToonMoveTrack(toon)
        placeItemTrack = self.generatePlaceItemTrack(toon, item)
        self.movie = Sequence(self.startCamIval(avId), moveTrack, Func(shovel.show), placeItemTrack)
        if avId == localAvatar.doId:
            # Server will replace this plot object with the planted item.
            self.expectingReplacement = 1
        self.movie.append(Func(self.movieDone))
        self.movie.start()
    def generatePlaceItemTrack(self, toon, item):
        """Build the dig animation track, incl. burrow SFX and the plot model
        sinking away; optionally tosses *item* from the toon's hand onto
        the plot."""
        sound = loader.loadSfx('phase_5.5/audio/sfx/burrow.ogg')
        sound.setPlayRate(0.5)
        placeItemTrack = Parallel()
        placeItemTrack.append(Sequence(ActorInterval(toon, 'start-dig'), Parallel(ActorInterval(toon, 'loop-dig', loop=1, duration=5.13), Sequence(Wait(0.25), SoundInterval(sound, node=toon, duration=0.55), Wait(0.8), SoundInterval(sound, node=toon, duration=0.55), Wait(1.35), SoundInterval(sound, node=toon, duration=0.55))), ActorInterval(toon, 'start-dig', playRate=-1), Func(toon.loop, 'neutral'), Func(toon.detachShovel)))
        if self.model:
            pos = self.model.getPos()
            pos.setZ(pos[2] - 1)
            animProp = LerpPosInterval(self.model, 3, pos)
            shrinkProp = LerpScaleInterval(self.model, 3, scale=0.01, startScale=self.model.getScale())
            objAnimShrink = ParallelEndTogether(animProp, shrinkProp)
            placeItemTrack.append(objAnimShrink)
        if item:
            placeItemTrack.append(Sequence(Func(item.reparentTo, toon.rightHand), Wait(0.55), Func(item.wrtReparentTo, render), Parallel(LerpHprInterval(item, hpr=self.getHpr(render), duration=1.2), ProjectileInterval(item, endPos=self.getPos(render), duration=1.2, gravityMult=0.45)), Func(item.removeNode)))
        return placeItemTrack
    def makeMovieNode(self):
        """Position the camera/movie anchor; flowers get a custom offset."""
        if self.plotType == GardenGlobals.FLOWER_TYPE:
            self.movieNode = self.rotateNode.attachNewNode('moviePos')
            self.movieNode.setPos(0, 3, 0)
            self.movieNode.setH(180)
            self.stick2Ground()
        else:
            DistributedLawnDecor.DistributedLawnDecor.makeMovieNode(self)
<reponame>py4/SFUTranslate
"""
The class in charge of padding , batching, and post-processing of the created instances in the dataset reader
"""
from typing import Union, List, Tuple
from translate.readers.constants import InstancePartType
from translate.readers.datareader import AbsDatasetReader
from translate.backend.utils import device, backend, DataLoader, zeros_tensor
__author__ = "<NAME>"
def _pad_transform_id_list(id_list: Union[List, backend.Tensor], max_length: int, pad_token_id: int) -> backend.Tensor:
    """
    Receives the word-indices [integer/long type] and converts them into a backend tensor
    of length :param max_length:, right-padded with :param pad_token_id:.

    :param id_list: either a flat python list of ints or an id tensor
    :param max_length: the length to which the result is padded
    :param pad_token_id: the vocabulary index used for the padding positions
    :return: a 1-D long tensor of size ``max_length`` (moved to cuda if available)
    """
    assert len(id_list) > 0
    assert type(id_list[0]) is not list
    if type(id_list) == list:
        # Build the padded sequence on a copy: the original code used
        # ``id_list.extend(...)``, which mutated the caller's list as a
        # side effect on every call.
        padded = id_list + [pad_token_id] * (max_length - len(id_list))
        # NOTE(review): passing ``device=`` to backend.LongTensor assumes the
        # backend accepts it (recent torch versions do not) — confirm.
        result = backend.LongTensor(padded, device=device)
    else:
        result = id_list.view(-1)
        pad_size = max_length - result.size(0)
        if pad_size > 0:
            pad_vec = backend.ones(pad_size).long().to(device) * pad_token_id
            result = backend.cat([result, pad_vec], dim=0)
    # Single exit point instead of the duplicated cuda branch per case.
    if backend.cuda.is_available():
        return result.cuda()
    return result
def _pad_transform_list_list_id(id_list: List[List[int]], max_length: int, pad_token_id: int,
                                row_wise_padding: bool = False) -> backend.Tensor:
    """
    Converts a list of word-index rows into one 2-D backend tensor, padding
    each row to :param max_length:; with :param row_wise_padding: also pads
    the number of rows up to :param max_length: using zero rows.
    """
    padded_rows = [_pad_transform_id_list(row, max_length, pad_token_id) for row in id_list]
    stacked = backend.stack(padded_rows, dim=0)
    missing_rows = max_length - len(id_list)
    if row_wise_padding and missing_rows > 0:
        filler = zeros_tensor(1, missing_rows, max_length).long().squeeze(0)
        return backend.cat([stacked, filler], dim=0)
    return stacked
def _pad_transform_embedding_matrix(embedding_matrix: backend.Tensor, max_length: int) -> backend.Tensor:
    """
    :param embedding_matrix: A matrix of size [SentenceLength + 1, EmbeddingSize];
     its final row is the pad-word embedding and is used to fill the padding rows
    :param max_length: the max length to which the matrix is supposed to be padded to
    """
    assert embedding_matrix.dim() == 2
    content = embedding_matrix[:-1]
    missing = max_length - content.size(0)
    if missing > 0:
        pad_rows = embedding_matrix[-1].repeat(missing, 1)
        content = backend.cat([content, pad_rows])
    if backend.cuda.is_available():
        return content.cuda()
    return content
def get_padding_batch_loader(dataset_instance: AbsDatasetReader, batch_size: int) -> DataLoader:
    """
    Wrap :param dataset_instance: in a DataLoader that groups single
    instances into padded batches of size :param batch_size: and returns
    them as an iterator.
    """
    collator = PadCollate(
        pad_index_e=dataset_instance.target_vocabulary.get_pad_word_index(),
        pad_index_f=dataset_instance.source_vocabulary.get_pad_word_index(),
        instance_schema=dataset_instance.instance_schema)
    return DataLoader(dataset_instance, batch_size=batch_size, collate_fn=collator)
class PadCollate:
    """
    a variant of callate_fn that pads according to the longest sequence in a batch of sequences
    """
    def __init__(self, pad_index_f: int, pad_index_e: int, instance_schema: Tuple):
        """
        receives the padding indices which will be used to pad the tensors

        :param pad_index_f: pad index of the source-side vocabulary
        :param pad_index_e: pad index of the target-side vocabulary
        :param instance_schema: per-position InstancePartType of each instance tuple
        """
        self.pad_index_e = pad_index_e
        self.pad_index_f = pad_index_f
        self.instance_schema = instance_schema
    @staticmethod
    def get_item_length(id_list: Union[backend.Tensor, List], schema_type: InstancePartType) -> int:
        """
        given a list or a tensor, the function will detect the batch size in it and will return it
        """
        # Only 1-element-batch tensors are supported; anything bigger is unexpected.
        if schema_type == InstancePartType.Tensor and id_list.size(0) == 1:
            return id_list.view(-1).size(0)
        elif schema_type == InstancePartType.Tensor:
            raise NotImplementedError
        elif schema_type == InstancePartType.ListId:
            return len(id_list)
        elif schema_type == InstancePartType.TransformerSrcMask or \
                schema_type == InstancePartType.TransformerTgtMask:
            # Masks are lists of rows; the row length is the sequence length.
            return len(id_list[0])
        else:
            raise NotImplementedError("Unknown schema type {}".format(schema_type))
    def pad_collate(self, batch) -> Tuple:
        """
        the function to perform the padding + batching + conversion of final resulting batch to a tensor
        :param batch: a batch of Tuples of inputs
         every single input Tuple can contain a number of list (tensors) of ids
        """
        result = None
        for ind, schema_type in enumerate(self.instance_schema):
            # Pad each schema position to the longest item at that position.
            max_len = max(map(lambda x: self.get_item_length(x[ind], schema_type), batch))
            # Convention: position 0 is the source sentence, position 1 the target.
            pad_index = 0
            if ind == 0:
                pad_index = self.pad_index_f
            elif ind == 1:
                pad_index = self.pad_index_e
            if schema_type == InstancePartType.ListId:
                res = backend.stack([x for x in map(lambda p: (
                    _pad_transform_id_list(p[ind], max_len, pad_index)), batch)], dim=0)
            elif schema_type == InstancePartType.TransformerSrcMask or \
                    schema_type == InstancePartType.TransformerTgtMask:
                # Target masks are also padded row-wise (square mask).
                res = backend.stack([x for x in map(lambda p: (_pad_transform_list_list_id(
                    p[ind], max_len, pad_index, schema_type == InstancePartType.TransformerTgtMask)), batch)], dim=0)
            else:
                raise NotImplementedError
            # Accumulate the per-position tensors into one result tuple.
            if result is None:
                result = res,
            else:
                result = *result, res
        return result
    def __call__(self, batch):
        return self.pad_collate(batch)
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
zine.database
~~~~~~~~~~~~~
This module is a rather complex layer on top of SQLAlchemy 0.4.
Basically you will never use the `zine.database` module except you
are a core developer, but always the high level
:mod:`~zine.database.db` module which you can import from the
:mod:`zine.api` module.
:copyright: (c) 2009 by the Zine Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import urlparse
from os import path
from cPickle import loads as load_pickle
from struct import error
from datetime import datetime, timedelta
from types import ModuleType
import sqlalchemy
from sqlalchemy import orm
from sqlalchemy.exc import ArgumentError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.util import to_list
from sqlalchemy.engine.url import make_url, URL
from sqlalchemy.types import MutableType, TypeDecorator
from sqlalchemy.ext.associationproxy import association_proxy
from werkzeug import url_decode
from werkzeug.exceptions import NotFound
from zine.utils import local_manager, load_json, dump_json
# Splits "sqlite://<path>?<query>" / "sqlite:memory?<query>" URLs into
# (database, query string); group 1 is None for in-memory databases.
_sqlite_re = re.compile(r'sqlite:(?:(?://(.*?))|memory)(?:\?(.*))?$')
def get_engine():
    """Return the database engine of the active application.

    If no application is bound the behavior is undefined; when unsure whether
    the application is bound to the active thread, use
    :func:`~zine.application.get_application` and check it for `None`.
    The engine is stored on the application object as `database_engine`.
    """
    # Imported lazily to avoid a circular import at module load time.
    from zine.application import get_application
    app = get_application()
    return app.database_engine
def create_engine(uri, relative_to=None, echo=False):
    """Create a new SQLAlchemy engine.

    Works like SQLAlchemy's `create_engine` except that MySQL engines
    default to utf-8 and SQLite paths are resolved relative to
    `relative_to`.  The engine is always created with `convert_unicode`.
    """
    if uri.startswith('sqlite:'):
        # Special case sqlite: we want nicer URLs for that one, so parse
        # them with our own regex instead of the rfc1738 machinery.
        match = _sqlite_re.match(uri)
        if match is None:
            raise ArgumentError('Could not parse rfc1738 URL')
        database, query = match.groups()
        if database is None:
            database = ':memory:'
        elif relative_to is not None:
            database = path.join(relative_to, database)
        query = url_decode(query).to_dict() if query else {}
        info = URL('sqlite', database=database, query=query)
    else:
        info = make_url(uri)
    # For MySQL with no explicit connection encoding, force utf-8.
    if info.drivername == 'mysql':
        info.query.setdefault('charset', 'utf8')
    options = {'convert_unicode': True, 'echo': echo}
    # Pool sizes / recycle settings and friends are interpreter wide and
    # read from the environment rather than the config because:
    #
    # - system administrators can set them independently from the webserver
    #   configuration via SetEnv and friends.
    # - these settings are deployment dependent and should not affect a
    #   development server or shell for the same instance.
    for key in ('pool_size', 'pool_recycle', 'pool_timeout'):
        value = os.environ.get('ZINE_DATABASE_' + key.upper())
        if value is not None:
            options[key] = int(value)
    return sqlalchemy.create_engine(info, **options)
def secure_database_uri(uri):
    """Return the database uri with confidential information stripped."""
    url = make_url(uri)
    if url.password:
        url.password = '***'
    # make_url escapes the asterisks; undo that so the result stays readable
    return unicode(url).replace(':%2A%2A%2A@', ':***@')
def attribute_loaded(model, attribute):
    """Return True if *attribute* of *model* was already loaded."""
    # XXX: relies on SQLAlchemy keeping loaded attributes in the instance
    # __dict__.  Figure out if SA provides an official way to query this.
    loaded_attributes = model.__dict__
    return attribute in loaded_attributes
class ZEMLParserData(MutableType, TypeDecorator):
    """Column type that stores ZEML parser data as a pickled/serialized
    binary blob and degrades gracefully on corrupted rows."""

    # stored as raw bytes in the database
    impl = sqlalchemy.Binary

    def process_bind_param(self, value, dialect):
        # serialize the parser data on its way into the database;
        # None stays NULL
        if value is None:
            return
        from zine.utils.zeml import dump_parser_data
        return dump_parser_data(value)

    def process_result_value(self, value, dialect):
        # deserialize on the way out; corrupt rows (e.g. manual edits) are
        # logged and replaced by an empty dict instead of crashing the app
        from zine.utils.zeml import load_parser_data
        try:
            return load_parser_data(value)
        except (ValueError, error):  # struct.error -- database corruption?
            from zine.i18n import _
            from zine.utils import log
            log.exception(_(u'Error when loading parsed data from database. '
                            u'Maybe the database was manually edited and got '
                            u'corrupted? The system returned an empty value.'))
            return {}

    def copy_value(self, value):
        # MutableType requires a deep copy so change detection works
        from copy import deepcopy
        return deepcopy(value)
class JsonDictPickleFallback(MutableType, TypeDecorator):
    """
    Stores as JSON and loads from JSON, with pickle fallback for compatibility
    with older Zine installations."""

    # stored as raw bytes in the database
    impl = sqlalchemy.Binary

    def process_result_value(self, value, dialect):
        if value is None:
            return None
        else:
            try:
                # the extra str() call is for databases like postgres that
                # insist on using buffers for binary data.
                return load_json(str(value))
            except ValueError:
                # not valid JSON -- try the legacy pickle format
                try:
                    return load_pickle(str(value))
                except ValueError:
                    # Database corrupted?  Return the raw data so nothing
                    # is silently lost.
                    return {'dump': str(value)}

    def process_bind_param(self, value, dialect):
        # new values are always written as JSON
        if value is None:
            return None
        else:
            return dump_json(value)

    def copy_value(self, value):
        # MutableType requires a deep copy so change detection works
        from copy import deepcopy
        return deepcopy(value)
class Query(orm.Query):
    """Default query class used by the scoped session."""

    def first(self, raise_if_missing=False):
        """Return the first result of this `Query`, or None when the result
        contains no rows.  If `raise_if_missing` is set, an empty result
        raises a `NotFound` exception instead of returning None.
        """
        result = orm.Query.first(self)
        if result is not None:
            return result
        if raise_if_missing:
            raise NotFound()
        return None
# Thread-local scoped session bound lazily to the active application's engine.
session = orm.scoped_session(lambda: orm.create_session(get_engine(),
                             autoflush=True, autocommit=False),
                             local_manager.get_ident)

# configure a declarative base. This is unused in the code but makes it easier
# for plugins to work with the database.
class ModelBase(object):
    """Internal baseclass for `Model`."""

Model = declarative_base(name='Model', cls=ModelBase, mapper=session.mapper)
ModelBase.query = session.query_property(Query)

#: create a new module for all the database related functions and objects
sys.modules['zine.database.db'] = db = ModuleType('db')
key = value = mod = None
# re-export the public (``__all__``) names of sqlalchemy and sqlalchemy.orm
# on the synthetic `db` module
for mod in sqlalchemy, orm:
    for key, value in mod.__dict__.iteritems():
        if key in mod.__all__:
            setattr(db, key, value)
del key, mod, value

#: forward some session methods to the module as well
for name in 'delete', 'save', 'flush', 'execute', 'begin', 'mapper', \
            'commit', 'rollback', 'clear', 'refresh', 'expire', \
            'query_property':
    setattr(db, name, getattr(session, name))

#: and finally hook our own implementations of various objects in
db.Model = Model
db.Query = Query
db.get_engine = get_engine
db.create_engine = create_engine
db.session = session
db.ZEMLParserData = ZEMLParserData
db.JsonDictPickleFallback = JsonDictPickleFallback
db.mapper = session.mapper
db.association_proxy = association_proxy
db.attribute_loaded = attribute_loaded

#: called at the end of a request
cleanup_session = session.remove
#: metadata for the core tables and the core table definitions
metadata = db.MetaData()

# blog users / authors
users = db.Table('users', metadata,
    db.Column('user_id', db.Integer, primary_key=True),
    db.Column('username', db.String(30)),
    db.Column('real_name', db.String(180)),
    db.Column('display_name', db.String(180)),
    db.Column('description', db.Text),
    db.Column('extra', db.JsonDictPickleFallback),
    db.Column('pw_hash', db.String(70)),
    db.Column('email', db.String(250)),
    db.Column('www', db.String(200)),
    db.Column('is_author', db.Boolean)
)

# user groups and the users<->groups association table
groups = db.Table('groups', metadata,
    db.Column('group_id', db.Integer, primary_key=True),
    db.Column('name', db.String(30))
)

group_users = db.Table('group_users', metadata,
    db.Column('group_id', db.Integer, db.ForeignKey('groups.group_id')),
    db.Column('user_id', db.Integer, db.ForeignKey('users.user_id'))
)

# privileges, attachable to single users or whole groups
privileges = db.Table('privileges', metadata,
    db.Column('privilege_id', db.Integer, primary_key=True),
    db.Column('name', db.String(50), unique=True)
)

user_privileges = db.Table('user_privileges', metadata,
    db.Column('user_id', db.Integer, db.ForeignKey('users.user_id')),
    db.Column('privilege_id', db.Integer,
              db.ForeignKey('privileges.privilege_id'))
)

group_privileges = db.Table('group_privileges', metadata,
    db.Column('group_id', db.Integer, db.ForeignKey('groups.group_id')),
    db.Column('privilege_id', db.Integer,
              db.ForeignKey('privileges.privilege_id'))
)

# content taxonomy: categories and tags
categories = db.Table('categories', metadata,
    db.Column('category_id', db.Integer, primary_key=True),
    db.Column('slug', db.String(50)),
    db.Column('name', db.String(50)),
    db.Column('description', db.Text)
)

# blog entries / pages; `content_type` discriminates the post kind
posts = db.Table('posts', metadata,
    db.Column('post_id', db.Integer, primary_key=True),
    db.Column('pub_date', db.DateTime),
    db.Column('last_update', db.DateTime),
    db.Column('slug', db.String(200), index=True, nullable=False),
    db.Column('uid', db.String(250)),
    db.Column('title', db.String(150)),
    db.Column('text', db.Text),
    db.Column('author_id', db.Integer, db.ForeignKey('users.user_id')),
    db.Column('parser_data', db.ZEMLParserData),
    db.Column('comments_enabled', db.Boolean),
    db.Column('pings_enabled', db.Boolean),
    db.Column('content_type', db.String(40), index=True),
    db.Column('extra', db.JsonDictPickleFallback),
    db.Column('status', db.Integer)
)

# per-post link elements (e.g. enclosures / feed links)
post_links = db.Table('post_links', metadata,
    db.Column('link_id', db.Integer, primary_key=True),
    db.Column('post_id', db.Integer, db.ForeignKey('posts.post_id')),
    db.Column('href', db.String(250), nullable=False),
    db.Column('rel', db.String(250)),
    db.Column('type', db.String(100)),
    db.Column('hreflang', db.String(30)),
    db.Column('title', db.String(200)),
    db.Column('length', db.Integer)
)

tags = db.Table('tags', metadata,
    db.Column('tag_id', db.Integer, primary_key=True),
    db.Column('slug', db.String(150), unique=True, nullable=False),
    db.Column('name', db.String(100), unique=True, nullable=False)
)

# posts<->categories and posts<->tags association tables
post_categories = db.Table('post_categories', metadata,
    db.Column('post_id', db.Integer, db.ForeignKey('posts.post_id')),
    db.Column('category_id', db.Integer, db.ForeignKey('categories.category_id'))
)

post_tags = db.Table('post_tags', metadata,
    db.Column('post_id', db.Integer, db.ForeignKey('posts.post_id')),
    db.Column('tag_id', db.Integer, db.ForeignKey('tags.tag_id'))
)

# comments and pingbacks; `parent_id` makes them a threaded tree
comments = db.Table('comments', metadata,
    db.Column('comment_id', db.Integer, primary_key=True),
    db.Column('post_id', db.Integer, db.ForeignKey('posts.post_id')),
    db.Column('user_id', db.Integer, db.ForeignKey('users.user_id')),
    db.Column('author', db.String(160)),
    db.Column('email', db.String(250)),
    db.Column('www', db.String(200)),
    db.Column('text', db.Text),
    db.Column('is_pingback', db.Boolean, nullable=False),
    db.Column('parser_data', db.ZEMLParserData),
    db.Column('parent_id', db.Integer, db.ForeignKey('comments.comment_id')),
    db.Column('pub_date', db.DateTime),
    db.Column('blocked_msg', db.String(250)),
    db.Column('submitter_ip', db.String(100)),
    db.Column('status', db.Integer, nullable=False)
)

# URL redirects for moved content
redirects = db.Table('redirects', metadata,
    db.Column('redirect_id', db.Integer, primary_key=True),
    db.Column('original', db.String(200), unique=True),
    db.Column('new', db.String(200))
)
def init_database(engine):
    """Create all core tables on the given *engine*.

    This is called from the websetup which explains why it takes an engine
    and not a zine application.
    """
    # XXX: consider using something like this for mysql:
    #   cx = engine.connect()
    #   cx.execute('set storage_engine=innodb')
    #   metadata.create_all(cx)
    metadata.create_all(engine)
|
<gh_stars>0
import base64
from io import BytesIO
import time
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from PIL import Image
import requests
from model import detect, filter_boxes, detr, transform
from model import CLASSES, DEVICE
# Dash component wrappers
def Row(children=None, **kwargs):
    """Shorthand for a skeleton-css grid "row" div."""
    row_div = html.Div(children, className="row", **kwargs)
    return row_div
def Column(children=None, width=1, **kwargs):
    """Shorthand for a skeleton-css grid column of *width* twelfths (1-12).

    Raises KeyError for widths outside 1..12, like the original mapping.
    """
    number_words = ('one', 'two', 'three', 'four', 'five', 'six',
                    'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve')
    nb_map = dict(enumerate(number_words, start=1))
    return html.Div(children, className=f"{nb_map[width]} columns", **kwargs)
# plotly.py helper functions
def pil_to_b64(im, enc="png"):
    """Serialize *im* to a base64 data URI usable as a plotly image source.

    :param im: PIL image (anything exposing ``save(buffer, format=...)``)
    :param enc: image format, e.g. ``"png"`` or ``"jpeg"``
    :return: a ``data:image/<enc>;base64, ...`` string
    """
    io_buf = BytesIO()
    im.save(io_buf, format=enc)
    encoded = base64.b64encode(io_buf.getvalue()).decode("utf-8")
    # BUG FIX: the media type must be "image/<fmt>" -- "img/<fmt>" is not a
    # valid MIME type and some renderers refuse to display such data URIs.
    return f"data:image/{enc};base64, " + encoded
def pil_to_fig(im, showlegend=False, title=None):
    """Build a plotly figure showing *im* as a locked-aspect background image."""
    width, height = im.size
    fig = go.Figure()
    # Invisible corner markers so plotly's autoresize logic has data to fit.
    fig.add_trace(go.Scatter(
        x=[width * 0.05, width * 0.95],
        y=[height * 0.95, height * 0.05],
        showlegend=False, mode="markers", marker_opacity=0,
        hoverinfo="none", legendgroup='Image'))
    # The actual picture is a stretched background layout image.
    fig.add_layout_image(dict(
        source=pil_to_b64(im), sizing="stretch", opacity=1, layer="below",
        x=0, y=0, xref="x", yref="y", sizex=width, sizey=height,))
    # Hide the axes, lock the aspect ratio and put the origin top-left.
    fig.update_xaxes(showgrid=False, visible=False,
                     constrain="domain", range=[0, width])
    fig.update_yaxes(showgrid=False, visible=False,
                     scaleanchor="x", scaleratio=1,
                     range=[height, 0])
    fig.update_layout(title=title, showlegend=showlegend)
    return fig
def add_bbox(fig, x0, y0, x1, y1,
             showlegend=True, name=None, color=None,
             opacity=0.5, group=None, text=None):
    """Draw one rectangular bounding box on *fig* as a closed scatter line."""
    # trace the rectangle corner-by-corner, closing back at (x0, y0)
    xs = [x0, x1, x1, x0, x0]
    ys = [y0, y0, y1, y1, y0]
    fig.add_trace(go.Scatter(
        x=xs, y=ys, mode="lines", fill="toself", opacity=opacity,
        marker_color=color, hoveron="fills", name=name,
        hoverlabel_namelength=0, text=text, legendgroup=group,
        showlegend=showlegend,
    ))
# colors for visualization (repeated so any class id indexes a color)
COLORS = ['#fe938c', '#86e7b8', '#f9ebe0', '#208aae', '#fe4a49',
          '#291711', '#5f4b66', '#b98b82', '#87f5fb', '#63326e'] * 50

# Read the sample image URLs once at startup.  FIX: use a context manager so
# the file handle is closed deterministically instead of leaking until GC.
with open('random_urls.txt') as _urls_file:
    RANDOM_URLS = _urls_file.read().split('\n')[:-1]

print("Running on:", DEVICE)

# Start Dash
app = dash.Dash(__name__)
server = app.server  # Expose the server variable for deployments
# Page layout: title, URL input row with run/random buttons, the output
# graph, and a footer row with the NMS toggle + IoU and confidence sliders.
app.layout = html.Div(className='container', children=[
    Row(html.H1("Image Detection App")),

    Row(html.P("Input Image URL:")),
    Row([
        Column(width=8, children=[
            dcc.Input(id='input-url', style={'width': '100%'}, placeholder='Insert URL...'),
        ]),
        Column(html.Button("Run DETR", id='button-run', n_clicks=0), width=2),
        Column(html.Button("Random Image", id='button-random', n_clicks=0), width=2)
    ]),

    Row(dcc.Graph(id='model-output', style={"height": "70vh"})),

    Row([
        Column(width=7, children=[
            html.P('Non-maximum suppression (IoU):'),
            Row([
                Column(width=3, children=dcc.Checklist(
                    id='checklist-nms',
                    options=[{'label': 'Enabled', 'value': 'enabled'}],
                    value=[])),
                Column(width=9, children=dcc.Slider(
                    id='slider-iou', min=0, max=1, step=0.05, value=0.5,
                    marks={0: '0', 1: '1'})),
            ])
        ]),
        Column(width=5, children=[
            html.P('Confidence Threshold:'),
            dcc.Slider(
                id='slider-confidence', min=0, max=1, step=0.05, value=0.7,
                marks={0: '0', 1: '1'})
        ])
    ])
])
@app.callback(
    [Output('button-run', 'n_clicks'),
     Output('input-url', 'value')],
    [Input('button-random', 'n_clicks')],
    [State('button-run', 'n_clicks')])
def randomize(random_n_clicks, run_n_clicks):
    """Cycle to the next sample URL and bump the run counter so the
    detection callback fires on the new image."""
    url = RANDOM_URLS[random_n_clicks % len(RANDOM_URLS)]
    return run_n_clicks + 1, url
@app.callback(
    [Output('model-output', 'figure'),
     Output('slider-iou', 'disabled')],
    [Input('button-run', 'n_clicks'),
     Input('input-url', 'n_submit'),
     Input('slider-iou', 'value'),
     Input('slider-confidence', 'value'),
     Input('checklist-nms', 'value')],
    [State('input-url', 'value')])
def run_model(n_clicks, n_submit, iou, confidence, checklist, url):
    """Fetch *url*, run DETR detection and return the annotated figure.

    The callback declares two Outputs (figure, iou-slider-disabled), so
    every return path must yield both values.
    """
    apply_nms = 'enabled' in checklist
    try:
        im = Image.open(requests.get(url, stream=True).raw)
    except Exception:  # bad URL, network error, or non-image payload
        # BUG FIX: the error path previously returned only the figure,
        # which makes Dash fail with "callback returned wrong number of
        # values"; return both declared outputs.
        return go.Figure().update_layout(title='Incorrect URL'), not apply_nms

    tstart = time.time()
    scores, boxes = detect(im, detr, transform, device=DEVICE)
    scores, boxes = filter_boxes(scores, boxes, confidence=confidence,
                                 iou=iou, apply_nms=apply_nms)
    scores = scores.data.numpy()
    boxes = boxes.data.numpy()
    tend = time.time()

    fig = pil_to_fig(im, showlegend=True,
                     title=f'DETR Predictions ({tend-tstart:.2f}s)')
    existing_classes = set()
    for i in range(boxes.shape[0]):
        class_id = scores[i].argmax()
        label = CLASSES[class_id]
        # FIX: use a separate name so the `confidence` threshold argument is
        # not clobbered by the per-box score on later iterations
        box_conf = scores[i].max()
        x0, y0, x1, y1 = boxes[i]

        # only display a legend entry the first time a class appears
        showlegend = label not in existing_classes
        text = f"class={label}<br>confidence={box_conf:.3f}"

        add_bbox(
            fig, x0, y0, x1, y1,
            opacity=0.7, group=label, name=label, color=COLORS[class_id],
            showlegend=showlegend, text=text,
        )
        existing_classes.add(label)

    return fig, not apply_nms
if __name__ == '__main__':
    # debug=True enables hot reload; disable for production deployments
    app.run_server(debug=True)
|
#coding=utf-8
# Copyright 2017 - 2018 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import os
from abc import ABCMeta
from abc import abstractmethod
import logging
logger=logging.getLogger(__name__)
import torchvision
from torch.autograd import Variable
import torch.nn as nn
"""
The base model of the model.
"""
"""
Pytorch model
"""
class Model(object):
    """
    Base class of model to provide attack.

    Args:
        bounds(tuple): The lower and upper bound for the image pixel.
        channel_axis(int): The index of the axis that represents the color
                channel.
        preprocess(tuple): Two element tuple used to preprocess the input.
            First substract the first element, then divide the second element.
    """
    __metaclass__ = ABCMeta

    def __init__(self, bounds, channel_axis, preprocess=None):
        assert len(bounds) == 2
        assert channel_axis in [0, 1, 2, 3]

        self._bounds = bounds
        self._channel_axis = channel_axis

        # Normalize self._preprocess towards (0, 1) where possible so that
        # _process_input can skip the subtract and/or divide entirely.
        if preprocess is not None:
            sub, div = np.array(preprocess)
            if not np.any(sub):
                sub = 0
            if np.all(div == 1):
                div = 1
            assert (div is None) or np.all(div)
            self._preprocess = (sub, div)
        else:
            self._preprocess = (0, 1)

    def bounds(self):
        """
        Return the upper and lower bounds of the model.
        """
        return self._bounds

    def channel_axis(self):
        """
        Return the channel axis of the model.
        """
        return self._channel_axis

    def _process_input(self, input_):
        """Apply the (subtract, divide) preprocessing to *input_*.

        Returns *input_* unchanged (same object) when no preprocessing is
        needed.
        """
        res = None
        sub, div = self._preprocess
        if np.any(sub != 0):
            res = input_ - sub
        # BUG FIX: the divide step must be gated on `div`, not `sub`.  The
        # old check `np.all(sub == 1)` silently skipped the division whenever
        # the subtract constants happened to be all ones.
        if not np.all(div == 1):
            if res is None:  # "res = input_ - sub" was not executed
                res = input_ / div
            else:
                res /= div
        if res is None:  # neither subtract nor divide was needed
            return input_
        return res

    @abstractmethod
    def predict(self, data):
        """
        Calculate the prediction of the data.

        Args:
            data(numpy.ndarray): input data with shape (size,
            height, width, channels).
        Return:
            numpy.ndarray: predictions of the data with shape (batch_size,
            num_of_classes).
        """
        raise NotImplementedError

    @abstractmethod
    def num_classes(self):
        """
        Determine the number of the classes

        Return:
            int: the number of the classes
        """
        raise NotImplementedError

    @abstractmethod
    def gradient(self, data, label):
        """
        Calculate the gradient of the cross-entropy loss w.r.t the image.

        Args:
            data(numpy.ndarray): input data with shape (size, height, width,
            channels).
            label(int): Label used to calculate the gradient.
        Return:
            numpy.ndarray: gradient of the cross-entropy loss w.r.t the image
            with the shape (height, width, channel).
        """
        raise NotImplementedError

    @abstractmethod
    def predict_name(self):
        """
        Get the predict name, such as "softmax",etc.
        :return: string
        """
        raise NotImplementedError
# PyTorch model wrapper.  (Original Chinese comment said "load the pb file
# directly" -- apparently left over from the PaddlePaddle variant.)
class PytorchModel(Model):

    def __init__(self,
                 model,
                 loss,
                 bounds,
                 channel_axis=3,
                 nb_classes=10,
                 preprocess=None,
                 device=None):
        """Wrap a torch.nn.Module so AdvBox attacks can query it.

        :param model: the torch module under attack.
        :param loss: placeholder -- custom losses are not supported yet;
            `gradient` hard-codes CrossEntropyLoss.
        :param bounds: (lower, upper) pixel bounds, forwarded to `Model`.
        :param channel_axis: index of the color-channel axis.
        :param nb_classes: number of output classes.
        :param preprocess: (subtract, divide) preprocessing, see `Model`.
        :param device: None -> CUDA if available, else CPU; -1 -> CPU;
            any other value -> that CUDA device index.
        """
        import torch

        if preprocess is None:
            preprocess = (0, 1)

        super(PytorchModel, self).__init__(
            bounds=bounds, channel_axis=channel_axis, preprocess=preprocess)

        self._model = model
        # custom loss functions are not supported yet
        self._loss = loss
        self._nb_classes = nb_classes

        if not device:
            self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        elif device == -1:
            self._device = torch.device("cpu")
        else:
            self._device = torch.device("cuda:{}".format(device))

        print(self._device)

        logger.info("Finish PytorchModel init")

    # returns a plain numpy array (original Chinese comment: "return value
    # is a plain value")
    def predict(self, data):
        """
        Calculate the prediction of the data.

        Args:
            data(numpy.ndarray): input data with shape (size,
            height, width, channels).
        Return:
            numpy.ndarray: predictions of the data with shape (batch_size,
            num_of_classes).
        """
        import torch

        scaled_data = self._process_input(data)
        scaled_data = torch.from_numpy(scaled_data).to(self._device)

        # Run prediction
        predict = self._model(scaled_data)
        # if A3C, choose the action output; the value head is ignored
        # for evaluation
        if type(predict) == tuple:
            predict = predict[1]
        # NOTE(review): np.squeeze is applied before .detach(); this relies
        # on numpy accepting a live grad-tracking tensor, which recent torch
        # versions reject -- confirm the intended torch version.
        predict = np.squeeze(predict, axis=0)

        predict = predict.detach()

        predict = predict.cpu().numpy().copy()

        return predict

    # returns a torch tensor (original Chinese comment: "return value is
    # a tensor")
    def predict_tensor(self, data):
        """
        Calculate the prediction of the data.

        Args:
            data(numpy.ndarray): input data with shape (size,
            height, width, channels).
        Return:
            numpy.ndarray: predictions of the data with shape (batch_size,
            num_of_classes).
        """
        import torch

        # NOTE(review): unlike `predict`, *data* is expected to already be a
        # torch tensor here (no torch.from_numpy conversion) despite the
        # docstring saying numpy.ndarray -- confirm against callers.
        scaled_data = self._process_input(data).to(self._device)

        # Run prediction
        predict = self._model(scaled_data)

        return predict

    def num_classes(self):
        """
        Calculate the number of classes of the output label.

        Return:
            int: the number of classes
        """
        return self._nb_classes

    def gradient(self, data, label):
        """
        Calculate the gradient of the cross-entropy loss w.r.t the image.

        Args:
            data(numpy.ndarray): input data with shape (size, height, width,
            channels).
            label(int): Label used to calculate the gradient.
        Return:
            numpy.ndarray: gradient of the cross-entropy loss w.r.t the image
            with the shape (height, width, channel).
        """
        import torch

        scaled_data = self._process_input(data)

        scaled_data = torch.from_numpy(scaled_data).to(self._device)
        # track gradients on the input image itself
        scaled_data.requires_grad = True

        label = np.array([label])
        label = torch.from_numpy(label).to(self._device)

        # deal with multiple outputs (e.g. A3C returns a tuple)
        try:
            output = self.predict_tensor(scaled_data).to(self._device)
        except(AttributeError):
            output = self.predict_tensor(scaled_data)[1].to(self._device)

        # custom `self._loss` is ignored; the negative cross-entropy is
        # hard-coded (gradient-ascent direction for the attack)
        ce = nn.CrossEntropyLoss()
        loss = -ce(output, label)

        # compute the gradient (original Chinese comment)
        # Zero all existing gradients
        self._model.zero_grad()
        loss.backward()

        # the gradient is itself a tensor; convert it to numpy
        # (original Chinese comment)
        grad = scaled_data.grad.cpu().numpy().copy()

        return grad.reshape(scaled_data.shape)

    def predict_name(self):
        """
        Get the predict name, such as "softmax",etc.
        :return: string
        """
        # NOTE(review): self._predict_program / self._predict_name are never
        # set anywhere in this class (they look copied from the PaddlePaddle
        # wrapper), so calling this raises AttributeError -- confirm.
        return self._predict_program.block(0).var(self._predict_name).op.type
|
from pyehr.ehr.services.dbmanager.drivers.factory import DriversFactory
from pyehr.utils import get_logger
from pyehr.ehr.services.dbmanager.dbservices.index_service import IndexService
from pyehr.ehr.services.dbmanager.dbservices.wrappers import PatientRecord, ClinicalRecord
from pyehr.ehr.services.dbmanager.errors import CascadeDeleteError, RedundantUpdateError,\
RecordRestoreUnnecessaryError, OperationNotAllowedError, ConfigurationError
from pyehr.ehr.services.dbmanager.dbservices.version_manager import VersionManager
from collections import Counter
class DBServices(object):
"""
This class exports all the services used to manipulate patients' and clinical data
stored in the database. The DBServices class acts as middleware to the specific driver.
:ivar driver: the type of driver that must be used
:ivar host: hostname of the server running the DB
:ivar user: (optional) username to access the DB
:ivar passwd: (optional) password to access the DB
:ivar port: (optional) port used to contact the DB
:ivar database: the name of the database where data are stored
:ivar patients_repository: (optional) repository where patients' data are stored
:ivar ehr_repository: (optional) repository where clinical records data are stored
:ivar ehr_versioning_repository: (optional) repository where older versions of clinical
records are stored
:ivar logger: logger for the DBServices class, if no logger is provided a new one
is created
"""
def __init__(self, driver, host, database, versioning_database=None,
patients_repository=None, ehr_repository=None,
ehr_versioning_repository=None, port=None, user=None,
passwd=None, logger=None):
self.driver = driver
self.host = host
self.database = database
self.versioning_database = versioning_database
self.patients_repository = patients_repository
self.ehr_repository = ehr_repository
self.ehr_versioning_repository = ehr_versioning_repository
self.port = port
self.user = user
self.passwd = <PASSWORD>
self.index_service = None
self.logger = logger or get_logger('db_services')
self.version_manager = self._set_version_manager()
def _get_drivers_factory(self, repository):
return DriversFactory(
driver=self.driver,
host=self.host,
database=self.database,
repository=repository,
port=self.port,
user=self.user,
passwd=<PASSWORD>,
index_service=self.index_service,
logger=self.logger
)
def _set_version_manager(self):
return VersionManager(
driver=self.driver,
host=self.host,
database=self.database,
versioning_database=self.versioning_database,
ehr_repository=self.ehr_repository,
ehr_versioning_repository=self.ehr_versioning_repository,
index_service=self.index_service,
port=self.port,
user=self.user,
passwd=<PASSWORD>,
logger=self.logger
)
def _check_index_service(self):
if not self.index_service:
raise ConfigurationError('Operation not allowed, missing IndexService')
def set_index_service(self, url, database, user, passwd):
"""
Add a :class:`IndexService` to the current :class:`DBService` that will be used
to index clinical records
:param url: the URL of the :class:`IndexService`
:type url: str
:param database: the database used to store the indices
:type database: str
:param user: the user to access the :class:`IndexService`
:type user: str
:param passwd: the password to access the :class:`IndexService`
:type passwd: str
"""
self.index_service = IndexService(database, url, user, passwd, self.logger)
# update version manager as well
self.version_manager = self._set_version_manager()
def save_patient(self, patient_record):
"""
Save a patient record to the DB.
:param patient_record: patient record that is going to be saved
:type patient_record: :class:`PatientRecord`
:return: a :class:`PatientRecord` object
"""
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
patient_record.record_id = driver.add_record(driver.encode_record(patient_record))
return patient_record
def _set_structure_id(self, ehr_record):
ehr_data = ehr_record.ehr_data.to_json()
structure_id = self.index_service.get_structure_id(ehr_data)
ehr_record.structure_id = structure_id
    def save_ehr_record(self, ehr_record, patient_record, record_moved=False):
        """
        Save a clinical record into the DB and link it to a patient record

        :param ehr_record: EHR record that is going to be saved
        :type ehr_record: :class:`ClinicalRecord`
        :param patient_record: the reference :class:`PatientRecord` for the EHR record that
          is going to be saved
        :type patient_record: :class:`PatientRecord`
        :param record_moved: if True, the record has been moved from another patient, if
          False the record should be considered as a new one
        :type record_moved: bool
        :return: the :class:`ClinicalRecord` and the :class:`PatientRecord`
        """
        self._check_index_service()
        drf = self._get_drivers_factory(self.ehr_repository)
        ehr_record.bind_to_patient(patient_record)
        if ehr_record.is_persistent:
            if record_moved:
                # moving an existing record: record the new patient_id as a
                # versioned field update
                ehr_record = self.version_manager.update_field(ehr_record, 'patient_id',
                                                               ehr_record.patient_id, 'last_update')
            else:
                raise OperationNotAllowedError('An already mapped record can\'t be assigned to a patient')
        else:
            # saving a new record, this is the first revision
            ehr_record.increase_version()
            # calculate and set the structure ID for the given record
            self._set_structure_id(ehr_record)
            with drf.get_driver() as driver:
                try:
                    driver.add_record(driver.encode_record(ehr_record))
                except Exception, e:
                    # if a new structure was created, delete it (reference counter is 0)
                    self.index_service.check_structure_counter(ehr_record.structure_id)
                    raise e
            # record saved successfully: bump the structure's reference count
            self.index_service.increase_structure_counter(ehr_record.structure_id)
        patient_record = self._add_ehr_record(patient_record, ehr_record)
        return ehr_record, patient_record
    def save_ehr_records(self, ehr_records, patient_record, skip_existing_duplicated=False):
        """
        Save a batch of clinical records into the DB and link them to a patient record

        :param ehr_records: EHR records that are going to be saved
        :type ehr_records: list of :class:`ClinicalRecord` objects
        :param patient_record: the reference :class:`PatientRecord` for the EHR record that
          is going to be saved
        :type patient_record: :class:`PatientRecord`
        :param skip_existing_duplicated: if True, continue with the save operation even if one
          or more DuplicatedKeyError occur, if False raise an error
        :type skip_existing_duplicated: bool
        :return: a list with the saved :class:`ClinicalRecord`, the updated :class:`PatientRecord`
          and a list containing any records that caused a duplicated key error
        """
        self._check_index_service()
        drf = self._get_drivers_factory(self.ehr_repository)
        with drf.get_driver() as driver:
            for r in ehr_records:
                # calculate and set the structure ID for the given record
                self._set_structure_id(r)
                r.bind_to_patient(patient_record)
                if not r.is_persistent:
                    r.increase_version()
            encoded_records = [driver.encode_record(r) for r in ehr_records]
            try:
                saved, errors = driver.add_records(encoded_records, skip_existing_duplicated)
            except Exception, exc:
                # roll back any structures created for this batch (reference
                # counters at 0 get deleted by check_structure_counter)
                for ehr in ehr_records:
                    self.index_service.check_structure_counter(ehr.structure_id)
                raise exc
            errors = [driver.decode_record(e) for e in errors]
        # count how many saved records share each structure so the index
        # reference counters can be increased in one call per structure
        saved_struct_counter = Counter()
        for rec in ehr_records:
            if rec.record_id in saved:
                saved_struct_counter[rec.structure_id] += 1
        # NOTE(review): this collects record_ids, not structure_ids, yet it
        # is iterated as structures below -- confirm against VersionManager /
        # IndexService expectations.
        error_struct_counter = set([rec.record_id for rec in errors])
        for struct, counter in saved_struct_counter.iteritems():
            self.index_service.increase_structure_counter(struct, counter)
        for struct in error_struct_counter:
            self.index_service.check_structure_counter(struct)
        saved_ehr_records = [ehr for ehr in ehr_records if ehr.record_id in saved]
        patient_record = self._add_ehr_records(patient_record, saved_ehr_records)
        return saved_ehr_records, patient_record, errors
    def _add_ehr_record(self, patient_record, ehr_record):
        """
        Add an already saved :class:`ClinicalRecord` to the given :class:`PatientRecord`

        :param patient_record: the reference :class:`PatientRecord`
        :type patient_record: :class:`PatientRecord`
        :param ehr_record: the existing :class:`ClinicalRecord` that is going to be added to the
          patient record
        :type ehr_record: :class:`ClinicalRecord`
        :return: the updated :class:`PatientRecord`
        """
        # persist the link first, then mirror it on the in-memory object
        self._add_to_list(patient_record, 'ehr_records', ehr_record.record_id)
        patient_record.ehr_records.append(ehr_record)
        return patient_record
    def _add_ehr_records(self, patient_record, ehr_records):
        """
        Add a list of already saved :class:`ClinicalRecord`s to the given :class:`PatientRecord`

        :param patient_record: the reference :class:`PatientRecord`
        :type patient_record: :class:`PatientRecord`
        :param ehr_records: the existing :class:`ClinicalRecord`s that are going to be added to the
          patient record
        :type ehr_records: list
        :return: the updated :class:`PatientRecord`
        """
        # persist the links first, then mirror them on the in-memory object
        self._extend_list(patient_record, 'ehr_records', [ehr.record_id for ehr in ehr_records])
        patient_record.ehr_records.extend(ehr_records)
        return patient_record
def update_ehr_record(self, ehr_record):
"""
Save the given *ehr_record* as an update of the one already existing in the database.
Record's ID and version number will be used to match the proper object. Existing EHR
will be overwritten by the given one.
:param ehr_record: the :class:`ClinicalRecord` that will be saved as update
:type ehr_record: :class:`ClinicalRecord`
:return: the given *ehr_record* with proper *update_timestamp* and *version*
fields
:rtype: :class:`ClinicalRecord`
"""
if not ehr_record.is_persistent:
raise OperationNotAllowedError('Record %s is not mapped in the DB, unable to update' %
ehr_record.record_id)
return self.version_manager.update_record(ehr_record)
def _check_unecessary_restore(self, ehr_record):
if ehr_record.version == 1:
raise RecordRestoreUnnecessaryError('Record %s already at original revision' %
ehr_record.record_id)
elif ehr_record.version == 0:
raise OperationNotAllowedError('Record %s is not mapped in the DB, unable to restore' %
ehr_record.record_id)
def restore_ehr_version(self, ehr_record, version):
"""
Restore a specific *version* for the given *ehr_record*. All saved revisions that
are newer than *version* will be deleted from the :class:`VersionManager`.
:param ehr_record: the :class:`ClinicalRecord` that will be restored
:type ehr_record: :class:`ClinicalRecord`
:param version: the version of the record that will replace the one saved
within the DB
:type version: int
:return: the restored :class:`ClinicalRecord` and the count of the revisions
that have been deleted
"""
self._check_unecessary_restore(ehr_record)
return self.version_manager.restore_revision(ehr_record.record_id, version)
def restore_original_ehr(self, ehr_record):
"""
Restore the original version of the given *ehr_record*. All revisions
will be deleted from the :class:`VersionManager`.
:param ehr_record: the :class:`ClinicalRecord` that will be restored
:type ehr_record: :class:`ClinicalRecord`
:return: the restored :class:`ClinicalRecord` and the count of the revisions
that have been deleted
"""
self._check_unecessary_restore(ehr_record)
return self.version_manager.restore_original(ehr_record.record_id)
def restore_previous_ehr_version(self, ehr_record):
"""
Restore giver *ehr_record* to its previous revision.
:param ehr_record: the :class:`ClinicalRecord` that will be restored
:type ehr_record: :class:`ClinicalRecord`
:return: the restored :class:`ClinicalRecord`
"""
return self.restore_ehr_version(ehr_record, ehr_record.version-1)[0]
    def get_revision(self, ehr_record, version):
        """
        Get a specific *version* of the given *ehr_record*

        :param ehr_record: the :class:`ClinicalRecord` that will be used to retrieve the wanted *version*
        :type ehr_record: :class:`ClinicalRecord`
        :param version: the revision of the object that will be retrieved
        :type version: int
        :return: a :class:`ClinicalRecordRevision` object matching the selected *version* or None
          if no match is found
        :rtype: :class:`ClinicalRecordRevision` or None
        """
        # Lookup is keyed on (record_id, version); the version manager handles misses.
        return self.version_manager.get_revision(ehr_record.record_id, version)
def get_revisions(self, ehr_record, reverse_ordering=False):
"""
Get all revisions for the given *ehr_record* ordered from the older to the newer.
If *reverse_ordering* is True, revisions will be ordered from the newer to the older.
:param ehr_record: the :class:`ClinicalRecord` for which will be retrieved old revisions
:type ehr_record: :class:`ClinicalRecord`
:param reverse_ordering: if False (default) revisions will be ordered from the older to
the newer; if True the opposite ordering will be applied (newer to older).
:type reverse_ordering: bool
:return: an ordered list with all the revisions for the given *ehr_record*
:rtype: list
"""
return self.version_manager.get_revisions(ehr_record.record_id, reverse_ordering)
    def move_ehr_record(self, src_patient, dest_patient, ehr_record, reset_ehr_record_history=False):
        """
        Move a saved :class:`ClinicalRecord` from a saved :class:`PatientRecord` to another one

        :param src_patient: the :class:`PatientRecord` related to the EHR record that is going to be
          moved
        :type src_patient: :class:`PatientRecord`
        :param dest_patient: the :class:`PatientRecord` which will be associated with the the EHR record
        :type dest_patient: :class:`PatientRecord`
        :param ehr_record: the :class:`ClinicalRecord` that is going to be moved
        :type ehr_record: :class:`ClinicalRecord`
        :param reset_ehr_record_history: if True, reset EHR record history and delete all revisions, if False
          keep record's history and keep trace of the move event. Default value is False.
        :type reset_ehr_record_history: bool
        :return: the two :class:`PatientRecord` mapping the proper association to the EHR record
        """
        # Order matters: detach from the source patient first, then re-attach to
        # the destination. record_moved=True flags the save as a move event.
        ehr_record, src_patient = self.remove_ehr_record(ehr_record, src_patient,
                                                         reset_record=reset_ehr_record_history)
        ehr_record, dest_patient = self.save_ehr_record(ehr_record, dest_patient,
                                                        record_moved=True)
        return src_patient, dest_patient
    def remove_ehr_record(self, ehr_record, patient_record, reset_record=True):
        """
        Remove a :class:`ClinicalRecord` from a patient's records and delete
        it from the database if *reset_record* is True.

        :param ehr_record: the :class:`ClinicalRecord` that will be deleted
        :type ehr_record: :class:`ClinicalRecord`
        :param patient_record: the reference :class:`PatientRecord`
        :type patient_record: :class:`PatientRecord`
        :param reset_record: if True, reset ehr record (new ID and delete its revisions)
        :type reset_record: bool
        :return: the EHR record without an ID and the updated patient record
        :rtype: :class:`ClinicalRecord`, :class:`PatientRecord`
        """
        # First unlink the record ID from the stored patient document...
        self._remove_from_list(patient_record, 'ehr_records', ehr_record.record_id)
        # ...then drop the in-memory object from the patient's record list.
        patient_record.ehr_records.pop(patient_record.ehr_records.index(ehr_record))
        if reset_record:
            # Hard removal: delete the stored document (and, via reset_record=True,
            # its revision history), then clear the record's identity.
            self._delete_ehr_record(ehr_record, reset_record)
            ehr_record.reset()
        else:
            # Soft removal: keep the stored document, only detach it from the patient.
            ehr_record.patient_id = None
        return ehr_record, patient_record
    def remove_ehr_records(self, ehr_records, patient_record, reset_records=True):
        """
        Remove several :class:`ClinicalRecord` objects from a patient's records,
        deleting them from the database when *reset_records* is True.

        Bulk counterpart of :meth:`remove_ehr_record`.

        :param ehr_records: the :class:`ClinicalRecord` objects to be removed
        :type ehr_records: list
        :param patient_record: the reference :class:`PatientRecord`
        :type patient_record: :class:`PatientRecord`
        :param reset_records: if True, reset each EHR record (new ID, revisions deleted)
        :type reset_records: bool
        :return: the (possibly reset) EHR records and the updated patient record
        :rtype: list, :class:`PatientRecord`
        """
        # Unlink all record IDs from the stored patient document in one call...
        self._remove_from_list(patient_record, 'ehr_records', [ehr.record_id for ehr in ehr_records])
        # ...then drop each in-memory object from the patient's record list.
        for ehr in ehr_records:
            patient_record.ehr_records.pop(patient_record.ehr_records.index(ehr))
        if reset_records:
            # Hard removal: bulk-delete the stored documents, then clear identities.
            self._delete_ehr_records(ehr_records, reset_records)
            for ehr in ehr_records:
                ehr.reset()
        else:
            # Soft removal: keep the stored documents, only detach them.
            for ehr in ehr_records:
                ehr.patient_id = None
        return ehr_records, patient_record
def _get_active_records(self, driver):
return driver.get_records_by_value('active', True)
def _fetch_patient_data_full(self, patient_doc, fetch_ehr_records=True,
fetch_hidden_ehr=False):
drf = self._get_drivers_factory(self.ehr_repository)
with drf.get_driver() as driver:
patient_record = driver.decode_record(patient_doc)
ehr_records = []
for ehr in patient_record.ehr_records:
ehr_doc = driver.get_record_by_id(ehr.record_id)
if fetch_hidden_ehr or (not fetch_hidden_ehr and ehr_doc['active']):
self.logger.debug('fetch_hidden_ehr: %s --- ehr_doc[\'active\']: %s',
fetch_hidden_ehr, ehr_doc['active'])
ehr_records.append(driver.decode_record(ehr_doc, fetch_ehr_records))
self.logger.debug('ehr_records: %r', ehr_records)
else:
self.logger.debug('Ignoring hidden EHR record %r', ehr_doc['_id'])
patient_record.ehr_records = ehr_records
return patient_record
def get_patients(self, active_records_only=True, fetch_ehr_records=True,
fetch_hidden_ehr=False):
"""
Get all patients from the DB.
:param active_records_only: if True fetch only active patient records, if False get all
patient records from the DB
:type active_records_only: boolean
:param fetch_ehr_records: if True fetch connected EHR records as well, if False only EHR records'
IDs will be retrieved
:type fetch_ehr_records: boolean
:param fetch_hidden_ehr: if False only fetch active EHR records, if True fetch all EHR records
connected to the given patient record
:type fetch_hidden_ehr: boolean
:return: a list of :class:`PatientRecord` objects
"""
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
if not active_records_only:
patient_records = driver.get_all_records()
else:
patient_records = self._get_active_records(driver)
return [self._fetch_patient_data_full(r, fetch_ehr_records,
fetch_hidden_ehr) for r in patient_records]
def get_patient(self, patient_id, fetch_ehr_records=True, fetch_hidden_ehr=False):
"""
Load the :class:`PatientRecord` that matches the given ID from the DB.
:param patient_id: the ID of the record
:param fetch_ehr_records: if True fetch connected EHR records as well, if False only EHR records'
IDs will be retrieved
:type fetch_ehr_records: boolean
:param fetch_hidden_ehr: if False only fetch active EHR records, if True fetch all EHR records
connected to the given patient record
:type fetch_hidden_ehr: boolean
:return: the :class:`PatientRecord` matching the given ID or None if no matching record was found
"""
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
patient_record = driver.get_record_by_id(patient_id)
if not patient_record:
return None
return self._fetch_patient_data_full(patient_record, fetch_ehr_records,
fetch_hidden_ehr)
def get_ehr_record(self, ehr_record_id, patient_id):
"""
Load a `ClinicalRecord` that matches the given *ehr_record_id* and that belongs
to the `PatientRecord` with ID *patient_id*. If no record with *ehr_record_id* is found or
if record doesn't belong to *patient_id* None will be returned.
:param ehr_record_id: the ID of the clinical record
:param patient_id: the ID of the patient that the clinical record must belong to
:return: a :class:`ClinicalRecord` object or None
"""
drf = self._get_drivers_factory(self.ehr_repository)
with drf.get_driver() as driver:
try:
ehr_record = driver.decode_record(driver.get_record_by_id(ehr_record_id))
except TypeError:
return None
if ehr_record.patient_id != patient_id:
return None
else:
return ehr_record
def load_ehr_records(self, patient):
"""
Load all :class:`ClinicalRecord` objects connected to the given :class:`PatientRecord` object
:param patient: the patient record object
:type patient: :class:`PatientRecord`
:return: the :class:`PatientRecord` object with loaded :class:`ClinicalRecord`
:type: :class:`PatientRecord`
"""
drf = self._get_drivers_factory(self.ehr_repository)
with drf.get_driver() as driver:
ehr_records = [driver.get_record_by_id(ehr.record_id) for ehr in patient.ehr_records]
patient.ehr_records = [driver.decode_record(ehr) for ehr in ehr_records]
return patient
def hide_patient(self, patient):
"""
Hide a :class:`PatientRecord` object
:param patient: the patient record that is going to be hidden
:type patient: :class:`PatientRecord`
:return: the patient record
:rtype: :class:`PatientRecord`
"""
if patient.active:
for ehr_rec in patient.ehr_records:
try:
self.hide_ehr_record(ehr_rec)
except RedundantUpdateError:
# just ignore RedundantUpdateError, this means that the records
# is already hidden
self.logger.debug('Record %s is already hidden', ehr_rec.record_id)
pass
rec = self._hide_record(patient)
else:
rec = patient
return rec
def hide_ehr_record(self, ehr_record):
"""
Hide a :class:`ClinicalRecord` object
:param ehr_record: the clinical record that is going to be hidden
:type ehr_record: :class:`ClinicalRecord`
:return: the clinical record
:rtype: :class:`ClinicalRecord`
"""
if ehr_record.active:
rec = self._hide_record(ehr_record)
else:
rec = ehr_record
return rec
def delete_patient(self, patient, cascade_delete=False):
"""
Delete a patient from the DB. A patient record can be deleted only if it has no clinical record connected
or if the *cascade_delete* option is set to True. If clinical records are still connected and *cascade_delete*
option is set to False, a :class:`CascadeDeleteError` exception will be thrown.
:param patient: the patient record that is going to be deleted
:type patient: :class:`PatientRecord`
:param cascade_delete: if True connected `ClinicalRecord` objects will be deleted as well
:type cascade_delete: boolean
:raise: :class:`CascadeDeleteError` if a record with connected clinical record is going to be deleted and
cascade_delete option is False
"""
def reload_patient(patient):
return self.get_patient(patient.record_id, fetch_ehr_records=False,
fetch_hidden_ehr=True)
patient = reload_patient(patient)
if not cascade_delete and len(patient.ehr_records) > 0:
raise CascadeDeleteError('Unable to delete patient record with ID %s, %d EHR records still connected',
patient.record_id, len(patient.ehr_records))
else:
self._delete_ehr_records(patient.ehr_records)
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
driver.delete_record(patient.record_id)
return None
def _delete_ehr_record(self, ehr_record, reset_history=True):
drf = self._get_drivers_factory(self.ehr_repository)
with drf.get_driver() as driver:
driver.delete_record(ehr_record.record_id)
self.index_service.decrease_structure_counter(ehr_record.structure_id)
if reset_history:
self.version_manager.remove_revisions(ehr_record.record_id)
return None
def _delete_ehr_records(self, ehr_records, reset_history=True):
drf = self._get_drivers_factory(self.ehr_repository)
with drf.get_driver() as driver:
driver.delete_records_by_id([ehr.record_id for ehr in ehr_records])
struct_id_counter = Counter()
for rec in ehr_records:
struct_id_counter[rec.structure_id] += 1
for str_id, str_count in struct_id_counter.iteritems():
self.index_service.decrease_structure_counter(str_id, str_count)
if reset_history:
for ehr in ehr_records:
self.version_manager.remove_revisions(ehr.record_id)
return None
def _hide_record(self, record):
if isinstance(record, ClinicalRecord):
return self.version_manager.update_field(record, 'active', False, 'last_update')
else:
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
last_update = driver.update_field(record.record_id, 'active', False, 'last_update')
record.last_update = last_update
record.active = False
return record
def _add_to_list(self, record, list_label, element):
if isinstance(record, ClinicalRecord):
return self.version_manager.add_to_list(record, list_label, element, 'last_update')
else:
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
last_update = driver.add_to_list(record.record_id, list_label, element, 'last_update')
record.last_update = last_update
return record
def _extend_list(self, record, list_label, elements):
if isinstance(record, ClinicalRecord):
return self.version_manager.extend_list(record, list_label, elements, 'last_update')
else:
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
last_update = driver.extend_list(record.record_id, list_label, elements, 'last_update')
record.last_update = last_update
return record
def _remove_from_list(self, record, list_label, element):
if isinstance(record, ClinicalRecord):
return self.version_manager.remove_from_list(record, list_label, element, 'last_update')
elif isinstance(record, PatientRecord):
drf = self._get_drivers_factory(self.patients_repository)
with drf.get_driver() as driver:
last_update = driver.remove_from_list(record.record_id, list_label, element, 'last_update')
record.last_update = last_update
return record
else:
raise ValueError('Unable to handle object of class %s' % type(record))
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from django.test import TestCase
import mock
import datetime
from dateutil import tz
## Repository Test
from porchlightapi.models import Repository
# Constant values used for testing
# (identifier, timestamp, value) triple mimicking what a value-source callable
# returns for not-yet-deployed code.
UNDEPLOYED_VALUE_TUPLE = ('c9d2d5b79edd7d4acaf7172a98203bf3aee2586a',
                          datetime.datetime(year=1972, month=3, day=17, hour=8, minute=23, tzinfo=tz.tzutc()),
                          5)
# Same shape for the deployed side; its value (2) is deliberately lower than the
# undeployed one (5) so difference-based calculators have something to measure.
DEPLOYED_VALUE_TUPLE = ('ba60a64b151e402a9f08f95710ec09db4649eb2e',
                        datetime.datetime(year=1972, month=2, day=29, hour=10, minute=45, tzinfo=tz.tzutc()),
                        2)
class RepositoryTestCase(TestCase):
    """Tests for the Repository model's value-source and value-calculator hooks."""

    def setUp(self):
        # Create a repository object for us to test; the *_source and
        # value_calculator fields are dotted paths looked up at call time.
        Repository.objects.create(
            url='https://github.com/cfpb/porchlight',
            name='Porchlight',
            project='System Tools',
            deployed_value_source='porchlightapi.sources.random_source',
            undeployed_value_source='porchlightapi.sources.random_source',
            value_calculator='porchlightapi.sources.difference_value_calculator')

    @mock.patch("porchlightapi.sources.random_source")
    def test_undeployed_value_source(self, random_source):
        """
        Test that the model's undeployed_value() function correctly
        uses the lookup function to get and run the mock data source
        function.
        """
        random_source.return_value = UNDEPLOYED_VALUE_TUPLE
        test_repo = Repository.objects.get(url='https://github.com/cfpb/porchlight')
        undeployed_value_tuple = test_repo.undeployed_value()
        # The (identifier, datetime, value) triple must pass through unchanged.
        self.assertEqual(undeployed_value_tuple[0], UNDEPLOYED_VALUE_TUPLE[0])
        self.assertEqual(undeployed_value_tuple[1], UNDEPLOYED_VALUE_TUPLE[1])
        self.assertEqual(undeployed_value_tuple[2], UNDEPLOYED_VALUE_TUPLE[2])

    @mock.patch("porchlightapi.sources.random_source")
    def test_deployed_value_source(self, random_source):
        """
        Test that the model's deployed_value() function correctly
        uses the lookup function to get and run the mock data source
        function.
        """
        random_source.return_value = DEPLOYED_VALUE_TUPLE
        test_repo = Repository.objects.get(url='https://github.com/cfpb/porchlight')
        deployed_value_tuple = test_repo.deployed_value()
        self.assertEqual(deployed_value_tuple[0], DEPLOYED_VALUE_TUPLE[0])
        self.assertEqual(deployed_value_tuple[1], DEPLOYED_VALUE_TUPLE[1])
        self.assertEqual(deployed_value_tuple[2], DEPLOYED_VALUE_TUPLE[2])

    @mock.patch("porchlightapi.sources.difference_value_calculator")
    def test_value(self, difference_value_calculator):
        """
        Test that the model's value() function correctly uses the lookup function
        to get and run the value calculator function.
        """
        # The mocked calculator returns 3, which equals 5 - 2 (undeployed minus
        # deployed values from the constant tuples above).
        difference_value_calculator.return_value = 3
        test_repo = Repository.objects.get(url='https://github.com/cfpb/porchlight')
        self.assertEqual(test_repo.value(UNDEPLOYED_VALUE_TUPLE, DEPLOYED_VALUE_TUPLE),
                         5 - 2)
## Value Data Points
from porchlightapi.models import ValueDataPointManager
class ValueDataPointManagerTestCase(TestCase):
    """Tests for ValueDataPointManager.create_datapoint()."""

    @mock.patch('porchlightapi.models.ValueDataPoint')
    def test_create_datapoint(self, ValueDataPoint):
        """
        Test the ValueDataPointManager's creation of ValueDataPoint
        objects from Repository objects. The manager should populate
        the ValueDataPoint using the Repository's value methods, which
        call the appropriate callables.
        """
        # Create a mock repository to pass to the ValueDataPointManager
        # create_datapoint() method with the appropriate return values.
        mock_repository = mock.create_autospec(Repository)
        mock_repository.undeployed_value.return_value = UNDEPLOYED_VALUE_TUPLE
        mock_repository.deployed_value.return_value = DEPLOYED_VALUE_TUPLE
        mock_repository.value.return_value = 3
        # We want to test that the create_datapoint method extracts the correct
        # values from the repository and calls the default create() method with
        # those values.
        objects = ValueDataPointManager()
        objects.create = mock.MagicMock()
        datapoint = objects.create_datapoint(mock_repository)
        # Each field of the datapoint must come from the matching tuple slot:
        # [0] identifier, [1] datetime, [2] value.
        objects.create.assert_called_with(
            repository=mock_repository,
            undeployed_identifier=UNDEPLOYED_VALUE_TUPLE[0],
            undeployed_datetime=UNDEPLOYED_VALUE_TUPLE[1],
            undeployed_value=UNDEPLOYED_VALUE_TUPLE[2],
            deployed_identifier=DEPLOYED_VALUE_TUPLE[0],
            deployed_datetime=DEPLOYED_VALUE_TUPLE[1],
            deployed_value=DEPLOYED_VALUE_TUPLE[2],
            value=3)
## Test Data Sources
import datetime
from porchlightapi.sources import github_commit_source
from porchlightapi.sources import github_tag_source
from porchlightapi.sources import json_file_source
class GithubDataSourceTestCase(TestCase):
    """Tests for the Github-backed value-source functions, with requests mocked."""

    def setUp(self):
        """
        Set up the mock request responses for Github.
        """
        # Call to /repos/porchlight is only interested in size
        self.mock_repo_response = mock.MagicMock()
        self.mock_repo_response.json.return_value = {u'size': 1619,}
        # Call to /repos/porchlight/branches/master is used to
        # get last commit SHA and URL
        self.mock_branches_response = mock.MagicMock()
        # BUG FIX: the SHA literal had been mangled into placeholder text; it
        # must match the commit SHA in the URL below, which the tests assert on.
        self.mock_branches_response.json.return_value = {u'commit':
            {u'sha': u'130df1874519c11a79ac4a2e3e6671a165860441',
             u'url': u'https://api.github.com/repos/cfpb/porchlight/commits/130df1874519c11a79ac4a2e3e6671a165860441'}
        }
        # Call to /repos/porchlight/tags is used to get latest commit SHA and
        # tag name
        self.mock_tags_response = mock.MagicMock()
        self.mock_tags_response.json.return_value = [{
            u'commit': {u'sha': u'130df1874519c11a79ac4a2e3e6671a165860441'},
            u'name': u'v0.1.0'
        },]
        # A tag whose name does not look like a version, to exercise the
        # "no matching tag" path.
        self.mock_no_tags_response = mock.MagicMock()
        self.mock_no_tags_response.json.return_value = [{
            u'commit': {u'sha': u'130df1874519c11a79ac4a2e3e6671a165860441'},
            u'name': u'atag'
        },]
        # Call to the commit itself /repos/porchlight/commits/130df1874519c11a79ac4a2e3e6671a165860441
        # is used to get the date and file data
        self.mock_commit_response = mock.MagicMock()
        self.mock_commit_response.json.return_value = {
            u'commit': {u'committer': {u'date': u'2015-01-26 21:44:20Z',},},
            u'files': [
                {'additions': 1, 'deletions': 2, 'changes': 3},
                {'additions': 4, 'deletions': 5, 'changes': 6},
                {'additions': 7, 'deletions': 8, 'changes': 9},
            ]
        }
        # NOTE: '01'-style literals replaced with plain '1'; they are octal
        # notation in Python 2 and a syntax error in Python 3.
        self.test_date = datetime.datetime(year=2015, month=1, day=26, hour=21,
                                           minute=44, second=20, tzinfo=tz.tzutc())
        # A mock repository with a URL
        self.mock_repository = mock.create_autospec(Repository)
        self.mock_repository.url = 'https://github.com/cfpb/porchlight'

    @mock.patch("requests.get")
    def test_github_commit_source(self, mock_request_get):
        # Test that our Github source function correctly constructs URLs by
        # mocking requests.get()
        # There should be 3 calls to request.get(), one for the repository (to
        # get size), one for branches, and one for commits.
        # XXX: Because we're not using the repo size, it's been commented out to
        # reduce API hits.
        mock_request_get.side_effect = [
            # self.mock_repo_response,
            self.mock_branches_response,
            self.mock_commit_response
        ]
        source_tuple = github_commit_source(self.mock_repository)
        self.assertEqual(source_tuple[0], '130df1874519c11a79ac4a2e3e6671a165860441')
        self.assertEqual(source_tuple[1], self.test_date)
        # 15 = total changes across the three mocked files (3 + 6 + 9... per source logic).
        self.assertEqual(source_tuple[2], 15)

    @mock.patch("requests.get")
    def test_github_tag_source(self, mock_request_get):
        # Test that our Github source function correctly constructs URLs by
        # mocking requests.get().
        # For tags there should be two calls to request.get(), one for tags and
        # then one for the commit for the tag we're interested in.
        mock_request_get.side_effect = [
            self.mock_tags_response,
            self.mock_commit_response
        ]
        source_tuple = github_tag_source(self.mock_repository)
        self.assertEqual(source_tuple[0], '130df1874519c11a79ac4a2e3e6671a165860441')
        self.assertEqual(source_tuple[1], self.test_date)
        self.assertEqual(source_tuple[2], 15)
        # Now test that if there is no tag that matches our pattern that we
        # return as close to a 'no-value' as we can.
        mock_request_get.side_effect = [
            self.mock_no_tags_response,
            self.mock_commit_response
        ]
        source_tuple = github_tag_source(self.mock_repository)
        self.assertEqual(source_tuple[0], '')
        self.assertEqual(source_tuple[1], None)
        self.assertEqual(source_tuple[2], 0)

    @mock.patch("__builtin__.open")
    @mock.patch("json.load")
    @mock.patch("requests.get")
    def test_repo_json(self, mock_request_get, mock_json_load, mock_open):
        # (removed an unused local 'test_date'; the assertions below use
        # self.test_date from setUp)
        # We just want to ignore the call to open() altogether
        mock_open.return_value = None
        # Mock the contents of the json file.
        mock_json_load.return_value = [{u'commit': '130df1874519c11a79ac4a2e3e6671a165860441',
                                        u'repo': 'https://github.com/CFPB/porchlight.git',
                                        u'date': u'Mon Jan 26 21:44:20 UTC 2015'},]
        # Mock the requests.get() calls to github API. This differs from
        # github_commit_source() above because we get the commit SHA from the
        # json data rather than from the tip of a branch.
        mock_request_get.side_effect = [
            # self.mock_repo_response,
            self.mock_commit_response
        ]
        source_tuple = json_file_source(self.mock_repository)
        self.assertEqual(source_tuple[0], '130df1874519c11a79ac4a2e3e6671a165860441')
        self.assertEqual(source_tuple[1], self.test_date)
        self.assertEqual(source_tuple[2], 15)
## Test Value Calculators
from django.db import models
from porchlightapi.sources import incremental_value_calculator
class ValueCalculatorTestCase(TestCase):
def test_incremental_value_calculator(self):
mock_repository = mock.MagicMock()
mock_repository.datapoints = mock.MagicMock()
# Test an empty list of datapoints — should return the undeployed value
# tuple's value.
mock_repository.datapoints.all.return_value = []
value = incremental_value_calculator(mock_repository,
UNDEPLOYED_VALUE_TUPLE, DEPLOYED_VALUE_TUPLE)
self.assertEqual(value, 5)
# Test a prior datapoint to make sure its value is incremented by the
# undeployed value tuple's value.
mock_last_datapoint = mock.MagicMock()
mock_last_datapoint.value = 2
mock_repository.datapoints.all.return_value = [mock_last_datapoint,]
value = incremental_value_calculator(mock_repository,
UNDEPLOYED_VALUE_TUPLE, DEPLOYED_VALUE_TUPLE)
self.assertEqual(value, 7)
# Test the same value tuple to simulate deployed and undeployed being on
# the same commit, to make sure the returned value is 0.
value = incremental_value_calculator(mock_repository,
UNDEPLOYED_VALUE_TUPLE, UNDEPLOYED_VALUE_TUPLE)
self.assertEqual(value, 0)
|
<gh_stars>10-100
import shutil
import subprocess
from enum import Enum
import pytest
from laia.common.arguments import DecodeArgs
from laia.scripts.htr.decode_ctc import get_args
def test_get_args():
    # get_args should parse positionals plus dotted-key options into a dict of
    # grouped argument objects (common/data/decode/...).
    args = get_args(
        argv=[
            "syms",
            "img_list",
            "--common.checkpoint=model.ckpt",
            "--data.color_mode=RGBA",
            "--decode.use_symbols=false",
            "--decode.convert_spaces=true",
        ]
    )
    assert isinstance(args, dict)
    assert isinstance(args["decode"], DecodeArgs)
    assert args["img_list"] == "img_list"
    assert args["common"].checkpoint == "model.ckpt"
    # img_dirs was not given, so it stays at its null default.
    assert args["img_dirs"] is None
    assert not args["decode"].use_symbols
    # Defaults that the argv above did not override.
    assert args["decode"].separator == " "
    assert args["decode"].join_string == " "
    assert args["decode"].convert_spaces
    # color_mode is parsed into an Enum member, not a plain string.
    assert args["data"].color_mode == "RGBA"
    assert issubclass(type(args["data"].color_mode), Enum)
    assert args["decode"].output_space == " "
@pytest.mark.parametrize(
    "arg",
    [
        None,
        "--data.batch_size=0",
        "--data.color_mode=rgb",
        "--decode.include_img_ids=t",
        "--decode.use_symbols=f",
        "--decode.convert_spaces=1",
    ],
)
def test_invalid_args(arg):
    """Every invalid invocation must make the parser exit."""
    # None exercises the missing-positionals failure; the rest are malformed
    # option values appended after valid positionals.
    if arg is None:
        argv = []
    else:
        argv = ["syms", "img_list", arg]
    with pytest.raises(SystemExit):
        get_args(argv=argv)
def test_entry_point():
    """Check the installed console script exists and its --help text is sane."""
    proc = subprocess.run(
        [shutil.which("pylaia-htr-decode-ctc"), "-h"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Renamed from 'help', which shadowed the builtin of the same name.
    help_text = proc.stdout.decode()
    # Collapse all whitespace so the assertions are layout-independent.
    help_text = " ".join(help_text.split())
    assert help_text.startswith("usage: pylaia-htr-decode-ctc")
    assert "syms img_list" in help_text
    assert "--common.monitor {va_loss,va_cer,va_wer}" in help_text
    assert "Batch size (type: int v>0, default: 8)" in help_text
    assert "--data.color_mode {L,RGB,RGBA}" in help_text
    assert "Decode arguments:" in help_text
    assert "type: Union[str, null], default: null" in help_text
    assert "(type: str, default: <space>)" in help_text
    assert "--decode.segmentation {char,word,null}" in help_text
expected_config = """common:
checkpoint: null
experiment_dirname: experiment
model_filename: model
monitor: va_cer
seed: 74565
train_path: ''
data:
batch_size: 8
color_mode: L
decode:
convert_spaces: false
include_img_ids: true
input_space: <space>
join_string: ' '
output_space: ' '
segmentation: null
separator: ' '
use_symbols: true
img_dirs: null
img_list: null
logging:
filepath: null
fmt: '[%(asctime)s %(levelname)s %(name)s] %(message)s'
level: INFO
overwrite: false
to_stderr_level: ERROR
syms: null"""
def test_config_output():
    """--print_config must emit the default configuration shown above."""
    proc = subprocess.run(
        [shutil.which("pylaia-htr-decode-ctc"), "--print_config"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    printed = proc.stdout.decode().strip()
    # Only check the prefix: the trainer section's content may vary by version.
    assert printed.startswith(expected_config + "\ntrainer:")
def test_config_input(tmpdir):
    """get_args must honor a --config file plus command-line overrides."""
    config_file = tmpdir / "config"
    config_file.write_text(expected_config, "utf-8")
    argv = [f"--config={config_file}", "a", "b", "--img_dirs=[]", "--decode.join_string=null"]
    parsed = get_args(argv)
    # Positionals override the nulls in the config file...
    assert parsed["syms"] == "a"
    assert parsed["img_list"] == "b"
    # ...and explicit command-line options override config values.
    assert not parsed["img_dirs"]
    assert parsed["decode"].join_string is None
|
<filename>codes/run_main_v2.py
# %%
"""# Interdependent Network Mitigation and Restoration Decision-making (Complete Analysis Dashboard) This notebook
finds mitigation actions and restoration plans for synthetic or infrastructure interdependent networks subject to
different initial seismic damage scenarios. Various restoration decision-making models are considered here:
* Centralized methods: These methods solve one optimization problem for the whole interdependent network, which leads
to the optimal mitigation and restoration plan. Such models assume that the decision-maker is one entity that has
complete information and authority to restore all layers of the interdependent network. These methods build upon
Interdependent Network Design Problem (INDP) [cite] and time-dependent INDP (td-INDP) [cite].
"""
# %%
import os
import matplotlib.pyplot as plt
import runutils_v2
import dindputils_v2
import plots
import pickle
# Close any figures left over from a previous notebook/script run.
plt.close('all')
# %%
"""
## Run a toy example using different methods
"""
# %%
# runutils_v2.run_sample_problems()
# %%
"""
## Run different methods for a given set of networks and a host of initial damage scenarios, post-process the outputs,
and plot the result
### Input/Output file addresses
1. `BASE_DIR`: the address of the folder where the basic network information (topology, parameters, etc.) are stored
2. `DAMAGE_DIR`: the address of the folder where the damage information are stored
3. `OUTPUT_DIR`: the address of the folder where the output files should be written
4. `FILTER_SCE`(optional): The address of the list of scenarios that should be included in the analyses. It is used to
remove less damaging scenarios from the list of damage scenarios. Set it to *None* if you don't want to use this option.
"""
# %%
# Active paths are the uncommented assignments; the commented alternatives are
# previously used locations kept for convenience when switching datasets.
BASE_DIR = "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Seaside/Node_arc_info_v2/"
# '/home/hesam/Desktop/Files/Generated_Network_Dataset_v4.1/'
# "../data/Extended_Shelby_County/"
# "../data/Extended_Shelby_County_dp/"
# 'C:/Users/ht20/Box Sync/Shelby County Database/Node_arc_info'
# "C:/Users/ht20/Documents/Files/Generated_Network_Dataset_v4.1/"
# "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Seaside/Node_arc_info/"
DAMAGE_DIR = "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Seaside/Damage_scenarios/cumulative_500yr_initial_damage/"
# '/home/hesam/Desktop/Files/Generated_Network_Dataset_v4.1/'
# ../data/random_disruption_shelby/"
# "../data/Wu_Damage_scenarios/"
# "C:/Users/ht20/Documents/Files/Generated_Network_Dataset_v4.1/"
# 'C:/Users/ht20/Box Sync/Shelby County Database/Damage_scenarios'
# "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Seaside/Damage_scenarios/eq_1000yr_initial_damage/"
OUTPUT_DIR = '../results/'
# '/home/hesam/Desktop/Files/Game_synthetic/v4.1/results_temp/'
# '/home/hesam/Desktop/Files/Game_Shelby_County/results/'
# 'C:/Users/ht20/Documents/Files/Auction_Extended_Shelby_County_Data/results/'
# '../results/'
# 'C:/Users/ht20/Documents/Files/Game_synthetic/v4.1/results_temp/'
# 'C:/Users/ht20/Documents/Files/Game_Shelby_County/results/'
# 'C:/Users/ht20/Documents/Files/Shelby_data_paper/Restoration_results/'
# FAIL_SCE_PARAM['TOPO']+'/results/'
# None disables scenario filtering; otherwise point to a CSV listing the
# scenarios to keep.
FILTER_SCE = None
# '../data/damagedElements_sliceQuantile_0.90.csv'
# %%
'''
### Set analysis dictionaries
1. `FAIL_SCE_PARAM`: stores information on the type of initial damage scenarios. This dictionary should have the
following items:
1. `TYPE`: type of the initial damage network. Options are: `WU` (for the infrastructure of Shelby County, TN),
`synthetic` (for the synthetic interdependent networks), and 'from_csv' (for general use).
2. `L1_RANGE`: the damage scenarios for all datasets come in a two-level format. The implication of each level is
different based on the application. For example, for infrastructure damage scenarios, level 1 differentiates between
damge scenarios with different magnitudes, while level 2 includes samples of damage scenarios based on events with
the same magnitudes. `L1_RANGE` sets the range of the scenarios in the first level that should be included in the
analysis.
3. `L2_RANGE`: sets the range of the scenarios in the second level that should be included in the analysis.
4. `BASE_DIR`: sets the folder where the basic network information is stored.
5. `DAMAGE_DIR`: sets the folder where the damage information is stored
6. `FILTER_SCE` (optional): sets a given list of scenarios that should be included in the analyses and exclude the
rest (mostly used with **WU format** below).
7. `TOPO` (only when `TYPE`=*synthetic*): sets the topology of the synthetic networks that should be analyzed
<br><br>
Examples:
* **WU**: this is the new format that is designed by <NAME> and used in the Shelby County data paper
:cite:`Talebiyan2021`. The damage data for this dataset comes in a format similar to the hazard maps from <NAME>
:cite:`Wu2017`, which consist of N sets (`L2_RANGE`) of M damage scenarios (`L1_RANGE`). For shelby county,
for example, N=50 and M=96. To use this format, set the dictionary to:<br>
`{'TYPE':"WU", 'L1_RANGE':range(96), 'L2_RANGE':range(50),'FILTER_SCE':FILTER_SCE, 'BASE_DIR':BASE_DIR,
'DAMAGE_DIR':DAMAGE_DIR}`
* **from_csv**: for this type, the damage data come in the form of two csv files that contain all damage data
for nodes and arcs. This is a more compressed representation of damage data. In this format, there is only one
`L1_RANGE`=0, and `L2_RANGE` defines all scenarios that should be analyzed. To use this format, set the dictionary
to:<br>
`{'TYPE':"from_csv", 'L2_RANGE':range(100), 'L1_RANGE':range(0, 1), 'FILTER_SCE':None, 'BASE_DIR':BASE_DIR,
'DAMAGE_DIR':DAMAGE_DIR}`
<br><br>
:<br><br>
* **synthetic**: this type is employed only for the synthetic network dataset. In this format, network data and
damage data are in the same folder, and hence, `BASE_DIR`= `DAMAGE_DIR`. Also, `L1_RANGE` represents the range of
network configuration, and `L2_RANGE` sets the range of sample networks of each configuration in the analysis.
To use this format, set the dictionary to:<br>
`{'TYPE':"synthetic", 'L2_RANGE':range(0, 1), 'L1_RANGE':range(0, 100), 'FILTER_SCE':None, 'TOPO':'General',
'BASE_DIR':BASE_DIR, 'DAMAGE_DIR':DAMAGE_DIR}`
<br><br>
2. `DYNAMIC_PARAMS`: sets the features of the models that incorporate dynamic parameters
into the analysis. Set it to *None* if you want to use static parameters that are
constant for different time steps. So far, we only have one type of dynamic parameters,
which is the dynamic demand that is calculated based on population dislocation models,
for which, the dictionary should have the following items:
1. `TYPE`: type of the dislocation data (see below).
2. `RETURN`: type of the model for the return of the dislocated population. Options
are *step_function* and *linear*.
3. `DIR`: sets the folder where the dislocation data are stored.
4. `TESTBED` (only when `TYPE`=*incore*) : sets the name of the testbed in analysis.
<br><br>
There are two types of dislocation data:
* **shelby_adopted**: this is a precalculated dictionary that stores stylized
dislocation data for the Shelby County dataset, and the code reads those files.
To use this type, set the dictionary to:<br>
`{'TYPE': 'shelby_adopted', 'RETURN': 'step_function',
'DIR': 'C:/Users/ht20/Documents/Files/dynamic_demand/'}`
* **incore**: this type uses the population dislocation models and household
unit allocation data from IN-CORE (stored locally) to calculate demand values
in each time step of the analysis. To use this type, set the dictionary to:<br>
`{'TYPE': 'incore', 'RETURN': 'step_function', 'TESTBED':'Joplin',
'DIR': 'C:/Users/ht20/Documents/GitHub/NIST_testbeds/'}`
<br><br>
4. `EXTRA_COMMODITY`: Multi-commodity parameters dict
'''
# %%
# --- Damage-scenario and dynamic-demand configuration ---------------------
# FAIL_SCE_PARAM selects which damage scenarios are read (see the markdown
# cell above for the meaning of each key).  Alternative configurations for
# the synthetic and WU formats are kept here commented out.
# FAIL_SCE_PARAM = {'TYPE': "synthetic", 'L2_RANGE': range(5), 'L1_RANGE': range(100), 'TOPO': 'General',
#                   'BASE_DIR': BASE_DIR, 'FILTER_SCE': FILTER_SCE, 'DAMAGE_DIR': DAMAGE_DIR}
# FAIL_SCE_PARAM = {'TYPE': "WU", 'L2_RANGE': range(7), 'L1_RANGE': range(3), 'BASE_DIR': BASE_DIR,
#                   'DAMAGE_DIR': DAMAGE_DIR, 'FILTER_SCE': FILTER_SCE}
FAIL_SCE_PARAM = {'TYPE': "from_csv", 'L2_RANGE': range(0, 30), 'L1_RANGE': [500],
                  'FILTER_SCE': None, 'BASE_DIR': BASE_DIR, 'DAMAGE_DIR': DAMAGE_DIR}
DYNAMIC_PARAMS = None  # None = static demand; overwritten below for Seaside
# ROOT_DISLOC = "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Joplin/"
# POP_DISLOC_DATA = ROOT_DISLOC+'Joplin_testbed/pop-dislocation-results.csv'
# DYNAMIC_PARAMS = {'TYPE': 'incore', 'RETURN': 'step_function', 'TESTBED':'joplin',
#                   'OUT_DIR': BASE_DIR, 'POP_DISLOC_DATA': POP_DISLOC_DATA ,
#                   'MAPPING': {'POWER': ROOT_DISLOC+'/Power/Joplin interdependency table - buildings,\
#                               substations, and poles/Joplin_interdependency_table.csv'}}
# Population-dislocation-based (IN-CORE) dynamic demand for the Seaside testbed.
ROOT_DISLOC = "C:/Users/ht20/Documents/GitHub/NIST_testbeds/Seaside/"
DYNAMIC_PARAMS = {'TYPE': 'incore', 'RETURN': 'step_function', 'TESTBED': 'seaside', 'OUT_DIR': OUTPUT_DIR,
                  'POP_DISLOC_DATA': ROOT_DISLOC + 'Seaside_notebook/output/500yr/',
                  'MAPPING': {'POWER': ROOT_DISLOC + 'Power/bldgs2elec_Seaside.csv',
                              'WATER': ROOT_DISLOC + 'Water/bldgs2wter_Seaside.csv'}}
# Adjust output and base dir for synthetic database based on `FAIL_SCE_PARAM`
SYNTH_DIR = None
if FAIL_SCE_PARAM['TYPE'] == 'synthetic':
    SYNTH_DIR = BASE_DIR + FAIL_SCE_PARAM['TOPO'] + 'Networks/'
    OUTPUT_DIR += FAIL_SCE_PARAM['TOPO'] + '/results/'
EXTRA_COMMODITY = None  # e.g. {1: ['PW'], 3: []} for multi-commodity runs
# %%
'''
### Set analysis parameters
1. `T`: number of time steps of the analysis.
2. `RC`: list of resource caps or the number of available resources in each step of the
analysis. Each item of the list is a dictionary whose items show the type of resource and the available number of that
type of resource. For example:
* If `FAIL_SCE_PARAM[TYPE']`=*synthetic*, this item is not necessary since `R_c` is
adjusted for each configuration. Set it to to `R_c`=[0]
* If `FAIL_SCE_PARAM[TYPE']`=*WU* or *ANDRES* or *from_csv*, you have two options:
* if, for example, `R_c`= [{'budget': 3}, {'budget': 6}], then the analysis is done for the cases
when there are 3 and 6 resources available of type 'budget' (total resource assignment). If the name of resource is
set to '', the results will be consistent with older version of the code, where only one type of resource was
considered.
* if, for example, `R_c`= [{'budget': {1:1, 2:1}}, {'budget': {1:1, 2:2}}, {'budget': {1:3, 2:3}}] and given there
are 2 layers, then the analysis is done for the case where each layer gets 1 resource of type 'budget', AND
the case where layer 1 gets 1 and layer 2 gets 2 resources of type 'budget', AND
the case where each layer gets 3 resource of type 'budget' (Prescribed resource for each layer).
3. `LAYERS`: list of layers in the analysis.
* If `FAIL_SCE_PARAM[TYPE']`=*synthetic*, this item is not necessary. `LAYERS` is
adjusted for each configuration. Set it to to `LAYERS`=[0]
'''
# %%
# Number of restoration time steps in the analysis.
T = 15
# Resource caps per time step: one dict per analyzed case, mapping resource
# type ('budget', 'time') to {time step: available amount}.
RC = [{'budget': {t: 245733 for t in range(T)}, 'time': {t: 115 for t in range(T)}}]  # 349215*(+1/13*(t-1)+.5)
# No resources at step 0 (nothing is repaired before the analysis starts).
RC[0]['budget'][0] = 0
RC[0]['time'][0] = 0
# RC = [{'': {t: 4 for t in range(T)}}]
# RC[0][''][0] = 0
LAYERS = [3]  # network layers included in the analysis
# %%
'''
### Run method(s)
There are ??? choices of method:
1. `INMRP`: runs Interdependent Network Mitigation and Restoration Problem (INMRP) with is based on time-dependent
Interdependent Network Design Problem INDP (td-INDP) [cite]. To run this method,
you have to call:<br>
`runutils_v2.run_method(FAIL_SCE_PARAM, RC, T, LAYERS, method='INMRP', output_dir=OUTPUT_DIR,
misc={'DYNAMIC_PARAMS': DYNAMIC_PARAMS, 'EXTRA_COMMODITY': EXTRA_COMMODITY, 'TIME_RESOURCE': False})`
'''
# %%
# Run INMRP with the configuration above; TIME_RESOURCE=True enables the
# repair-time resource alongside the budget.
runutils_v2.run_method(FAIL_SCE_PARAM, RC, T, LAYERS, method='INMRP', output_dir=OUTPUT_DIR,
                       misc={'DYNAMIC_PARAMS': DYNAMIC_PARAMS, 'EXTRA_COMMODITY': EXTRA_COMMODITY,
                             'TIME_RESOURCE': True})
# %%
'''
### Post-processing
First, you have to set a few parameters and then call functions that read outputs
and generate the pandas DataFrames that are needed for plotting the results.
##### Post-processing parameters
1. `COST_TYPES`: type of cost that should be used in processing the outputs. Options
are *Total*, *Under Supply*, *Over Supply*, *Node*, *Arc*, *Flow*, *Space Prep*, *Under Supply Perc*.
2. `REF_METHOD`: the method served as the reference in computing the relative performance
and allocation gap. Usually, this is an optimal method like `indp` or `tdindp`. However,
it can be any other method like `jc`, `ng`, or else.
3. `METHOD_NAMES`: methods whose output should be read. Options are `indp`, `tdindp`, `jc`,
`ng`, `dp_indp`, `dp_jc`, `bg????` (For example, `bgNCUI` means the Bayesian game with
two players where the first player is non-cooperative and uses uninformative belief,
and the second one is cooperative and uses the inverse false consensus belief).
##### Post-processing functions
1. `generate_combinations`: generate all the combination of outputs that should be read and
save them in `COMBS` and `OPTIMAL_COMBS` lists.
2. `read_results`: read results for combinations in `COMBS` and `OPTIMAL_COMBS` lists.
3. `relative_performance`: computes relative performance measures for different combinations.
4. `read_resource_allocation`: read the resource allocations by different methods and
compute allocation gaps for different combinations.
5. `read_run_time`: compute run time for different combinations.
6. `analyze_NE`: analyze the characteristics of Nash equilibria for different combinations.
7. `relative_actions`: computes the relative usage of different action types compared to the
optimal solution.
8. `cooperation_gain`: computes the gain for each player from changing their types.
'''
# %%
# COST_TYPES = ['Total'] # 'Under Supply', 'Over Supply'
# REF_METHOD = 'dp_inmrp'
# METHOD_NAMES = ['dp_inmrp']
# # 'ng', 'jc', 'tdindp', 'ng', 'bgCCCCUUUU', 'dp_indp', 'dp_jc', 'bgCNUU', 'inmrp', 'dp_inmrp'
#
# COMBS, OPTIMAL_COMBS = dindputils_v2.generate_combinations(FAIL_SCE_PARAM['TYPE'], FAIL_SCE_PARAM['L1_RANGE'],
# FAIL_SCE_PARAM['L2_RANGE'], LAYERS, RC, METHOD_NAMES,
# list_high_dam_add=FAIL_SCE_PARAM['FILTER_SCE'],
# synthetic_dir=SYNTH_DIR)
#
# BASE_DF, objs = dindputils_v2.read_results(COMBS, OPTIMAL_COMBS, COST_TYPES, root_result_dir=OUTPUT_DIR, deaggregate=True)
#
# # LAMBDA_DF = dindputils_v2.relative_performance(BASE_DF, COMBS, OPTIMAL_COMBS, ref_method=REF_METHOD,
# # cost_type=COST_TYPES[0], deaggregate=True)
# # RES_ALLOC_DF, ALLOC_GAP_DF = dindputils_v2.read_resource_allocation(BASE_DF, COMBS, OPTIMAL_COMBS,
# # objs, root_result_dir=OUTPUT_DIR,
# # ref_method=REF_METHOD)
# # RUN_TIME_DF = dindputils_v2.read_run_time(COMBS, OPTIMAL_COMBS, objs, root_result_dir=OUTPUT_DIR)
# # ANALYZE_NE_DF = gameutils.analyze_NE(objs, COMBS, OPTIMAL_COMBS)
# # REL_ACTION_DF = gameutils.relative_actions(ANALYZE_NE_DF, COMBS)
#
# # COOP_GAIN, COOP_GAIN_TIME = gameutils.cooperation_gain(BASE_DF, LAMBDA_DF, COMBS, ref_state='bgNNUU',
# # states=['bgCCUU', 'bgCNUU', 'bgNCUU'])
# %%
'''
### Save Variables to file
All dictionaries that are made in the postprocessing step are saved here.
'''
# %%
# OBJ_LIST = [COMBS, OPTIMAL_COMBS, BASE_DF, METHOD_NAMES, LAMBDA_DF, RES_ALLOC_DF,
# ALLOC_GAP_DF, RUN_TIME_DF, COST_TYPES, ANALYZE_NE_DF, REL_ACTION_DF]
# OBJ_LIST = [COMBS, OPTIMAL_COMBS, BASE_DF, METHOD_NAMES, LAMBDA_DF]
# with open(OUTPUT_DIR + 'postprocess_dicts.pkl', 'wb') as f:
# pickle.dump(OBJ_LIST, f)
# %%
'''
### Plot results
Plot functions use the dictionaries that are made in the postprocessing step to
make output figures:
1. `plot_performance_curves`: plots costs (in `COST_TYPES`) and unmet demand vs. time.
2. `plot_separated_perform_curves`: plots costs (in `COST_TYPES`) vs. time for each layer separately.
3. `plot_relative_performance`: plots relative performances.
4. `plot_auction_allocation`: plots resource allocation vs. time.
5. `plot_relative_allocation`: plots allocation gaps.
6. `plot_run_time`: plots run time vs. time.
7. `plot_ne_analysis`: plots NE analysis measures vs. time (for games only).
8. `plot_ne_cooperation`: plots action types vs. time (for games only).
9. `plot_payoff_hist`: plots size of the payoff matrix vs. time (for games only).
10. `plot_relative_actions`: plots relative action usage (for games only).
'''
# %%
plt.close('all')  # close any figures left over from previous runs
### Getting back the objects ###
# import pickle
#
# results_dir = OUTPUT_DIR
# with open(results_dir + 'postprocess_dicts.pkl', 'rb') as f:
# [COMBS, OPTIMAL_COMBS, BASE_DF, METHOD_NAMES, LAMBDA_DF, RES_ALLOC_DF,
# ALLOC_GAP_DF, RUN_TIME_DF, COST_TYPE, ANALYZE_NE_DF, REL_ACTION_DF] = pickle.load(f)
# plots.plot_performance_curves(BASE_DF,
# cost_type='Total', ci=95,
# deaggregate=False, plot_resilience=True)
# plots.plot_relative_performance(LAMBDA_DF[(LAMBDA_DF['auction_type'] != 'UNIFORM') & \
# ((LAMBDA_DF['no_resources'] != 8)&(LAMBDA_DF['no_resources'] != 12))],
# lambda_type='U')
# plots.plot_ne_analysis(ANALYZE_NE_DF, ci=None)
# plots.plot_ne_cooperation(ANALYZE_NE_DF, ci=None)
# plots.plot_relative_actions(REL_ACTION_DF)
# plots.plot_cooperation_gain(COOP_GAIN, ref_state = 'bgNNUU',
# states = ['bgCCUU', 'bgCNUU', 'bgNCUU'])
# # # plots.plot_separated_perform_curves(BASE_DF, x='t', y='cost', cost_type='Total',
# # # ci=95, normalize=False)
# # plots.plot_auction_allocation(RES_ALLOC_DF, ci=95)
# # plots.plot_relative_allocation(ALLOC_GAP_DF, distance_type='gap')
# # plots.plot_run_time(RUN_TIME_DF, ci=95)
# plots.plot_payoff_hist(ANALYZE_NE_DF, compute_payoff_numbers=True, outlier=False)
# [(REL_ACTION_DF['auction_type']!='UNIFORM')]
|
from enum import Enum
from math import radians, sin, cos
from time import sleep
import cv2
import numpy as np
from functions import pinhole_projection
from pid import PID
from vrep_object import VRepClient, VRepObject
class Visibility(Enum):
    """Tri-state classification of a navigation goal.

    NOTE(review): the enum is not referenced inside this module's visible
    code; the exact semantics of each state are defined by its users.
    """
    VISIBLE = 1
    NOT_VISIBLE = 2
    UNREACHABLE = 3
class Drone(VRepObject):
    """High-level controller for the V-REP quadcopter model.

    The drone is steered indirectly: all commands move/rotate the
    ``Quadricopter_target`` dummy, which the quadcopter script inside V-REP
    then follows.  Three independent PID controllers handle yaw, altitude
    and full 3-D position corrections.
    """

    SAFETY_RADIUS = 0.3  # meters. Value to add to the real radius of the UAV

    def __init__(self, client: VRepClient):
        self._body = client.get_object("Quadricopter_base")
        self._model = client.get_object("Quadricopter")
        super().__init__(client.id, self._body.handle, "")
        self._target = client.get_object("Quadricopter_target")
        self.sensor = client.get_object("fast3DLaserScanner_sensor")
        # Independent controllers for yaw, altitude and 3-D position.
        self._rotation_pid = PID(0.2, 0.05, 0.2, 1, max_int=3)
        self._altitude_pid = PID(0.2, 0.02, 0.2, 1)
        self._pid = PID(4.5, 0.01, 0.1, 3, 0.15, max_int=10)
        self.total_distance = 0  # cumulative length of all commanded moves
        self.sensor_offset = self._body.get_position(self.sensor)
        self.radius = self._get_radius() + self.SAFETY_RADIUS
        # Base and height of the visibility cone (actually a pyramid)
        B = 2 * self.sensor.max_depth * sin(radians(self.sensor.angle))
        H = 2 * self.sensor.max_depth * cos(radians(self.sensor.angle))
        # Constant for pixel-to-meters conversion
        self.K = self.sensor.res
        if abs(B - H) > 1e-3:
            # Non-square pyramid: correct for the aspect ratio.
            self.K *= H / B

    def _get_radius(self) -> float:
        """Return the effective radius of the drone.

        The radius is half of the distance between the extrema of the model's
        bounding box.
        """
        bbox = self._model.get_bbox()
        bbox_span = np.linalg.norm(bbox[1] - bbox[0])
        return bbox_span / 2

    def altitude_adjust(self, goal: VRepObject) -> None:
        """Raise/lower the target until the goal's z-error is below 0.5 m.

        Runs the altitude PID on the goal's z-coordinate expressed in the
        drone body frame, then resets the controller and waits for the
        drone to settle.
        """
        good = err = 0.5  # meters
        while abs(err) >= good:
            goal_pos = goal.get_position(self._body)
            err = goal_pos[2]  # z-coordinate
            correction = self._altitude_pid.control(err)
            if __debug__:
                print("Adjusting altitude...", correction)
            self._target.set_position(self._target.get_position() +
                                      np.array([0, 0, correction]))
            self.total_distance += np.linalg.norm(correction)
            sleep(1)
        else:
            if __debug__:
                print("...Adjusted. Goal at {} m".format(err))
            self._altitude_pid.reset()
            sleep(2)  # Wait for the drone to stabilize

    def can_reach(self, goal: VRepObject):
        """Check whether the straight path towards `goal` looks clear.

        Returns:
            tuple: ``(reachable, depth_buffer, min_depth, mask)`` where
            `mask` marks the disk of depth pixels around the goal's
            projection that was inspected.
        """
        dist, azimuth, elevation = goal.get_spherical(self._body,
                                                      self.sensor_offset)
        delta = goal.get_position(self._target)
        h_dist = np.linalg.norm(delta[0:2])  # horizontal distance only
        res, d = self.sensor.get_depth_buffer()
        X, Y = pinhole_projection(azimuth, elevation)
        ball_r = self.radius_to_pixels(dist)
        mask = cv2.circle(np.zeros_like(d), (X, Y), ball_r, 1, -1)
        try:
            min_depth = np.min(d[mask == 1]) * self.sensor.max_depth
        except ValueError:
            # Mask has no white pixel. Center view on goal and retry.
            # NOTE(review): recursion is unconditional — if lock() cannot
            # bring the goal into view this never terminates.
            self.lock(goal)
            return self.can_reach(goal)
        # Reachable when the goal is very close horizontally, the nearest
        # obstacle lies beyond the goal (with safety margin), or nothing is
        # seen at all (sensor saturated at max depth).
        reachable = h_dist < 1 or dist - min_depth < -self.radius or \
            min_depth == self.sensor.max_depth
        return reachable, d, min_depth, mask

    def radius_to_pixels(self, dist: float) -> int:
        """Converts a drone radius in pixels, at the given distance.

        This function returns the size in pixels of a segment of length
        RADIUS, placed at distance `dist` and orthogonal to the principal
        axis of the camera.  Always at least 1 pixel.
        """
        return max(int(self.K * self.radius / dist), 1)

    def reset_controllers(self):
        """Reset the internal state (integrals, history) of all three PIDs."""
        self._pid.reset()
        self._altitude_pid.reset()
        self._rotation_pid.reset()

    def rotate_towards(self, goal: VRepObject):
        """Rotate the drone until it points towards the goal.

        Actually, the function rotates the `target` object which is then
        followed by the `drone` (inside V-REP).
        """
        good = azimuth = 2  # Degrees
        while abs(azimuth) >= good:
            euler = self._target.get_orientation()
            __, azimuth, __ = goal.get_spherical(self._body,
                                                 self.sensor_offset)
            correction_angle = self._rotation_pid.control(azimuth)
            if __debug__:
                print("Adjusting orientation...", correction_angle)
            euler[2] += radians(correction_angle)  # euler[2] = Yaw
            self._target.set_orientation(euler)
            sleep(1)
        else:
            if __debug__:
                print("...Adjusted. Goal at {}°".format(azimuth))
            self._rotation_pid.reset()
            self.stabilize()  # Wait for the drone to stabilize on new angle

    def lock(self, goal: VRepObject):
        """Center the sensor on the goal: altitude first if needed, then yaw."""
        __, azimuth, elevation = goal.get_spherical(self._body,
                                                    self.sensor_offset)
        X, Y = pinhole_projection(azimuth, elevation)
        # Goal outside the vertical field of view (image assumed 256 px tall
        # — TODO confirm against the sensor resolution).
        if abs(elevation) > self.sensor.angle or not 0 <= Y < 256:
            self.altitude_adjust(goal)
        self.rotate_towards(goal)

    def stabilize(self):
        """Block until every linear and angular velocity component is ~zero.

        BUGFIX: velocity components are signed, so they must be compared by
        magnitude.  The original ``i < eps`` accepted arbitrarily large
        negative velocities as "stable".
        """
        eps = 0.001
        if __debug__:
            print("UAV stabilization in progress...")
        while True:
            lin_v, ang_v = self._body.get_velocity()
            if all(abs(i) < eps for i in lin_v) and all(abs(i) < eps for i in ang_v):
                if __debug__:
                    sleep(0.5)
                    print("...done.")
                return
            else:
                sleep(0.05)

    def step_towards(self, goal: VRepObject):
        """Move the drone towards the goal.

        Applies one PID-sized displacement of the target dummy in the
        direction of `goal` and accounts for the travelled distance.
        """
        target_pos = self._target.get_position()
        correction = self._pid.control(-self._target.get_position(goal))
        self.total_distance += np.linalg.norm(correction)
        self._target.set_position(target_pos + correction)

    def escape(self, goal):
        """Probe free space left and right of the current heading.

        Free space is estimated as the number of depth pixels saturated at
        the maximum range (d == 1).
        """
        # TODO implement wall-following algorithm
        self.rotate(60)
        __, d = self.sensor.get_depth_buffer()
        left_space = len(d[d == 1])
        self.rotate(-120)
        __, d = self.sensor.get_depth_buffer()
        right_space = len(d[d == 1])
        # NOTE(review): computed but currently unused — dead end of the
        # unfinished wall-following implementation above.
        go_left = left_space >= right_space

    def rotate(self, angle: float):
        """Perform an arbitrary yaw rotation.

        Args:
            angle (float): Yaw angle, in degrees. Positive = rotates left
        """
        self._rotation_pid.reset()
        while abs(angle) > 2:
            euler = self._target.get_orientation()
            correction = self._rotation_pid.control(angle)
            angle -= correction
            euler[2] += radians(correction)  # euler[2] = Yaw
            self._target.set_orientation(euler)
            sleep(1)
        self.stabilize()
|
# Copyright 2021 The TEMPO Collaboration
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the time_evolving_mpo.tempo module.
"""
import pytest
import numpy as np
import time_evolving_mpo as tempo
def test_tempo_parameters():
    """TempoParameters exposes dt/dkmax/epsrel as mutable properties."""
    params = tempo.TempoParameters(0.1, None, 1.0e-5, "rough", "bla", {})
    str(params)  # __str__ must not raise
    # Constructor values are reflected by the properties.
    assert params.dt == 0.1
    assert params.dkmax is None
    assert params.epsrel == 1.0e-5
    # Properties can be reassigned after construction.
    params.dt = 0.05
    params.dkmax = 42
    params.epsrel = 1.0e-6
    assert (params.dt, params.dkmax, params.epsrel) == (0.05, 42, 1.0e-6)
    # Deleting dkmax resets it to None.
    del params.dkmax
    assert params.dkmax is None
def test_tempo_parameters_bad_input():
    """Each numeric constructor argument rejects a non-numeric value."""
    bad_argument_sets = [
        ("x", 42, 1.0e-5),   # bad dt
        (0.1, "x", 1.0e-5),  # bad dkmax
        (0.1, 42, "x"),      # bad epsrel
    ]
    for dt, dkmax, epsrel in bad_argument_sets:
        with pytest.raises(AssertionError):
            tempo.TempoParameters(dt, dkmax, epsrel, "rough", "bla", {})
def test_tempo():
    """Tempo accumulates dynamics over several compute() calls with
    different progress reporters."""
    t_start = -0.3
    checkpoints = (0.4, 0.6, 0.84)
    sys_obj = tempo.System(0.5 * tempo.operators.sigma("x"))
    corr_fct = lambda t: (np.cos(6.0 * t) + 1j * np.sin(6.0 * t)) \
                         * np.exp(-12.0 * t)
    correlations = tempo.CustomCorrelations(corr_fct,
                                            max_correlation_time=0.5)
    bath = tempo.Bath(0.5 * tempo.operators.sigma("z"), correlations)
    rho0 = tempo.operators.spin_dm("z+")
    params = tempo.TempoParameters(0.1, 5, 1.0e-5, name="rough-A")
    tempo_sys = tempo.Tempo(system=sys_obj,
                            bath=bath,
                            parameters=params,
                            initial_state=rho0,
                            start_time=t_start)
    assert tempo_sys.dimension == 2
    # Advance in three chunks, exercising all progress reporters.
    for end, progress in zip(checkpoints, ("bar", "silent", "simple")):
        tempo_sys.compute(end_time=end, progress_type=progress)
    dyn = tempo_sys.get_dynamics()
    assert len(dyn.times) == 13
def test_tempo_bad_input():
    """Tempo's constructor and compute() validate their inputs."""
    start_time = -0.3
    end_time = 0.84
    system = tempo.System(0.5 * tempo.operators.sigma("x"))
    correlation_function = lambda t: (np.cos(6.0*t)+1j*np.sin(6.0*t)) \
                                     * np.exp(-12.0*t)
    correlations = tempo.CustomCorrelations(correlation_function,
                                            max_correlation_time=0.5)
    bath = tempo.Bath(0.5 * tempo.operators.sigma("z"), correlations)
    initial_state = tempo.operators.spin_dm("z+")
    tempo_param_A = tempo.TempoParameters(0.1, 5, 1.0e-5, name="rough-A")
    # An invalid (string) initial state must be rejected.
    with pytest.raises(AssertionError):
        tempo_sys_A = tempo.Tempo(system=system,
                                  bath=bath,
                                  parameters=tempo_param_A,
                                  initial_state="bla",
                                  start_time=start_time)
    # A non-numeric start time must be rejected.
    with pytest.raises(AssertionError):
        tempo_sys_A = tempo.Tempo(system=system,
                                  bath=bath,
                                  parameters=tempo_param_A,
                                  initial_state=initial_state,
                                  start_time="bla")
    # Valid construction succeeds ...
    tempo_sys_A = tempo.Tempo(system=system,
                              bath=bath,
                              parameters=tempo_param_A,
                              initial_state=initial_state,
                              start_time=start_time)
    # ... but compute() still rejects a non-numeric end time.
    with pytest.raises(AssertionError):
        tempo_sys_A.compute(end_time="bla", progress_type="bar")
def test_guess_tempo_parameters():
    """guess_tempo_parameters() warns on rough estimates and validates its
    time/tolerance arguments.

    NOTE: 'tollerance' is the library's own (misspelled) keyword argument.
    """
    system = tempo.System(0.5 * tempo.operators.sigma("x"))
    correlation_function = lambda t: (np.cos(t)+1j*np.sin(6.0*t)) * np.exp(-2.0*t)
    correlations = tempo.CustomCorrelations(correlation_function,
                                            max_correlation_time=10.0)
    bath = tempo.Bath(0.5 * tempo.operators.sigma("z"), correlations)
    # A plain call emits a warning about the guessed parameters.
    with pytest.warns(UserWarning):
        param = tempo.guess_tempo_parameters(bath=bath,
                                             start_time=0.0,
                                             end_time=15.0)
    with pytest.raises(AssertionError): # bad start time input
        param = tempo.guess_tempo_parameters(bath=bath,
                                             start_time="bla",
                                             end_time=15.0)
    with pytest.raises(AssertionError): # bad end time input
        param = tempo.guess_tempo_parameters(bath=bath,
                                             start_time=0.0,
                                             end_time="bla")
    with pytest.raises(ValueError): # bad start/end time (swapped)
        param = tempo.guess_tempo_parameters(bath=bath,
                                             start_time=10.0,
                                             end_time=0.0)
    with pytest.raises(AssertionError): # bad tollerance
        param = tempo.guess_tempo_parameters(bath=bath,
                                             start_time=0.0,
                                             end_time=15.0,
                                             tollerance="bla")
    with pytest.raises(AssertionError): # bad tollerance (negative)
        param = tempo.guess_tempo_parameters(bath=bath,
                                             start_time=0.0,
                                             end_time=15.0,
                                             tollerance=-0.2)
    with pytest.warns(UserWarning): # reach MAX_DKMAX
        param = tempo.guess_tempo_parameters(bath=bath,
                                             start_time=0.0,
                                             end_time=15.0,
                                             tollerance=1.0e-12)
def test_tempo_compute():
    """tempo_compute() without explicit parameters guesses them and warns."""
    corr_fct = lambda t: (np.cos(6.0 * t) + 1j * np.sin(6.0 * t)) \
                         * np.exp(-12.0 * t)
    sys_obj = tempo.System(0.5 * tempo.operators.sigma("x"))
    bath = tempo.Bath(
        0.5 * tempo.operators.sigma("z"),
        tempo.CustomCorrelations(corr_fct, max_correlation_time=0.5))
    with pytest.warns(UserWarning):
        tempo.tempo_compute(system=sys_obj,
                            bath=bath,
                            initial_state=tempo.operators.spin_dm("z+"),
                            start_time=-0.3,
                            end_time=0.84)
def test_tempo_dynamics_reference():
    """compute() and get_dynamics() hand back the same dynamics object,
    which keeps growing across compute() calls."""
    system = tempo.System(0.5 * tempo.operators.sigma("x"))
    correlations = tempo.PowerLawSD(alpha=0.1,
                                    zeta=1,
                                    cutoff=1.0,
                                    cutoff_type='exponential',
                                    max_correlation_time=0.5)
    bath = tempo.Bath(0.5 * tempo.operators.sigma("z"), correlations)
    tempo_parameters = tempo.TempoParameters(dt=0.1, dkmax=10, epsrel=10**(-4))
    tempo_A = tempo.Tempo(system=system,
                          bath=bath,
                          parameters=tempo_parameters,
                          initial_state=tempo.operators.spin_dm("up"),
                          start_time=0.0)
    dynamics_1 = tempo_A.compute(end_time=0.2)
    t_1, sz_1 = dynamics_1.expectations(tempo.operators.sigma("z"))
    tempo_A.compute(end_time=0.4)
    dynamics_2 = tempo_A.get_dynamics()
    t_2, sz_2 = dynamics_2.expectations(tempo.operators.sigma("z"))
    # Same underlying object, extended by the second compute() call.
    assert dynamics_1 == dynamics_2
    assert len(t_2) > len(t_1)
|
<gh_stars>10-100
# coding: utf-8
'''
Module for composite material analysis
Hyer-Stress Analysis of Fiber-Reinforced Composite Materials
Herakovich-Mechanics of Fibrous Composites
Daniel-Engineering Mechanics of Composite Materials
Kollar-Mechanics of Composite Structures
NASA - Basic Mechanics of Laminated Composites
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19950009349.pdf
TODO:
* transverse shear stress reddy pg 136 or daniel pg 139
* include line loads (Qx,Qy) for combined loading
* calculate capability of panel based on margin
'''
#==============================================================================
# Import Modules
#==============================================================================
from __future__ import print_function, division
__author__ = '<NAME> <<EMAIL>>'
__date__ = '2016-12-02'
__version__ = 0.1
from copy import copy
from numpy import pi, zeros, ones, linspace, arange, array, sin, cos, sqrt, pi
from numpy.linalg import solve, inv
#from scipy import linalg
import numpy as np
#np.set_printoptions(suppress=False,precision=2) # suppress scientific notation
np.set_printoptions(precision=3, linewidth=200)#, threshold=np.inf)
import scipy
from scipy.spatial import ConvexHull
#np.set_printoptions(formatter={'float': lambda x: "{:.2f}".format(x)})
import pandas as pd
import sympy as sp
from sympy import Function, dsolve, Eq, Derivative, symbols, pprint
from sympy.plotting import plot3d
#from sympy import cos, sin
#sp.init_printing(use_latex='mathjax')
#sp.init_printing(wrap_line=False, pretty_print=True)
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (8,5)
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 14
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,figure,xlim,ylim,title,legend, \
grid, show, xlabel,ylabel, tight_layout
from mpl_toolkits.mplot3d import axes3d
# if using ipython console, turn off inline plotting
#mpl.use('Qt5Agg')
# inline plotting
from IPython import get_ipython
#get_ipython().magic('matplotlib inline')
###disable inline plotting
try:
    # Switch to a GUI backend when running inside IPython; outside IPython
    # get_ipython() returns None and this raises, which is ignored.
    get_ipython().magic('matplotlib')
except:
    pass
from IPython.display import display
import os
plt.close('all')  # start with no open figures
#==============================================================================
# Functions
#==============================================================================
def import_matprops(mymaterial=None):
    '''
    Import material properties from the bundled ``compositematerials.csv``.

    Parameters
    ----------
    mymaterial : list of str, optional
        Material (column) names to select.  Defaults to
        ``['T300_5208', 'AL_7075']``.  An empty list/string prints the
        available material names before the lookup.

    Returns
    -------
    pandas.DataFrame
        Selected material-property columns with numeric strings converted
        to numbers where possible.
    '''
    # BUGFIX: the default used to be a mutable list literal, which Python
    # shares across calls; a None sentinel avoids that pitfall.
    if mymaterial is None:
        mymaterial = ['T300_5208', 'AL_7075']
    matprops = pd.read_csv(os.path.join(os.path.dirname(__file__),
                                        "compositematerials.csv"), index_col=0)
    if mymaterial == [] or mymaterial == '':
        print(matprops.columns.tolist())
    mat = matprops[mymaterial]
    #mat.applymap(lambda x:np.float(x))
    mat = mat.applymap(lambda x: pd.to_numeric(x, errors='ignore'))
    return mat
def Sf(E1, E2, nu12, G12):
    '''Plane-stress compliance matrix of a transversely isotropic lamina
    (Herakovich, p. 58).

    Reciprocity gives nu21/E2 == nu12/E1, so the matrix is symmetric.
    '''
    nu21 = E2 * nu12 / E1
    return array([[1 / E1,     -nu21 / E2, 0],
                  [-nu12 / E1, 1 / E2,     0],
                  [0,          0,          1 / G12]])
def S6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23):
    '''3-D compliance matrix of a transversely isotropic material
    (Daniel, p. 74).

    For transverse isotropy E2 == E3, nu12 == nu13, G12 == G13 and
    G23 == E2 / (2 * (1 + nu23)); E3 and nu13 therefore do not appear
    explicitly in the matrix.
    '''
    s11 = 1 / E1
    s22 = 1 / E2
    s12 = -nu12 / E1
    s23 = -nu23 / E2
    return array([[s11, s12, s12, 0,       0,       0],
                  [s12, s22, s23, 0,       0,       0],
                  [s12, s23, s22, 0,       0,       0],
                  [0,   0,   0,   1 / G23, 0,       0],
                  [0,   0,   0,   0,       1 / G13, 0],
                  [0,   0,   0,   0,       0,       1 / G12]])
def C6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23):
    '''3-D stiffness matrix of a transversely isotropic material
    (Daniel, p. 74), obtained by inverting the compliance matrix S6f.
    '''
    return inv(S6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23))
def Qf(E1, E2, nu12, G12):
    '''Plane-stress reduced stiffness matrix of a transversely isotropic
    lamina (Herakovich, p. 58).

    For an isotropic material G12 = E1 / (2 * (1 + nu12)).
    '''
    nu21 = E2 * nu12 / E1
    d = 1 - nu12 * nu21  # common denominator
    return array([[E1 / d,        E2 * nu12 / d, 0],
                  [E2 * nu12 / d, E2 / d,        0],
                  [0,             0,             G12]])
def T61(th):
    '''6x6 stress transformation in Voigt notation: sigma_1 = T61 @ sigma_x
    (Reddy, p. 91).

    th : ply angle in degrees.
    '''
    n = sin(th * pi / 180)
    m = cos(th * pi / 180)
    mm, nn, mn = m ** 2, n ** 2, m * n
    return array([[mm,   nn,  0, 0,  0,  2 * mn],
                  [nn,   mm,  0, 0,  0, -2 * mn],
                  [0,    0,   1, 0,  0,  0],
                  [0,    0,   0, m, -n,  0],
                  [0,    0,   0, n,  m,  0],
                  [-mn,  mn,  0, 0,  0,  mm - nn]])
def T62(th):
    '''6x6 strain transformation in Voigt notation: epsilon_1 = T62 @ epsilon_x
    (Reddy, p. 91).

    th : ply angle in degrees.
    '''
    n = sin(th * pi / 180)
    m = cos(th * pi / 180)
    mm, nn, mn = m ** 2, n ** 2, m * n
    return array([[mm,       nn,      0, 0,  0,  mn],
                  [nn,       mm,      0, 0,  0, -mn],
                  [0,        0,       1, 0,  0,  0],
                  [0,        0,       0, m, -n,  0],
                  [0,        0,       0, n,  m,  0],
                  [-2 * mn,  2 * mn,  0, 0,  0,  mm - nn]])
def T1(th):
    '''Plane-stress stress transformation (Voigt): sigma_1 = T1 @ sigma_x.

    th : ply angle in degrees.  Recall T1(th)**-1 == T1(-th).
    '''
    n = sin(th * pi / 180)
    m = cos(th * pi / 180)
    return array([[m ** 2,  n ** 2,  2 * m * n],
                  [n ** 2,  m ** 2, -2 * m * n],
                  [-m * n,  m * n,   m ** 2 - n ** 2]])
def T2(th):
    '''Plane-stress strain transformation (Voigt): epsilon_1 = T2 @ epsilon_x.

    th : ply angle in degrees.
    '''
    n = sin(th * pi / 180)
    m = cos(th * pi / 180)
    return array([[m ** 2,      n ** 2,     m * n],
                  [n ** 2,      m ** 2,    -m * n],
                  [-2 * m * n,  2 * m * n,  m ** 2 - n ** 2]])
def T1s(th):
    '''Symbolic plane-stress stress transformation (Voigt notation):
    sigma_1 = T1 @ sigma_x, with th the ply angle in degrees.

    Recall T1(th)**-1 == T1(-th).
    '''
    m = sp.cos(th * sp.pi / 180)
    n = sp.sin(th * sp.pi / 180)
    return sp.Matrix([[m ** 2,  n ** 2,  2 * m * n],
                      [n ** 2,  m ** 2, -2 * m * n],
                      [-m * n,  m * n,   m ** 2 - n ** 2]])
def T2s(th):
    '''Symbolic plane-stress strain transformation (Voigt notation):
    epsilon_1 = T2 @ epsilon_x, with th the ply angle in degrees.
    '''
    m = sp.cos(th * sp.pi / 180)
    n = sp.sin(th * sp.pi / 180)
    return sp.Matrix([[m ** 2,      n ** 2,     m * n],
                      [n ** 2,      m ** 2,    -m * n],
                      [-2 * m * n,  2 * m * n,  m ** 2 - n ** 2]])
def failure_envelope():
    """Plot max-stress failure envelopes for two hard-coded materials.

    Strength values are in ksi; list index 0 is the first material, index 1
    the second.  Directions 1/2 are the lamina principal directions.
    """
    # failure envelopes
    # max stress criteria
    # 1 direction in first row
    # 2 direction in second row
    # failure strength in compression
    #Fc = matrix([[-1250.0, -600.0],
    #             [-200.0, -120.0]]) # ksi
    #
    ##failure strength in tension
    #Ft = matrix([[1500, 1000]
    #             [50, 30]]) # ksi
    #
    ##Failure strength in shear
    #Fs = matrix( [100, 70] ) # Shear
    Fc1 = [-1250, -600]  # Compression 1 direction
    Fc2 = [-200, -120]   # Compression 2 direction
    Ft1 = [1500, 1000]   # Tension 1 direction
    Ft2 = [50, 30]       # Tension 2 direction
    Fs = [100, 70]       # Shear
    # F1 = Ft(1);
    # F2 = Ft(1);
    # F6 = Fs(1);
    for c in range(2):  # mattype: one rectangular envelope per material
        factor = 1.25  # axis-margin factor (used by the commented xlim/ylim)
        # right
        plot([Ft1[c], Ft1[c]], [Fc2[c], Ft2[c]])
        # left
        plot([Fc1[c], Fc1[c]], [Fc2[c], Ft2[c]])
        # top
        plot([Fc1[c], Ft1[c]], [Ft2[c], Ft2[c]])
        # bottom
        plot([Fc1[c], Ft1[c]], [Fc2[c], Fc2[c]])
        # center horizontal
        plot([Fc1[c], Ft1[c]], [0, 0])
        # center vertical
        plot([0, 0], [Fc2[c], Ft2[c]])
        #xlim([min(Fc1) max(Ft1)]*factor)
        #ylim([min(Fc2) max(Ft2)]*factor)
    xlabel('$\sigma_1,ksi$')
    ylabel('$\sigma_2,ksi$')
    title('failure envelope with Max-Stress Criteria')
def material_plots(materials=None):
    '''
    Plot composite lamina/laminate properties as a function of ply angle.

    Parameters
    ----------
    materials : list of str, optional
        Material names looked up with import_matprops().  Defaults to
        ['Carbon_cloth_AGP3705H'].  Only the first entry is plotted.
    '''
    # BUGFIX: mutable default argument replaced with a None sentinel
    # (the old list literal was shared between calls).
    if materials is None:
        materials = ['Carbon_cloth_AGP3705H']
    # plt.rcParams['figure.figsize'] = (10, 8)
    # plt.rcParams['font.size'] = 14
    # plt.rcParams['legend.fontsize'] = 14
    plyangle = arange(-45, 45.1, 0.1)
    h = 1  # lamina thickness
    layupname = '[0]'
    mat = import_matprops(materials)
    Ex = mat[materials[0]].E1
    Ey = mat[materials[0]].E2
    nuxy = mat[materials[0]].nu12
    Gxy = mat[materials[0]].G12
    # layupname = '[0, 45, 45, 0]'
    # Ex= 2890983.38
    # Ey= 2844063.06
    # nuxy= 0.27
    # Gxy= 1129326.25
    # h = 0.0600
    plt.close('all')
    # Transformed stiffness components vs. ply angle.
    S = Sf(Ex, Ey, nuxy, Gxy)
    C = inv(S)
    C11 = [(inv(T1(th)) @ C @ T2(th))[0, 0] for th in plyangle]
    C22 = [(inv(T1(th)) @ C @ T2(th))[1, 1] for th in plyangle]
    C33 = [(inv(T1(th)) @ C @ T2(th))[2, 2] for th in plyangle]
    C12 = [(inv(T1(th)) @ C @ T2(th))[0, 1] for th in plyangle]
    Exbar = zeros(len(plyangle))
    Eybar = zeros(len(plyangle))
    Gxybar = zeros(len(plyangle))
    # Transformed reduced stiffness Qbar for every ply angle.
    Q = Qf(Ex, Ey, nuxy, Gxy)
    Qbar = zeros((len(plyangle), 3, 3))
    for i, th in enumerate(plyangle):
        Qbar[i] = solve(T1(th), Q) @ T2(th)
    # Qbar = [solve(T1(th),Q) @ T2(th) for th in plyangle]
    Qbar11 = Qbar[:, 0, 0]
    Qbar22 = Qbar[:, 1, 1]
    Qbar66 = Qbar[:, 2, 2]
    Qbar12 = Qbar[:, 0, 1]
    Qbar16 = Qbar[:, 0, 2]
    Qbar26 = Qbar[:, 1, 2]
    Aij = Qbar * h
    # laminate Stiffness
    #     | Exbar    Eybar     Gxybar   |
    # A = | vxybar   vyxbar    etasxbar |
    #     | etaxsbar etaysbar  etasybar |
    # laminate Compliance
    aij = zeros((len(plyangle), 3, 3))
    for i, _Aij in enumerate(Aij):
        aij[i] = inv(_Aij)
    # material properties for whole laminate (Daniel, pg183)
    Exbar = [1 / (h * _aij[0, 0]) for _aij in aij]
    Eybar = [1 / (h * _aij[1, 1]) for _aij in aij]
    Gxybar = [1 / (h * _aij[2, 2]) for _aij in aij]
    # Global Stress
    s_xy = array([[100],
                  [10],
                  [5]])
    # local ply stress
    s_12 = np.zeros((3, len(plyangle)))
    for i, th in enumerate(plyangle):
        # s_12[:,i] = np.transpose(T1(th) @ s_xy)[0] # local stresses
        s_12[:, [i]] = T1(th) @ s_xy
    # Plotting
    figure()  # , figsize=(10,8))
    plot(plyangle, C11, plyangle, C22, plyangle, C33, plyangle, C12)
    legend(['$\overline{C}_{11}$', '$\overline{C}_{22}$', '$\overline{C}_{44}$', '$\overline{C}_{66}$'])
    title('Transversly Isotropic Stiffness properties of carbon fiber T300_5208')
    xlabel("$\Theta$")
    ylabel('$\overline{C}_{ii}$, ksi')
    grid()
    figure()  # , figsize=(10,8))
    plot(plyangle, Exbar, label=r"Modulus: $E_x$")
    plot(plyangle, Eybar, label=r"Modulus: $E_y$")
    plot(plyangle, Gxybar, label=r"Modulus: $G_{xy}$")
    title("Constitutive Properties in various angles")
    xlabel("$\Theta$")
    ylabel("modulus, psi")
    legend()
    grid()
    figure()  # ,figsize=(10,8))
    plot(plyangle, s_12[0, :], label='$\sigma_{11},ksi$')
    plot(plyangle, s_12[1, :], label='$\sigma_{22},ksi$')
    plot(plyangle, s_12[2, :], label='$\sigma_{12},ksi$')
    legend(loc='lower left')
    xlabel("$\Theta$")
    ylabel("Stress, ksi")
    grid()
    # plot plyangle as a function of time
    figure()  # ,figsize=(10,8))
    plot(plyangle, Qbar11, label="Qbar11")
    plot(plyangle, Qbar22, label="Qbar22")
    plot(plyangle, Qbar66, label="Qbar66")
    legend(loc='lower left')
    xlabel("$\Theta$")
    ylabel('Q')
    grid()
    # plot plyangle as a function of time
    figure()  # ,figsize=(10,8))
    plot(plyangle, Qbar12, label="Qbar12")
    plot(plyangle, Qbar16, label="Qbar16")
    plot(plyangle, Qbar26, label="Qbar26")
    legend(loc='lower left')
    xlabel("$\Theta$")
    ylabel('Q')
    grid()
    titlename = 'Laminate Properties varying angle for {} {}'.format(materials[0], layupname)
    # df = pd.DataFrame({'plyangle':plyangle, 'Exbar':Exbar, 'Eybar':Eybar,'Gxybar':Gxybar})
    # print(df)
    # df.to_csv(titlename+'.csv')
    plt.figure(figsize=(9, 6))
    plot(plyangle, Exbar, label=r"Modulus: $E_x$")
    plot(plyangle, Eybar, label=r"Modulus: $E_y$")
    plot(plyangle, Gxybar, label=r"Modulus: $G_{xy}$")
    title(titlename)
    xlabel("$\Theta$")
    ylabel("modulus, psi")
    legend(loc='best')
    grid()
    # plt.savefig(titlename+'.png')
    show()
def laminate_gen(lamthk=1.5, symang=None, plyratio=2.0, matrixlayers=False, balancedsymmetric=True):
    """Quickly generate a symmetric laminate stack from a half-stack angle list.

    Parameters
    ----------
    lamthk : float
        Total thickness of the laminate.
    symang : list, optional
        Symmetric half-stack ply angles, e.g. ``[45, 0, 90]``.
        Defaults to ``[45, 0, 90]``.
    plyratio : float
        Lamina/matrix thickness ratio; 1 means equal thickness, 2 means
        lamina plies are twice as thick as matrix plies. Only used when
        ``matrixlayers`` is True.
    matrixlayers : bool
        If True, interleave isotropic matrix layers between the lamina plies.
    balancedsymmetric : bool
        If True (and ``matrixlayers`` is False), mirror the half-stack to
        build a balanced symmetric laminate; otherwise use ``symang`` as-is.

    Returns
    -------
    (thk, plyangle, mat, lamang) : lists of ply thicknesses, ply angles,
        material indices, and lamina angles.
    """
    # Sentinel default avoids the shared-mutable-default-argument pitfall.
    if symang is None:
        symang = [45, 0, 90]
    if matrixlayers:
        # Half-stack layout: matrix plies at even indices, fiber plies at odd.
        nply = (len(symang)*2 + 1)*2
        nm = nply - len(symang)*2   # number of matrix plies
        nf = len(symang)*2          # number of fiber plies
        tm = lamthk / (plyratio*nf + nm)   # matrix ply thickness
        tf = tm*plyratio                   # fiber (lamina) ply thickness
        plyangle = zeros(nply//2)
        # orthotropic fiber = 1, isotropic matrix = 2
        mat = 2*ones(nply//2)
        mat[1:-1:2] = 1
        plyangle[1:-1:2] = symang[:]  # copy half-stack angles into fiber slots
        thk = tm*ones(nply//2)
        # BUG FIX: was `thk[2:2:-1] = tf`, an empty slice (silent no-op) that
        # left every ply at matrix thickness. Fiber plies sit at odd indices,
        # matching the mat/plyangle assignments above.
        thk[1:-1:2] = tf
        lamang = list(symang) + list(symang[::-1])
        plyangle = list(plyangle) + list(plyangle[::-1])
        mat = list(mat) + list(mat[::-1])
        thk = list(thk) + list(thk[::-1])
    else:  # no matrix layers; plyratio is ignored
        if balancedsymmetric:
            nply = len(symang)*2
            mat = list(3*np.ones(nply))
            thk = list(lamthk/nply*np.ones(nply))
            lamang = list(symang) + list(symang[::-1])
            plyangle = list(symang) + list(symang[::-1])
        else:
            nply = len(symang)
            mat = [1]*nply
            thk = list(lamthk/nply*np.ones(nply))
            lamang = symang[:]
            plyangle = symang[:]
    return thk, plyangle, mat, lamang
def make_quasi(n0=4, n45=4):
    """Build a simple two-angle ply stack.

    Returns a list of ply angles consisting of ``n0`` zero-degree plies
    followed by ``n45`` forty-five-degree plies.
    """
    zero_plies = [0] * int(n0)
    angle_plies = [45] * int(n45)
    return zero_plies + angle_plies
# <EMAIL>
def laminate_calcs(NM,ek,q0,plyangle,plymatindex,materials,platedim, zoffset,SF,plots,prints):
    '''
    Compute laminate properties and ply stresses/strains for combined
    mechanical, pressure, and thermal loads, plus failure and buckling margins.

    Parameters
    ----------
    NM : force/moment resultants [Nx, Ny, Nxy, Mx, My, Mxy], lbs/in
    ek : applied mid-plane strains and curvatures [ex, ey, exy, kx, ky, kxy], in/in
    q0 : uniform pressure load
    plyangle : ply angle for each ply (listed z- to z+)
    plymatindex : material index (into ``materials``) for each ply
    materials : list of material names understood by ``import_matprops``
    platedim : (a_width, b_length) plate width and length, inches or meters
    zoffset : mid-plane offset applied in the ABD assembly
    SF : safety factor applied to failure indices and strength ratios
    plots : if True, generate matplotlib figures
    prints : if True, print a results summary to the console

    Returns
    -------
    (MARGINSAFETY_MAXSTRESS_min, FAILUREINDEX_MAXSTRESS_max)

    Notes
    -----
    General outline for computing elastic properties of composites:
      1) Determine engineering properties of the unidirectional lamina: E1, E2, nu12, G12
      2) Calculate ply stiffnesses Q11, Q22, Q12, Q66 in the principal/local coordinate system
      3) Determine fiber orientation of each ply
      4) Calculate the transformed stiffness Qbar in the global coordinate system
      5) Determine the through-thickness position of each ply
      6) Determine the laminate stiffness matrix (ABD)
      7) Calculate the laminate compliance matrix by inverting the ABD matrix
      8) Calculate the laminate engineering properties

    Stress-strain relationship for a laminate, with Q = reduced stiffness matrix:
        |sx |   |Qbar11 Qbar12 Qbar16|   |ex +z*kx |
        |sy | = |Qbar12 Qbar22 Qbar26| @ |ey +z*ky |
        |sxy|   |Qbar16 Qbar26 Qbar66|   |exy+z*kxy|

    Transformation reminders (Herakovich, pg 84):
        Qbar = inv(T1) @ Q @ T2 == solve(T1, Q) @ T2
        sig1 = T1 @ sigx ;  sigx = inv(T1) @ sig1
        eps1 = T2 @ epsx ;  epsx = inv(T2) @ eps1
        Sbar = inv(T2) @ inv(Q) @ T2
    Core transverse direction is G13; ribbon direction is G23.
    '''
    #==========================================================================
    # Initialize python settings
    #==========================================================================
    #get_ipython().magic('matplotlib')
    plt.close('all')
    plt.rcParams['figure.figsize'] = (12, 8)
    plt.rcParams['font.size'] = 13
    #plt.rcParams['legend.fontsize'] = 14
    #==========================================================================
    # Define composite properties
    #==========================================================================
    assert(len(plyangle)==len(plymatindex))
    a_width, b_length = platedim
    # either apply strains or loads , lb/in
    Nx_, Ny_, Nxy_, Mx_, My_, Mxy_ = NM
    NMbarapp = array([[Nx_],[Ny_],[Nxy_],[Mx_],[My_],[Mxy_]])
    ex_, ey_, exy_, kx_, ky_, kxy_ = ek
    epsilonbarapp = array([[ex_],[ey_],[exy_],[kx_],[ky_],[kxy_]])
    Ti = 0   # initial temperature (C)
    Tf = 0   # final temperature (C)
    #SF = 1.0 # safety factor
    #==========================================================================
    # Import Material Properties
    #==========================================================================
    mat  = import_matprops(materials)
    #mat  = import_matprops(['E-Glass Epoxy cloth','rohacell2lb'])  # Herakovich
    # helper: ply CTE vector in local coordinates
    alphaf = lambda mat: array([[mat.alpha1], [mat.alpha2], [0]])
    ''' to get ply material info, use as follows
    alpha = alphaf(mat[materials[plymatindex[i]]])
    mat[materials[1]].E2
    '''
    laminatethk = array([mat[materials[i]].plythk for i in plymatindex ])
    nply = len(laminatethk) # number of plies
    H =   np.sum(laminatethk) # plate thickness
    #    area = a_width*H
    # ply interface z coordinates (z[0] at bottom) and ply mid-plane coordinates
    z = zeros(nply+1)
    zmid = zeros(nply)
    z[0] = -H/2
    for i in range(nply):
        z[i+1] = z[i] + laminatethk[i]
        zmid[i] = z[i] + laminatethk[i]/2
    #==========================================================================
    # ABD Matrix Compute
    #==========================================================================
    # Reduced stiffness matrix for a plane stress ply in principal coordinates
    # calculating Q from the Compliance matrix may cause cancellation errors
    A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
    for i in range(nply):  # = nply
        Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
        Qbar = solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) # inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
        # extensional stiffness
        A += Qbar*(z[i+1]-z[i])
        # coupling stiffness
        B += (1/2)*Qbar*(z[i+1]**2-z[i]**2)
        # bending or flexural laminate stiffness relating moments to curvatures
        D += (1/3)*Qbar*(z[i+1]**3-z[i]**3)
    #Cbar6 = T61 @ C6 @ np.transpose(T61)
    # laminate stiffness matrix, with the mid-plane offset folded in
    ABD = zeros((6,6))
    ABD[0:3,0:3] = A
    ABD[0:3,3:6] = B + zoffset*A
    ABD[3:6,0:3] = B + zoffset*A
    ABD[3:6,3:6] = D + 2*zoffset*B + zoffset**2*A
    # laminate compliance
    abcd = inv(ABD)
    a = abcd[0:3,0:3]
    #==========================================================================
    # Laminate Properties
    #==========================================================================
    # effective laminate shear coupling coefficients
    etasxbar = a[0,2]/a[2,2]
    etasybar = a[1,2]/a[2,2]
    etaxsbar = a[2,0]/a[0,0]
    etaysbar = a[2,1]/a[1,1]
    # laminate engineering properties
    Exbar  = 1 / (H*a[0,0])
    Eybar  = 1 / (H*a[1,1])
    Gxybar = 1 / (H*a[2,2])
    nuxybar = -a[0,1]/a[0,0]
    nuyxbar = -a[0,1]/a[1,1]
    # TODO: validate results, does not appear to be correct
    # strain centers, pg 72, NASA-Basic mechanics of laminated composites
    # added divide by zero epsilon
    z_eps0_x  = -B[0,0] / (D[0,0] + 1e-16)
    z_eps0_y  = -B[0,1] / (D[0,1] + 1e-16)
    z_eps0_xy = -B[0,2] / (D[0,2] + 1e-16)
    z_sc = -B[2,2] / (D[2,2] +1e-16) # shear center
    # --------------------- Double Check ---------------------
    # # Laminate compliance matrix
    # LamComp = array([ [1/Exbar,          -nuyxbar/Eybar,  etasxbar/Gxybar],
    #                   [-nuxybar/Exbar,   1/Eybar ,        etasybar/Gxybar],
    #                   [etaxsbar/Exbar,   etaysbar/Eybar,  1/Gxybar]] )
    # # Daniel pg 183
    # # combines applied loads and applied strains
    # strain_laminate = LamComp @ Nxyzapplied[:3]/H + strainxyzapplied[:3]
    # Nxyz = A @ strain_laminate
    # stress_laminate = Nxyz/H
    # --------------------------------------------------------
    #==========================================================================
    # Pressure Load
    #==========================================================================
    #==========================================================================
    # pressure displacement and moments
    #==========================================================================
    D11,D12,D22,D66 = D[0,0], D[0,1], D[1,1], D[2,2]
    B11 = B[0,0]
    A11, A12 = A[0,0], A[0,1]
    # reddy pg 247 Navier displacement solution for a simply supported plate
    s = b_length/a_width
    x = a_width/2
    y = b_length/2
    # 5.2.8, reddy, or hyer 13.123
    # double-sine series for the center deflection; odd terms only
    terms = 5
    w0 = 0
    for m in range(1,terms,2):
        for n in range(1,terms,2):
            dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
            alpha = m*pi/a_width
            beta  = n*pi/b_length
            # for uniformly distributed loads, m,n = 1,3,5,...
            Qmn = 16*q0/(pi**2*m*n)
            Wmn = Qmn/dmn
            w0 += Wmn * sin(alpha*x) * sin(beta*y)
    w0_simplesupport = w0
    # 5.2.12a, reddy
    # mid span moments
    Mxq=Myq=Mxyq=0
    for m in range(1,terms,2):
        for n in range(1,terms,2):
            dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
            alpha = m*pi/a_width
            beta  = n*pi/b_length
            # for uniformly distributed loads, m,n = 1,3,5,...
            Qmn = 16*q0/(pi**2*m*n)
            Wmn = Qmn/dmn
            Mxq  += (D11*alpha**2 + D12*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
            Myq  += (D12*alpha**2 + D22*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length)
            Mxyq += alpha*beta*D66 * Wmn * cos(m*pi*x/a_width) * cos(n*pi*y/b_length)
    Mxyq = -2*Mxyq
    NMq = [[0],[0],[0],[Mxq],[Myq],[Mxyq]]
    # hyer, x-pin-pin, y-free-free plate reaction forces, pg 619
    # Forces and Moments across the width of the plate
    A11R = A11*(1-B11**2/(A11*D11))
    D11R = D11*(1-B11**2/(A11*D11))
    Nxq0 = lambda x: B11/D11 * q0 * a_width**2 /12
    Nyq0 = lambda x: B11 * A12*q0 * a_width**2 / (D11*A11R*12) * (6*(x/a_width)**2-1/2)
    Nxyq0 = lambda x: 0
    Mxq0 = lambda x: q0 * a_width**2/8 * (1-4*(x/a_width)**2)
    Myq0 = lambda x: D12 * q0 * a_width**2 / (D11R*8) * ((1-2*B11**2/(3*A11*D11))-(4*(x/a_width)**2))
    Mxyq0 = lambda x: 0
    # clamped plate 5.4.11, reddy
    #w0_clamped = ( 49 * q0*a_width**4 * (x/a_width - (x/a_width)**2 )**2 * (y/b_length - (y/b_length)**2)**2) / (8 * (7*D11+4*(D12 + 2*D66)*s**2 + 7*D22*s**4) )
    # reddy, 5.4.12
    w0_clamped = 0.00342 * (q0*a_width**4) / (D11+0.5714*(D12+2*D66)*s**2+D22*s**4)
    # reddy, 5.4.15
    #w0_clamped = 0.00348 * (q0*a_width**4) / (D11*b_length**4+0.6047*(D12+2*D66)*s**2+D22*s**4)
    # reddy 5.4.15, for isotropic D11=D
    w0_clamped_isotropic = 0.00134*q0*a_width**4/D11
    #==========================================================================
    # Applied Loads and pressure loads
    #==========================================================================
    NMbarapptotal = NMbarapp + NMq + ABD @ epsilonbarapp
    #==========================================================================
    # Thermal Loads
    #==========================================================================
    '''
    if the material is isotropic and unconstrained, then no thermal stresses
        will be experienced. If there are constraints, then the material will experience
        thermally induced stresses. As with orthotropic materials, various directions will have
        different stresses, and when stacked in various orientations, stresses can be
        unintuitive and complicated. Global Thermal strains are subtracted from applied strains
    # 1) determine the free unrestrained thermal strains in each layer, alphabar
    '''
    dT = Tf-Ti
    Nhatth= zeros((3,1))  # unit thermal force in global CS
    Mhatth = zeros((3,1)) # unit thermal moment in global CS
    alphabar = zeros((3,nply))    # global ply CTE
    for i in range(nply):  # = nply
        Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
        alpha = alphaf(mat[materials[plymatindex[i]]])
        Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
        alphabar[:,[i]] = solve(T2(plyangle[i]), alpha)
        #alphabar[:,[i]] = inv(T2(plyangle[i])) @ alpha # Convert to global CS
        Nhatth += Qbar @ (alphabar[:,[i]])*(z[i+1] - z[i]) # Hyer method for calculating thermal unit loads
        Mhatth += 0.5*Qbar@(alphabar[:,[i]])*(z[i+1]**2-z[i]**2)
    NMhatth = np.vstack((Nhatth,Mhatth))
    NMbarth = NMhatth*dT # resultant thermal loads
    # Laminate CTE
    epsilonhatth = abcd@NMhatth # laminate CTE
    # applied loads and thermal loads
    # NOTE: epsilonbarapp is intentionally overwritten here with the strains
    # resulting from the total applied resultants (it previously held the
    # user-applied strain/curvature input, already folded into NMbarapptotal).
    epsilonbarapp = abcd @ NMbarapptotal
    epsilonbarth = abcd @ NMbarth  # resultant thermal strains
    epsilonbartotal = epsilonbarapp + epsilonbarth
    # Composite response from applied mechanical loads and strains. Average
    # properties only. Used to compare results from tensile test.
    #epsilon_laminate = abcd@NMbarapptotal
    #sigma_laminate = ABD@epsilon_laminate/H
    epsilon_laminate = epsilonbartotal[:]
    sigma_laminate = ABD@epsilonbartotal/H
    alpha_laminate = a@Nhatth
    # determine thermal load and applied loads or strains Hyer pg 435,452
    Nx = NMbarapptotal[0,0]*a_width # units kiloNewtons, total load as would be applied in a tensile test
    Ny = NMbarapptotal[1,0]*b_length # units kN
    #==========================================================================
    # Thermal and mechanical local and global stresses at the ply interface
    #==========================================================================
    # Declare variables for plotting; columns come in pairs (ply bottom, ply top)
    epsilon_app         = zeros((3,2*nply))
    sigma_app           = zeros((3,2*nply))
    epsilonbar_app      = zeros((3,2*nply))
    sigmabar_app        = zeros((3,2*nply))
    epsilon_th          = zeros((3,2*nply))
    sigma_th            = zeros((3,2*nply))
    epsilonbar_th       = zeros((3,2*nply))
    sigmabar_th         = zeros((3,2*nply))
    epsilon             = zeros((3,2*nply))
    epsilonbar          = zeros((3,2*nply))
    sigma               = zeros((3,2*nply))
    sigmabar            = zeros((3,2*nply))
    for i,k in enumerate(range(0,2*nply,2)):
        # stress is calculated at top and bottom of each ply
        Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 )
        Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i])
        ### transverse shear, herakovich pg 254
        #Q44 = mat[materials[plymatindex[i]]].G23
        #Q55 = mat[materials[plymatindex[i]]].G13
        #Qbar44 = Q44*cos(plyangle[i])**2+Q55*sin(plyangle[i])**2
        #Qbar55 = Q55*cos(plyangle[i])**2 + Q44*sin(plyangle[i])**2
        #Qbar45 = (Q55-Q44)*cos(plyangle[i])*sin(plyangle[i])
        #epsilontransverse = array([[gammayz],[gammaxz]])
        #sigmatransverse = array([[Qbar44, Qbar45],[Qbar45, Qbar55]]) @ epsilontransverse
        # Global stresses and strains, applied load only
        epsbarapp1 = epsilonbarapp[0:3] + z[i]*epsilonbarapp[3:7]
        epsbarapp2 = epsilonbarapp[0:3] + z[i+1]*epsilonbarapp[3:7]
        sigbarapp1 = Qbar @ epsbarapp1
        sigbarapp2 = Qbar @ epsbarapp2
        # Local stresses and strains, applied load only
        epsapp1 = T2(plyangle[i]) @ epsbarapp1
        epsapp2 = T2(plyangle[i]) @ epsbarapp2
        sigapp1 = Q @ epsapp1
        sigapp2 = Q @ epsapp2
        # Interface Stresses and Strains
        epsilon_app[:,k:k+2]    = np.column_stack((epsapp1,epsapp2))
        epsilonbar_app[:,k:k+2] = np.column_stack((epsbarapp1,epsbarapp2))
        sigma_app[:,k:k+2]      = np.column_stack((sigapp1,sigapp2))
        sigmabar_app[:,k:k+2]   = np.column_stack((sigbarapp1,sigbarapp2))
        # Global stress and strains, thermal loading only
        epsbarth1 = epsilonbarth[0:3] + z[i]*epsilonbarth[3:7]   - dT*alphabar[:,[i]]
        epsbarth2 = epsilonbarth[0:3] + z[i+1]*epsilonbarth[3:7] - dT*alphabar[:,[i]]
        sigbarth1 = Qbar @ epsbarth1
        sigbarth2 = Qbar @ epsbarth2
        # Local stress and strains, thermal loading only
        epsth1 = T2(plyangle[i]) @ epsbarth1
        epsth2 = T2(plyangle[i]) @ epsbarth2
        sigth1 = Q @ epsth1
        sigth2 = Q @ epsth2
        # Interface Stresses and Strains
        epsilon_th[:,k:k+2]    = np.column_stack((epsth1,epsth2))
        epsilonbar_th[:,k:k+2] = np.column_stack((epsbarth1+dT*alphabar[:,[i]],epsbarth2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
        sigma_th[:,k:k+2]      = np.column_stack((sigth1,sigth2))
        sigmabar_th[:,k:k+2]   = np.column_stack((sigbarth1,sigbarth2))
        # TOTAL global stresses and strains, applied and thermal
        epsbar1 = epsbarapp1 + epsbarth1
        epsbar2 = epsbarapp2 + epsbarth2
        sigbar1 = Qbar @ epsbar1
        sigbar2 = Qbar @ epsbar2
        # TOTAL local stresses and strains , applied and thermal
        eps1 = T2(plyangle[i]) @ epsbar1
        eps2 = T2(plyangle[i]) @ epsbar2
        sig1 = Q @ eps1
        sig2 = Q @ eps2
        # Interface Stresses and Strains
        epsilon[:,k:k+2]    = np.column_stack((eps1,eps2))
        epsilonbar[:,k:k+2] = np.column_stack((epsbar1+dT*alphabar[:,[i]],epsbar2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress
        sigma[:,k:k+2]      = np.column_stack((sig1,sig2))
        sigmabar[:,k:k+2]   = np.column_stack((sigbar1,sigbar2))
    #==========================================================================
    # Strength Failure Calculations
    #==========================================================================
    # Strength Ratio
    STRENGTHRATIO_MAXSTRESS = zeros((3,2*nply))
    # Failure Index
    FAILUREINDEX_MAXSTRESS = zeros((3,2*nply))
    STRENGTHRATIO_TSAIWU = zeros((nply))
    for i,k in enumerate(range(0,2*nply,2)):
        # stress at the ply bottom interface
        s1 = sigma[0,k]
        s2 = sigma[1,k]
        s12 = np.abs(sigma[2,k])
        # strength: tension or compression allowable depending on stress sign
        F1 =  mat[materials[plymatindex[i]]].F1t  if s1 > 0 else  mat[materials[plymatindex[i]]].F1c
        F2 =  mat[materials[plymatindex[i]]].F2t  if s2 > 0 else  mat[materials[plymatindex[i]]].F2c
        F12 = mat[materials[plymatindex[i]]].F12
        # Max Stress failure index ,failure if > 1, then fail, FI = 1/SR
        FAILUREINDEX_MAXSTRESS[0,k:k+2] = s1  / F1
        FAILUREINDEX_MAXSTRESS[1,k:k+2] = s2  / F2
        FAILUREINDEX_MAXSTRESS[2,k:k+2] = s12 / F12
        # Tsai Wu, failure occurs when > 1
        F1t = mat[materials[plymatindex[i]]].F1t
        F1c = mat[materials[plymatindex[i]]].F1c
        F2t = mat[materials[plymatindex[i]]].F2t
        F2c = mat[materials[plymatindex[i]]].F2c
        F12 = mat[materials[plymatindex[i]]].F12
        # inhomogeneous Tsai-Wu criterion # from Daniel
        # http://www2.mae.ufl.edu/haftka/composites/mcdaniel-nonhomogenous.pdf
        f1 =  1/F1t + 1/F1c
        f2 =  1/F2t + 1/F2c
        f11 = -1/(F1t*F1c)
        f22 = -1/(F2t*F2c)
        f66 = 1/F12**2
        f12 = -0.5*sqrt(f11*f22)
        #TW = f1*s1 + f2*s2 + f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2
        # polynomial to solve. Added a machine epsilon to avoid divide by zero errors
        lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16
        lam2 = f1*s1 + f2*s2 + 1e-16
        lam3 = -1
        # smallest positive root of the strength-ratio quadratic
        roots = array([(-lam2+sqrt(lam2**2-4*lam1*lam3)) / (2*lam1) ,
                       (-lam2-sqrt(lam2**2-4*lam1*lam3)) / (2*lam1)] )
        STRENGTHRATIO_TSAIWU[i] = roots[roots>=0].min() # strength ratio
        # f1 =  1/F1t - 1/F1c
        # f2 =  1/F2t - 1/F2c
        # f11 = 1/(F1t*F1c)
        # f22 = 1/(F2t*F2c)
        # f66 = 1/F12**2
        # STRENGTHRATIO_TSAIWU[i] = 2 / (f1*s2 + f2*s2 + sqrt((f1*s1+f2*s2)**2+4*(f11*s1**2+f22*s2**2+f66*s12**2)))
    ### Apply safety factors
    FAILUREINDEX_MAXSTRESS = FAILUREINDEX_MAXSTRESS * SF
    STRENGTHRATIO_TSAIWU = STRENGTHRATIO_TSAIWU / SF
    ###
    MARGINSAFETY_TSAIWU = STRENGTHRATIO_TSAIWU-1   # margin of safety
    # strength ratio for max stress, if < 1, then fail, SR = 1/FI
    STRENGTHRATIO_MAXSTRESS = 1/(FAILUREINDEX_MAXSTRESS+1e-16)
    # margin of safety based on max stress criteria
    MARGINSAFETY_MAXSTRESS = STRENGTHRATIO_MAXSTRESS-1
    # minimum margin of safety for Max stress failure
    MARGINSAFETY_MAXSTRESS_min = MARGINSAFETY_MAXSTRESS.min().min()
    FAILUREINDEX_MAXSTRESS_max = FAILUREINDEX_MAXSTRESS.max().max()
    # minimum margin of safety of both Tsai-Wu and Max Stress
    #MARGINSAFETY_MAXSTRESS_min = np.minimum(MARGINSAFETY_MAXSTRESS.min().min(), MARGINSAFETY_TSAIWU.min() )
    # find critical values for all failure criteria
    #MARGINSAFETY_MAXSTRESS = MARGINSAFETY_MAXSTRESS[~np.isinf(MARGINSAFETY_MAXSTRESS)] # remove inf
    #MARGINSAFETY_TSAIWU = MARGINSAFETY_TSAIWU[~np.isinf(MARGINSAFETY_TSAIWU)] # remove inf
    #==========================================================================
    # Buckling Failure Calculations
    #==========================================================================
    ''' Buckling of Clamped plates under shear load, reddy, 5.6.17'''
    k11 = 537.181*D11/a_width**4 + 324.829*(D12+2*D66)/(a_width**2*b_length**2) + 537.181*D22/b_length**4
    k12 = 23.107/(a_width*b_length)
    k22 = 3791.532*D11/a_width**4 + 4227.255*(D12+2*D66)/(a_width**2*b_length**2) + 3791.532*D22/b_length**4
    Nxycrit0 = 1/k12*np.sqrt(k11*k22)
    FI_clamped_shear_buckling = (abs(Nxy_)*SF) / Nxycrit0  # failure if > 1
    MS_clamped_shear_buckling = 1/(FI_clamped_shear_buckling+1e-16)-1
    '''Kassapoglous pg 126,137
    simply supported plate buckling, assumes Nx>0 is compression
    Nxcrit0 is the axial load that causes buckling
    Nxycrit0 is the shear load that cause buckling
    Nxcrit is the axial load part of a combined load that causes buckling
    Nxycrit is the shear load part of a combined load that causes buckling
    '''
    # no buckling issues if Nx is positive
    # buckling calculations assume Nx compression is positive.
    Nx__ = abs(Nx_) if Nx_ < 0 else np.float64(0)
    Nxy__ = np.float64(0) if Nxy_ == 0 else abs(Nxy_) # assume shear in 1 direction although both directions are ok
    # Nxy=0
    Nxcrit0 = pi**2/a_width**2 * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
    # Nx=0
    Nxycrit0 = 9*pi**4*b_length / (32*a_width**3) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4)
    FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling = 0,0,0,0
    if Nx__ == 0 or Nxy__ == 0:
        FI_Nxy0_buckling = (Nxy__*SF)/Nxycrit0
        FI_Nx0_buckling = (Nx__*SF)/Nxcrit0
    else:
        # interaction term
        k = Nxy__ / Nx__
        Nxcrit = min( abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 + sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) ,
                      abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 - sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) )
        Nxycrit = Nxycrit0*sqrt(1-Nxcrit/Nxcrit0)
        # interactive calc
        FI_Nx_buckling = (Nx__ *SF)/Nxcrit
        FI_Nxy_buckling = (Nxy__*SF)/Nxycrit
    FI_combinedload_simplesupport_buckle = max([FI_Nxy0_buckling,
                                                FI_Nx0_buckling,
                                                FI_Nx_buckling,
                                                FI_Nxy_buckling] )
    MS_min_buckling = 1/(FI_combinedload_simplesupport_buckle+1e-16)-1
    #==========================================================================
    # Facesheet Wrinkling
    #==========================================================================
    #==========================================================================
    # principal laminate stresses
    #==========================================================================
    sigma_principal_laminate = np.linalg.eig(array([[sigma_laminate[0,0],sigma_laminate[2,0],0],
                                                    [sigma_laminate[2,0],sigma_laminate[1,0],0],
                                                    [0,0,0]]))[0]
    tauxy_p = sigma_laminate[2,0]
    sigmax_p = sigma_laminate[0,0]
    sigmay_p = sigma_laminate[1,0]
    thetap = 0.5 * np.arctan( 2*tauxy_p / ((sigmax_p-sigmay_p+1e-16))) * 180/np.pi
    #==========================================================================
    # Printing Results
    #==========================================================================
    if prints:
        print('--------------- laminate1 Stress analysis of fibers----------')
        print('(z-) plyangles (z+)'); print(plyangle)
        print('(z-) plymatindex (z+)'); print(plymatindex)
        print('ply layers') ; print(z)
        print('lamiante thickness, H = {:.4f}'.format(H))
        #print('x- zero strain laminate center, z_eps0_x  = {:.4f}'.format(z_eps0_x))
        #print('y- zero strain laminate center, z_eps0_y  = {:.4f}'.format(z_eps0_y))
        #print('xy-zero strain laminate center, z_eps0_xy = {:.4f}'.format(z_eps0_xy))
        #print('shear center laminate center, z_sc = {:.4f}'.format(z_sc))
        print('Applied Loads'); print(NM)
        print('ABD=');print(ABD)
        print('Ex=   {:.2f}'.format(Exbar) )
        print('Ey=   {:.2f}'.format(Eybar) )
        print('nuxy= {:.2f}'.format(nuxybar) )
        print('Gxy=  {:.2f}'.format(Gxybar) )
        print('epsilon_laminate') ; print(epsilon_laminate)
        print('sigma_laminate') ; print(sigma_laminate)
        print('sigma_principal_laminate') ; print(sigma_principal_laminate)
        print('principal_angle = {:.2f} deg'.format(thetap))
        print('NMbarapp') ; print(NMbarapp)
        print('sigma') ; print(sigma)
        print('\nMax Stress Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format( MARGINSAFETY_MAXSTRESS_min ) )
        print(MARGINSAFETY_MAXSTRESS)
        print('\nTsai-Wu Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format(MARGINSAFETY_TSAIWU.min()))
        print(MARGINSAFETY_TSAIWU)
        print('\nmaximum failure index = {:.4f}'.format(  FAILUREINDEX_MAXSTRESS_max ))
        print(FAILUREINDEX_MAXSTRESS)
        print('\nBuckling MS for Nxy only for clamped edges = {:.4f}\n'.format(MS_clamped_shear_buckling))
        # print('---- Individual Buckling Failure Index (fail>1) combined loads and simple support -----')
        # print('FI_Nxy0 = {:.2f}'.format(FI_Nxy0_buckling) )
        # print('FI_Nx0  = {:.2f}'.format(FI_Nx0_buckling) )
        # print('---- Interactive Buckling Failure Index (fail>1) combined loads and simple support -----')
        # print('FI_Nx    = {:.2f}'.format(FI_Nx_buckling) )
        # print('FI_Nxy   = {:.2f}'.format(FI_Nxy_buckling) )
        # print('---- Buckling Failure Index (fail>1) combined loads and simple support -----')
        # print(FI_combinedload_simplesupport_buckle)
        print('buckling combined loads and simple support MS = {:.4f}\n'.format((MS_min_buckling)))
        print('Mx_midspan = {:.2f}'.format(Mxq) )
        print('My_midspan = {:.2f}'.format(Myq) )
        print('Mxy_midspan = {:.2f}'.format(Mxyq) )
        print('w0_simplesupport = {:.6f}'.format(w0_simplesupport) )
        print('w0_clamped = {:.6f}'.format(w0_clamped) )
        print('w0_clamped_isotropic= {:.6f}'.format(w0_clamped_isotropic) )
    #display(sp.Matrix(sigmabar))
    #==========================================================================
    # Plotting
    #==========================================================================
    if plots:
        windowwidth = 800
        windowheight = 450
        # z coordinates for plotting, repeated at each ply interface
        zplot = zeros(2*nply)
        for i,k in enumerate(range(0,2*nply,2)):  # = nply
            zplot[k:k+2] = z[i:i+2]
        #legendlab = ['total','thermal','applied','laminate']
        # global stresses and strains
        mylw = 1.5 #linewidth
        # Global Stresses and Strains
        f1, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
        f1.canvas.set_window_title('Global Stress and Strain of %s laminate' % (plyangle))
        stresslabel = ['$\sigma_x$','$\sigma_y$','$\\tau_{xy}$']
        strainlabel = ['$\epsilon_x$','$\epsilon_y$','$\gamma_{xy}$']
        for i,ax in enumerate([ax1,ax2,ax3]):
            ## the top axes
            ax.set_ylabel('thickness,z')
            ax.set_xlabel(strainlabel[i])
            ax.set_title(' Ply Strain '+strainlabel[i])
            ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4))  # scilimits=(-2,2))
            ax.plot(epsilonbar[i,:],     zplot, color='blue', lw=mylw, label='total')
            ax.plot(epsilonbar_th[i,:],  zplot, color='red', lw=mylw, alpha=0.75, linestyle='--',  label='thermal')
            ax.plot(epsilonbar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
            ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
            ax.grid(True)
            #ax.set_xticks(linspace( min(ax.get_xticks()) , max(ax.get_xticks()) ,6))
        for i,ax in enumerate([ax4,ax5,ax6]):
            ax.set_ylabel('thickness,z')
            ax.set_xlabel(stresslabel[i])
            ax.set_title(' Ply Stress '+stresslabel[i])
            ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
            ax.plot(sigmabar[i,:],     zplot, color='blue', lw=mylw, label='total')
            ax.plot(sigmabar_th[i,:],  zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
            ax.plot(sigmabar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
            ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
            ax.grid(True)
        leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
        tight_layout()
        # best-effort window placement; fails on backends without setGeometry
        try:
            mngr = plt.get_current_fig_manager()
            mngr.window.setGeometry(25,50,windowwidth,windowheight)
        except Exception:
            pass
        f1.show()
        #plt.savefig('global-stresses-strains.png')
        ### Local Stresses and Strains
        f2, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True)
        f2.canvas.set_window_title('Local Stress and Strain of %s laminate' % (plyangle))
        stresslabel = ['$\sigma_1$','$\sigma_2$','$\\tau_{12}$']
        strainlabel = ['$\epsilon_1$','$\epsilon_2$','$\gamma_{12}$']
        # NOTE(review): F1t/F1c/F2t/F2c/F12 here hold the values from the LAST
        # ply of the failure loop above — confirm that is the intent.
        strengthplot = [ [ [F1t,F1t],[zplot.min(), zplot.max()], [F1c, F1c],[zplot.min(), zplot.max()] ] ,
                         [ [F2t,F2t],[zplot.min(), zplot.max()], [F2c, F2c],[zplot.min(), zplot.max()] ] ,
                         [ [F12,F12],[zplot.min(), zplot.max()], [-F12,-F12],[zplot.min(), zplot.max()] ] ]
        for i,ax in enumerate([ax1,ax2,ax3]):
            ## the top axes
            ax.set_ylabel('thickness,z')
            ax.set_xlabel(strainlabel[i])
            ax.set_title(' Ply Strain '+strainlabel[i])
            ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4))  # scilimits=(-2,2))
            ax.plot(epsilon[i,:],     zplot, color='blue', lw=mylw, label='total')
            ax.plot(epsilon_th[i,:],  zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
            ax.plot(epsilon_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
            ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
            ax.grid(True)
        for i,ax in enumerate([ax4,ax5,ax6]):
            ax.set_ylabel('thickness,z')
            ax.set_xlabel(stresslabel[i])
            ax.set_title(' Ply Stress '+stresslabel[i])
            ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2))
            ax.plot(sigma[i,:],     zplot, color='blue', lw=mylw, label='total')
            ax.plot(sigma_th[i,:],  zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal')
            ax.plot(sigma_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied')
            ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate')
            ### plots strengths
            #ax.plot(strengthplot[i][0],strengthplot[i][1], color='yellow', lw=mylw)
            ax.grid(True)
        leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
        tight_layout()
        # best-effort window placement; fails on backends without setGeometry
        try:
            mngr = plt.get_current_fig_manager()
            mngr.window.setGeometry(windowwidth+50,50,windowwidth,windowheight)
        except Exception:
            pass
        f2.show()
        #plt.savefig('local-stresses-strains.png')
        ### Failure
        f3, ((ax1,ax2,ax3)) = plt.subplots(1,3, sharex=True, sharey=True)
        f3.canvas.set_window_title('Failure Index(failure if > 1), %s laminate' % (plyangle))
        stresslabel = ['$\sigma_1/F_1$','$\sigma_2/F_2$','$\\tau_{12}/F_{12}$']
        for i,ax in enumerate([ax1,ax2,ax3]):
            ## the top axes
            ax.set_ylabel('thickness,z')
            ax.set_xlabel(stresslabel[i])
            #ax.set_title(' Ply Strain at $\epsilon=%f$' % (epsxapp*100))
            ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4))  # scilimits=(-2,2))
            ax.plot(FAILUREINDEX_MAXSTRESS[i,:], zplot, color='blue', lw=mylw, label='total')
            ax.grid(True)
            ax.set_title('Failure Index, fail if > 1')
        #leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3)
        tight_layout()
        # best-effort window placement; fails on backends without setGeometry
        try:
            mngr = plt.get_current_fig_manager()
            mngr.window.setGeometry(25,windowheight+100,windowwidth,windowheight)
        except Exception:
            pass
        # BUG FIX: was f2.show(), which re-showed the previous (local
        # stress/strain) figure instead of the failure-index figure f3.
        f3.show()
        #plt.savefig('local-stresses-strains.png')
        ### warpage
        res = 100
        Xplt,Yplt = np.meshgrid(np.linspace(-a_width/2,a_width/2,res), np.linspace(-b_length/2,b_length/2,res))
        epsx = epsilon_laminate[0,0]
        epsy = epsilon_laminate[1,0]
        epsxy = epsilon_laminate[2,0]
        kapx = epsilon_laminate[3,0]
        kapy = epsilon_laminate[4,0]
        kapxy = epsilon_laminate[5,0]
        ### displacement
        w = -0.5*(kapx*Xplt**2 + kapy*Yplt**2 + kapxy*Xplt*Yplt)
        u = epsx*Xplt  # pg 451 hyer
        fig = plt.figure('plate-warpage')
        # NOTE(review): gca(projection='3d') is deprecated in newer matplotlib;
        # the replacement is fig.add_subplot(projection='3d').
        ax = fig.gca(projection='3d')
        ax.plot_surface(Xplt, Yplt, w+zmid[0], cmap=mpl.cm.jet, alpha=0.3)
        ###ax.auto_scale_xyz([-(a_width/2)*1.1, (a_width/2)*1.1], [(b_length/2)*1.1, (b_length/2)*1.1], [-1e10, 1e10])
        ax.set_xlabel('plate width,y-direction,in')
        ax.set_ylabel('plate length,x-direction, in')
        ax.set_zlabel('warpage,in')
        #ax.set_zlim(-0.01, 0.04)
        #mngr = plt.get_current_fig_manager() ; mngr.window.setGeometry(450,550,600, 450)
        # best-effort window placement; fails on backends without setGeometry
        try:
            mngr = plt.get_current_fig_manager()
            mngr.window.setGeometry(windowwidth+50,windowheight+100,windowwidth,windowheight)
        except Exception:
            pass
        plt.show()
        #plt.savefig('plate-warpage')
    return MARGINSAFETY_MAXSTRESS_min, FAILUREINDEX_MAXSTRESS_max
def plate():
    '''
    Composite plate mechanics: cylindrical bending of a laminate via CLPT
    (classical laminated plate theory), solved symbolically with sympy, then
    per-layer stress extraction and plotting.

    TODO - results need vetted

    NOTE(review): this function relies on names provided at module scope by
    star imports / earlier definitions (sp, np, plt, array, inv, linspace,
    sin, cos, symbols, Function, dsolve, pprint, plot3d, copy, get_ipython,
    and the laminate matrices Sij6, Cij6, Aij, Bij, Dij) -- verify they exist
    before calling.
    '''
    #==========================================================================
    # Initialize
    #==========================================================================
    get_ipython().magic('matplotlib')
    plt.close('all')
    plt.rcParams['figure.figsize'] = (12, 8)
    plt.rcParams['font.size'] = 13
    #plt.rcParams['legend.fontsize'] = 14
    #==========================================================================
    # Import Material Properties
    #==========================================================================
    plythk = 0.0025
    plyangle = array([0,90,-45,45,0]) * np.pi/180 # angle for each ply
    nply = len(plyangle) # number of plies
    laminatethk = np.zeros(nply) + plythk
    H = sum(laminatethk) # plate thickness
    # Create z dimensions of laminate
    z_ = np.linspace(-H/2, H/2, nply+1)
    a = 20   # plate width
    b = 10   # plate height
    q0_ = 5.7  # plate load
    # Transversly isotropic material properties
    E1 = 150e9
    E2 = 12.1e9
    nu12 = 0.248
    G12 = 4.4e9
    nu23 = 0.458
    G23 = E2 / (2*(1+nu23))
    # Failure Strengths (t = tension, c = compression)
    F1t = 1500e6
    F1c = -1250e6
    F2t = 50e6
    F2c = -200e6
    F12t = 100e6
    F12c = -100e6
    Strength = np.array([[F1t, F1c],
                         [F2t, F2c],
                         [F12t, F12c]])
    th = sp.symbols('th')
    # Stiffness matrix in material coordinates
    Cijm6 = inv(Sij6)
    # reduced stiffness in structural coordinates
    # NOTE(review): Cij6 is not defined inside this function; it must come
    # from module scope -- confirm it is the 6x6 stiffness, not Cijm6 above.
    Cij = sp.Matrix([[Cij6[0,0], Cij6[0,1], 0],
                     [Cij6[0,1], Cij6[1,1], 0],
                     [0, 0, Cij6[5,5] ]] )
    # plane-stress transformation matrix as a function of ply angle th
    Tij = sp.Matrix([[cos(th)**2, sin(th)**2, 2*sin(th)*cos(th)],
                     [sin(th)**2, cos(th)**2, -2*sin(th)*cos(th)],
                     [-cos(th)*sin(th), sin(th)*cos(th), (cos(th)**2-sin(th)**2)]])
    ## Cylindrical Bending of a laminated plate
    # displacement in w (z direction)
    from sympy.abc import x
    f = Function('f')
    eq = dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), hint = '1st_homogeneous_coeff_best', simplify=False)
    pprint(eq)
    #==============================================================================
    th,x,y,z,q0,C1,C2,C3,C4,C5,C6,C7,A11,B11,D11,A16,B16 = symbols('th x y z q0 C1 C2 C3 C4 C5 C6 C7 A11 B11 D11 A16 B16')
    wfun = Function('wfun')
    ufun = Function('ufun')
    ## EQ 4.4.1a
    eq1 = A11*ufun(x).diff(x,2) - B11*wfun(x).diff(x,3)    # C5 C1
    ## EQ 4.4.1b
    eq2 = A16*ufun(x).diff(x,2) - B16*wfun(x).diff(x,3)    # C5 C1
    ## EQ 4.4.1c
    eq3 = B11*ufun(x).diff(x,3) - D11*wfun(x).diff(x,4) + q0
    # solve eq1 eq2 and eq3 to get the w and u functions
    # displacement in w (z direction) from eq1,eq2,eq3
    wfun = A11*q0*x**4 / (4*(6*B11**2-6*A11*D11)) + C1 + C2*x + C3*x**2 + C4*x**3 # C1 C2 C3 C4
    # displacement in u (x direction) from eq1,eq2,eq3
    ufun = B11*q0*x**3 / (6*(B11**2-A11*D11)) + C7 + x*C6 + 3*B11*x**2*C5/A11 # C5 C6 C7
    # cond1 -> w(0)=0 at x(0), roller
    C1sol = sp.solve(wfun.subs(x,0), C1)[0] # = 0
    # cond2 -> angle at dw/dx at x(0) is 0, cantilever
    C2sol = sp.solve(wfun.diff(x).subs(x,0),C2)[0] # = 0
    # cond3 -> w(z) = 0 at x(a), roller
    C4sol1 = sp.solve(wfun.subs({x:a,C1:C1sol,C2:C2sol}),C4)[0] # C3
    # cond4 u = 0 at x = 0
    C7sol = sp.solve(ufun.subs(x,0),C7)[0] #=0
    # u=0 at x = a
    C5sol1 = sp.solve(ufun.subs({x:a, C7:C7sol}),C5)[0] #C6
    # cond 5 EQ 4.4.14a Myy = 0 @ x(a); roller carries no moment
    C6sol1 = sp.solve( ( ((B11*ufun.diff(x)+0.5*wfun.diff(x)**2 ) - D11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol})), C6)[0] # C6 C3
    # EQ 4.4.13a, Nxx = 0 @ x(0); roller carries no Nxx
    C6sol2 = sp.solve( ((A11* ufun.diff(x) + 0.5*wfun.diff(x)**2)-B11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol}),C6)[0] # C6 C3
    C3sol = sp.solve(C6sol1 - C6sol2,C3)[0]
    C4sol = C4sol1.subs(C3,C3sol)
    C6sol = sp.simplify(C6sol2.subs(C3,C3sol))
    C5sol = sp.simplify(C5sol1.subs(C6,C6sol))
    # substitute integration constants with actual values ( _ is actual number)
    C1_ = copy(C1sol)
    C2_ = copy(C2sol)
    C7_ = copy(C7sol)
    C3_ = C3sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    C4_ = C4sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    C5_ = C5sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    C6_ = C6sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    # function w(x) vertical displacement w along z with actual values
    wsol = wfun.subs({q0:q0_, C1:C1_, C2:C2_, C3:C3_, C4:C4_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    # function u(x) horizontal displacement u along x with actual values
    usol = ufun.subs({q0:q0_, C5:C5_, C6:C6_, C7:C7_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]})
    # 3d plots
    plot3d(wsol,(x,0,a), (y,0,b))
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('Cylindrical Bending -Displacement of a plate With CLPT')
    ## Strain calculation
    # eq 3.3.8 (pg 116 reddy (pdf = 138)): von Karman strain
    # eps_x = du/dx + (1/2)(dw/dx)^2 - z d2w/dx2
    # BUGFIX: the membrane term was previously wsol.diff(x)**5; eq 3.3.8 and
    # the boundary-condition expressions above (0.5*wfun.diff(x)**2) use the
    # square, so **2 is correct.
    epstotal = array([[usol.diff(x) + 0.5* wsol.diff(x)**2 - z*wsol.diff(x,2)],[0],[0]])
    epsx = epstotal[0,0]
    ## Calculating and plotting Stress in each layer
    res = 8 # accuracy of finding max and min stress
    xplot = linspace(0,a,res)
    yplot = linspace(0,b,res)
    G0 = sp.symbols('G0')
    Globalminstress = np.zeros((3, nply))
    Globalmaxstress = np.zeros((3, nply))
    for kstress in range(3): # stress state s_x, s_y, s_xz
        plt.figure(kstress+1)
        for klay in range(nply): # loop through all layers
            thplot = plyangle[klay]
            zplot = linspace(z_[klay],z_[klay+1],res)
            # NOTE(review): declared (len(zplot), len(xplot)) but indexed
            # [x-index, z-index] below; shapes coincide because both are res.
            stressplot = np.zeros((len(zplot),len(xplot)))
            ## Calc Stresses
            if kstress == 2:
                # Shear stresses: integrate the x-equilibrium of the last
                # computed s_stress (from the kstress==1 pass) through z.
                G0_ = -sp.integrate(s_stress[0].diff(x),z)+G0
                # solve for shear stresses from s_1
                s_xz = sp.solve(G0_,G0)[0]
                # out of plane shear S_xz does not need to be transformed ??
                plot3d(s_xz, (x,0, a), (z, z_[klay], z_[klay+1]) )
            else:
                # normal stresses in structural coordinates ...
                s_stress = Cij.subs(th,thplot) @ epstotal
                # ... then rotated into material coordinates
                m_stress = Tij.subs(th,thplot) @ s_stress
            ## find max stress in each layer by sampling the (x, z) grid
            ii=0
            for i in xplot:
                jj=0
                for j in zplot:
                    if kstress == 2:
                        stressplot[ii,jj] = s_xz.subs({x:i, z:j})
                    else:
                        stressplot[ii,jj] = m_stress[kstress].subs({x:i, z:j})
                    jj += 1  # BUGFIX: was 'jj+=jj' (0+0), which froze the index at 0
                ii += 1      # BUGFIX: was 'ii+=ii' (0+0), which froze the index at 0
            Globalminstress[kstress,klay] = np.min(stressplot)
            Globalmaxstress[kstress,klay] = np.max(stressplot)
        plt.title(r'\sigma_%i' % kstress)  # raw string: '\s' is not a valid escape
    ## Plot max stress and failure strength
    plt.figure()
    for i in range(3):
        plt.subplot(1, 3, i+1)
        plt.bar(range(nply), Globalmaxstress[i,:])
        plt.bar(range(nply), Globalminstress[i,:])
        plt.scatter(range(nply),np.ones(nply) * Strength[i,0])
        plt.scatter(range(nply),np.ones(nply) * Strength[i,1])
        plt.xlabel('layer')
        plt.title(r'\sigma%i' % i)  # raw string: '\s' is not a valid escape
def plate_navier():
    """Navier solution for an a*b*h simply supported plate under uniform
    load q = q0 (CLPT).

    Placeholder only: the original MATLAB implementation is retained below
    as a dead string literal and still needs to be ported to Python.
    """
    pass
    '''
    q0,a,b,m,n,x,y = sp.symbols('q0 a b m n x y')
    Qmn = 4/(a*b)*sp.integrate( sp.integrate( q0*sp.sin(m*pi*x/a)*sp.sin(n*pi*y/b),(x,0,a)) ,(y,0,b))
    dmn = pi**4 / b**4 * (DTij(1,1)*m**4*(b/a)**4 + 2* (DTij(1,2)+2*DTij(6,6)) *m**2*n**2*(b/a)**2 + DTij(2,2)*n**4)
    Wmn = Qmn/dmn;
    w0 = Wmn * sin(m*pi*x/a) * sin(n*pi*y/b);
    w0_ = subs(w0,[q0 a b],[-q0_ a_ b_] );
    figure
    w0sum = 0;
    for n_ = 1:10
        for m_ = 1:10
            w0sum = w0sum + subs(w0_,[n m],[n_ m_]);
        end
    end
    w0sum;
    % xplot = linspace(0,a_,res);
    % yplot = linspace(0,b_,res);
    ii=1;
    for i = xplot
        jj=1;
        for j = yplot
            w0plot(ii,jj) = subs(w0sum,[x y],[i j]);
            jj=jj+1;
        end
        ii=ii+1;
    end
    surf(xplot,yplot,w0plot)
    colorbar
    set(gca,'PlotBoxAspectRatio',[2 1 1]);
    xlabel('length a, u(x)')
    ylabel('length b, v(y)')
    zlabel('w(z)')
    '''
class laminate(object):
    """
    IN-WORK - laminate object for composite material analysis.

    Builds ply geometry (z coordinates) and the 6x6 ABD stiffness matrix
    from material properties loaded out of ``compositematerials.csv``.

    NOTE(review): relies on module-level names (array, zeros, np, pd, os,
    inv, sin, cos, pi) supplied by star imports elsewhere in this file.
    """
    # constructor
    def __init__(self, plyangle, matindex, matname):
        # run when laminate is instantiated
        # loads materials used
        self.plyangle = plyangle    # ply angles in degrees, bottom to top
        self.matindex = matindex    # index into matname for each ply
        self.matname = matname      # list of material names to load
        self.__mat = self.__import_matprops(matname)
        # create a simple function to handle CTE properties
        # NOTE(review): defined but not called anywhere in this class.
        def __alphaf(self, mat):
            return array([[mat.alpha1], [mat.alpha2], [0]])
        # per-ply thickness taken from the material table
        self.laminatethk = array([self.__mat[matname[i]].plythk for i in matindex ])
        self.nply = len(self.laminatethk) # number of plies
        self.H = np.sum(self.laminatethk) # plate thickness
        # area = a_width*H
        # z[k]   = bottom coordinate of ply k (z=0 at the midplane)
        # zmid[k] = mid-thickness coordinate of ply k
        z = zeros(self.nply+1)
        zmid = zeros(self.nply)
        z[0] = -self.H/2
        for i in range(self.nply):
            z[i+1] = z[i] + self.laminatethk[i]
            zmid[i] = z[i] + self.laminatethk[i]/2
        self.z = z
        self.zmid = zmid
        self.__abdmatrix()
    def __Qf(self, E1,E2,nu12,G12):
        '''transversly isptropic compliance matrix. pg 58 herakovich
        G12 = E1/(2*(1+nu12)) if isotropic'''
        nu21 = E2*nu12/E1
        Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0],
                   [ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0],
                   [0, 0, G12]])
        return Q
    def __T1(self, th):
        '''Stress Transform for Plane Stress
        th=ply angle in degrees
        voight notation for stress tranform. sigma1 = T1 @ sigmax
        recall T1(th)**-1 == T1(-th)'''
        n = sin(th*pi/180)
        m = cos(th*pi/180)
        T1 = array( [[m**2, n**2, 2*m*n],
                     [n**2, m**2,-2*m*n],
                     [-m*n, m*n,(m**2-n**2)]])
        return T1
    def __T2(self, th):
        '''Strain Transform for Plane Stress
        th=ply angle in degrees
        voight notation for strain transform. epsilon1 = T2 @ epsilonx'''
        n = sin(th*pi/180)
        m = cos(th*pi/180)
        T2 = array( [[m**2, n**2, m*n],
                     [n**2, m**2,-m*n],
                     [-2*m*n, 2*m*n, (m**2-n**2)]])
        return T2
    # private method
    def __abdmatrix(self):
        '''used within the object but not accessible outside

        Assembles self.ABD (6x6): [[A, B], [B, D]] from the per-ply reduced
        stiffnesses rotated into structural coordinates.'''
        #==========================================================================
        # ABD Matrix Compute
        #==========================================================================
        # Reduced stiffness matrix for a plane stress ply in principal coordinates
        # calcluating Q from the Compliance matrix may cause cancE1ation errors
        A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3))
        for i in range(self.nply): # = nply
            Q = self.__Qf(self.__mat[self.matname[self.matindex[i]]].E1,
                          self.__mat[self.matname[self.matindex[i]]].E2,
                          self.__mat[self.matname[self.matindex[i]]].nu12,
                          self.__mat[self.matname[self.matindex[i]]].G12 )
            # rotate ply stiffness into structural (laminate) coordinates
            Qbar = inv(self.__T1(self.plyangle[i])) @ Q @ self.__T2(self.plyangle[i]) # solve(T1(plyangle[i]), Q) @ T2(plyangle[i])
            # extensional stiffness
            A += Qbar*(self.z[i+1]-self.z[i])
            # coupling stiffness
            B += (1/2)*Qbar*(self.z[i+1]**2-self.z[i]**2)
            # bending or flexural laminate stiffness relating moments to curvatures
            D += (1/3)*Qbar*(self.z[i+1]**3-self.z[i]**3)
        # laminate stiffness matrix
        ABD = zeros((6,6))
        ABD[0:3,0:3] = A
        ABD[0:3,3:6] = B
        ABD[3:6,0:3] = B
        ABD[3:6,3:6] = D
        self.ABD = ABD
    # method
    def available_materials(self):
        '''show the materials available in the library'''
        matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
        print('---available materials---')
        for k in matprops.columns.tolist():
            print(k)
        print('-------------------------')
    # private method to be used internally
    def __import_matprops(self, mymaterial=['T300_5208','AL_7075']):
        '''
        import material properties

        Returns the columns of compositematerials.csv selected by
        *mymaterial*, coerced to numeric where possible.
        '''
        matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0)
        # NOTE(review): only prints the available names, then still indexes
        # with the empty selection -- presumably a debugging aid; verify.
        if mymaterial==[] or mymaterial=='':
            print(matprops.columns.tolist())
        mat = matprops[mymaterial]
        #mat.applymap(lambda x:np.float(x))
        mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore'))
        return mat
def failure_envelope_laminate(Nx,Ny,Nxy,Mx,My,Mxy,q0,mymat,layup):
    '''
    Return the maximum-stress failure index of *layup* (material *mymat*)
    under the given running loads Nx..Mxy and pressure q0.
    '''
    analysis_kwargs = dict(
        NM=[Nx, Ny, Nxy, Mx, My, Mxy],
        ek=[0, 0, 0, 0, 0, 0],
        q0=q0,
        plyangle=layup,
        plymatindex=[0, 0, 0, 0],
        materials=[mymat],
        platedim=[10, 10],
        zoffset=0,
        SF=1.0,
        plots=0,
        prints=0,
    )
    # only the failure index is needed; the margin of safety is discarded
    _, failure_index_max = laminate_calcs(**analysis_kwargs)
    return failure_index_max
def plot_single_max_failure_loads(mymat='E-Glass Epoxy fabric M10E-3783', mylayup=[0,45,45,0] ):
    '''
    For each single load component (Nx, Ny, Nxy, Mx, My, Mxy, q0), estimate
    the tensile and compressive values at which the max-stress failure index
    reaches 1, print them, and plot a rough Nx-Nxy failure envelope.

    Returns the list of formatted "load = tension , compression" strings.

    older version used newton method for root finding
        scipy.optimize.newton(laminate_min, guess)
    TODO: Current calculation is stupid using random points to plot. fix it
    by use FI, failure index instead of margin to generate a
    linear relationship and envelope

    NOTE(review): mutable default *mylayup* kept for interface
    compatibility; it is never mutated here.
    '''
    def _critical_load(limit_func, N_pts):
        # Fit FI(N) = m*N + b through the two sampled points and solve
        # FI == 1 (failure). This assumes FI is linear in the load, which
        # holds for a single load component.
        FI = [limit_func(N) for N in N_pts]
        m = (FI[1] - FI[0]) / (N_pts[1] - N_pts[0])
        b = FI[1] - m * N_pts[1]
        return (1 - b) / m

    loadnamelist = ['Nx','Ny','Nxy','Mx','My','Mxy','q0']
    # one single-load closure per component, in the same order as the names
    laminate_min_list = []
    laminate_min_list.append(lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup))
    laminate_min_list.append(lambda N: failure_envelope_laminate(0,N,0,0,0,0,0,mymat,mylayup))
    laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup))
    laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,N,0,0,0,mymat,mylayup))
    laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,N,0,0,mymat,mylayup))
    laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,N,0,mymat,mylayup))
    laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,0,N,mymat,mylayup))
    envelope_loads = []
    N_t = array([0,1])   # tension sample points
    N_c = array([0,-1])  # compression sample points
    for loadname, laminate_min in zip(loadnamelist, laminate_min_list):
        N_crit_t = _critical_load(laminate_min, N_t)
        N_crit_c = _critical_load(laminate_min, N_c)
        envelope_loads.append('{} = {:.1f} , {:.1f}'.format(loadname,N_crit_t, N_crit_c))
    print('------------- enveloped loads for {} {} -----------------'.format(mylayup, mymat))
    for k in envelope_loads:
        print(k)
    # plot envelope: seed with the four pure-Nx / pure-Nxy critical points
    Nx_env = []
    Nxy_env = []
    laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup)
    # compression then tension along Nx
    Nx_env.append( _critical_load(laminate_min, N_c) )
    Nxy_env.append( 0 )
    Nx_env.append( _critical_load(laminate_min, N_t) )
    Nxy_env.append( 0 )
    laminate_min = lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup)
    # compression then tension along Nxy
    Nxy_env.append( _critical_load(laminate_min, N_c) )
    Nx_env.append( 0 )
    Nxy_env.append( _critical_load(laminate_min, N_t) )
    Nx_env.append( 0 )
    laminate_min_Nx_Nxy_func = lambda Nx,Nxy: failure_envelope_laminate(Nx,0,Nxy,0,0,0,0,mymat,mylayup)
    n = 500
    f = 1.25 # < 1
    # sample random combined (Nx, Nxy) loads and keep the non-failing ones
    Nx_r = np.random.randint(Nx_env[0]*f,Nx_env[1]*f, n)
    Nxy_r = np.random.randint(Nxy_env[2]*f,Nxy_env[3]*f, n)
    for Nx_ri, Nxy_ri in zip(Nx_r, Nxy_r):
        FI = laminate_min_Nx_Nxy_func(Nx_ri, Nxy_ri)
        if FI < 1:
            Nx_env.append(Nx_ri)
            Nxy_env.append(Nxy_ri)
    # convex hull of all safe points approximates the failure envelope
    points = array([ [x,xy] for x,xy in zip(Nx_env, Nxy_env)])
    hull = scipy.spatial.ConvexHull(points)
    plot(points[:,0], points[:,1], 'bo')
    for simplex in hull.simplices:
        plot(points[simplex, 0], points[simplex, 1], 'k-')
    xlabel('Nx, lb/in')
    ylabel('Nxy, lb/in')
    title('Failure envelope')
    return envelope_loads
def my_laminate_with_loading():
    """Run laminate_calcs on a fixed [0/60/-60]s E-glass layup under a pure
    Nx = 50 lb/in running load; results are printed by laminate_calcs."""
    # running loads (lb/in) and moments: only Nx is non-zero
    Nx, Ny, Nxy = 50, 0, 0
    Mx, My, Mxy = 0, 0, 0
    q0 = 0  # pressure
    # Qx = 0
    # Qy = 0
    # panel geometry
    a_width = 50
    b_length = 3.14*6.75
    ## sandwich laminate
    # plyangle= [45,45,0, 45,45],
    # plymatindex=[0, 0, 1, 0, 0],
    # create a 45 carbon cloth panel with a 0.5 inch rohacell core
    laminate_calcs(NM=[Nx, Ny, Nxy, Mx, My, Mxy],
                   ek=[0, 0, 0, 0, 0, 0],
                   q0=q0,
                   plyangle=[0, 60, -60, -60, 60, 0],
                   plymatindex=[0, 0, 0, 0, 0, 0],
                   materials=['E-Glass Epoxy Uni'],
                   platedim=[a_width, b_length],
                   zoffset=0,
                   SF=2.0,
                   plots=0,
                   prints=1)
if __name__=='__main__':
    # Script entry point: run one demo analysis. The commented calls below
    # are alternative demos kept for convenience.
    #plot_single_max_failure_loads()
    #plot_failure_index()
    my_laminate_with_loading()
    #material_plots(['E-Glass Epoxy fabric M10E-3783'])
    #plate()
    #plot_Nx_Nxy_failure_envelope(['Carbon_cloth_AGP3705H'])
    #plot_single_max_failure_loads()
    # # reload modules
    # import importlib ; importlib.reload
    # from composites import laminate
    # plyangle = [0,45]
    # matindex = [0,0]
    # matname = ['graphite-polymer_SI']
    # lam1 = laminate(plyangle, matindex, matname)
    # lam1.ABD
|
import copy
import itertools
from graph_generator import generate_complete_graph
from models import LinearLayout, Graph
from view import show_linear_layouts
def observation_1(show_layouts=True):
    """
    Generate all possible 2-stack 1-queue layouts of a complete graph with
    8 vertices. Except that edges (i, i + 1) are always assigned to a stack
    page, which skips minor variations and reduces the total number to 32
    layouts.

    Returns the list of valid LinearLayout objects; optionally displays
    each one when *show_layouts* is True.
    """
    num_vertices = 8
    stacks = 2
    queues = 1
    graph = generate_complete_graph(num_vertices)
    mll = LinearLayout(graph=graph,
                       order=list(range(1, num_vertices + 1)),
                       stacks=stacks, queues=queues)
    # Add edges that can always be on the stack without crossings:
    # the spine edges (i, i+1) plus the outer edge (1, n)
    remaining_edges = graph.edges
    if stacks:
        for i in range(1, num_vertices):
            mll.stacks[0].append((i, i + 1))
        mll.stacks[0].append((1, num_vertices))
        remaining_edges = list(set(graph.edges) - set(mll.stacks[0]))

    def search(mll, edges):
        # Exhaustive backtracking: try each remaining edge on every stack
        # and queue page; record a deep copy into the enclosing `mlls` list
        # whenever all edges have been placed. Returning None after the
        # snapshot forces the caller to backtrack, so ALL layouts are found.
        edges = list(edges)
        if not edges:
            mlls.append(copy.deepcopy(mll))
            return None
        edge = edges.pop()
        for stack in mll.stacks:
            stack.append(edge)
            if mll.is_stack_valid(stack):
                if search(mll, edges):
                    return mll
            stack.remove(edge)
        for queue in mll.queues:
            queue.append(edge)
            if mll.is_queue_valid(queue):
                if search(mll, edges):
                    return mll
            queue.remove(edge)

    # `mlls` is captured by the closure above; it must exist before the call
    mlls = []
    search(mll, remaining_edges)
    for mll in mlls:
        if not mll.is_valid():
            raise Exception('MLL is not valid!')
        # NOTE(review): original indentation was ambiguous; this shows every
        # generated layout -- confirm against the upstream source.
        if show_layouts:
            show_linear_layouts([mll])
    return mlls
def observation_2():
    """
    Show that given two complete graphs with 8 vertices on a 2-stack 1-queue
    layout the vertices can only interleave once.

    Exhaustively combines every pair of K8 layouts from observation_1 with
    every interleaving pattern and checks that only the single expected
    vertex order yields a valid layout; prints a warning otherwise.
    """
    mlls_1 = observation_1(show_layouts=False)
    mlls_2 = []
    # create a second list of layouts that contains the same layouts and
    # additionally all of them with the stack pages swapped
    for mll in copy.deepcopy(mlls_1):
        mlls_2.append(mll)
        mll_new = copy.deepcopy(mll)
        mll_new.stacks[0], mll_new.stacks[1] = \
            mll_new.stacks[1], mll_new.stacks[0]
        mlls_2.append(mll_new)
    # Create a graph of two independent complete graphs with 8 vertices
    # (vertices 1-8 and 9-16)
    graph = Graph()
    num_vertices = 8
    for i in range(1, num_vertices + 1):
        for j in range(i + 1, num_vertices + 1):
            graph.add_edge(i, j)
    for i in range(num_vertices + 1, num_vertices * 2 + 1):
        for j in range(i + 1, num_vertices * 2 + 1):
            graph.add_edge(i, j)
    mlls = []
    for permutation in list(itertools.product([True, False], repeat=8)):
        if all(permutation):
            # Skip the permutation that would separate both K8
            continue
        for m1 in mlls_1:
            for m2 in mlls_2:
                order = []
                o1 = list(range(1, 9))     # vertices of the first K8
                o2 = list(range(9, 17))    # vertices of the second K8
                # All these permutations create all the possible orders in
                # which the two K8 can interleave: True takes the next
                # first-K8 vertex, False takes one second-K8 vertex followed
                # by one first-K8 vertex.
                for p in permutation:
                    if p:
                        order.append(o1.pop(0))
                    else:
                        order.append(o2.pop(0))
                        order.append(o1.pop(0))
                while o2:
                    order.append(o2.pop(0))
                mll = LinearLayout(graph=graph, order=order,
                                   stacks=2, queues=1)
                # Create a new layout by combining the layouts of m1 and m2.
                # Note that the edges of m2 need to be adjusted here. m2 has
                # vertices form 1-8 but here they should become 9-16
                mll.stacks[0] = m1.stacks[0] + [(a + 8, b + 8) for a, b in m2.stacks[0]]
                mll.stacks[1] = m1.stacks[1] + [(a + 8, b + 8) for a, b in m2.stacks[1]]
                mll.queues[0] = m1.queues[0] + [(a + 8, b + 8) for a, b in m2.queues[0]]
                if mll.is_valid():
                    # This order is the only possible order in which
                    # both K8 can interleave (except for the mirrored
                    # version). Note that the list is ordered
                    # except that 9 and 8 are swapped.
                    if order == [1, 2, 3, 4, 5, 6, 7, 9, 8, 10, 11, 12, 13, 14, 15, 16]:
                        mlls.append(mll)
                    else:
                        print('The observation is false!')
    print('%s layouts found' % len(mlls))
    for mll in mlls:
        pass  # Uncomment the next line to show the layouts
        #show_linear_layouts([mll])
def observation_3():
    """
    Show that given two complete graphs with 8 vertices that share two vertices
    there is only one vertex order possible where the shared vertices are
    exactly in the middle.

    Iterates over every choice of two shared vertices (sv1 < sv2) in a
    14-vertex graph, combines all K8 layout pairs from observation_1, and
    checks that valid layouts only arise when sv1 == 7 and sv2 == 8.
    """
    mlls_1 = observation_1(show_layouts=False)
    mlls_2 = []
    # create a second list of layouts that contains the same layouts and
    # additionally all of them with the stack pages swapped
    for mll in copy.deepcopy(mlls_1):
        mlls_2.append(mll)
        mll_new = copy.deepcopy(mll)
        mll_new.stacks[0], mll_new.stacks[1] = \
            mll_new.stacks[1], mll_new.stacks[0]
        mlls_2.append(mll_new)
    mlls = []
    # The graph has 14 vertices. Generate all pairs. These pairs are going to
    # be the two shared vertices
    for sv1, sv2 in itertools.combinations(list(range(1, 15)), 2):
        # Get lists of vertices that belong to each K8: six non-shared
        # vertices on each side, excluding whichever shared vertices fall
        # inside that side's base range.
        if sv1 > 6:
            left_vertices = [1, 2, 3, 4, 5, 6]
        elif sv2 > 7:
            left_vertices = [1, 2, 3, 4, 5, 6, 7]
            left_vertices.remove(sv1)
        else:
            left_vertices = [1, 2, 3, 4, 5, 6, 7, 8]
            left_vertices.remove(sv1)
            left_vertices.remove(sv2)
        if sv2 < 9:
            right_vertices = [9, 10, 11, 12, 13, 14]
        elif sv1 < 8:
            right_vertices = [8, 9, 10, 11, 12, 13, 14]
            right_vertices.remove(sv2)
        else:
            right_vertices = [7, 8, 9, 10, 11, 12, 13, 14]
            right_vertices.remove(sv1)
            right_vertices.remove(sv2)
        # Build the union graph: both K8s are complete on their own vertex
        # set plus the two shared vertices
        graph = Graph()
        for v in range(1, 15):
            graph.add_vertex(v)
        for i in left_vertices + [sv1, sv2]:
            for j in left_vertices + [sv1, sv2]:
                if i != j:
                    graph.add_edge(i, j)
        for i in right_vertices + [sv1, sv2]:
            for j in right_vertices + [sv1, sv2]:
                if i != j:
                    graph.add_edge(i, j)
        for m1 in mlls_1:
            for m2 in mlls_2:
                order = list(range(1, 15))
                mll = LinearLayout(graph=graph, order=order,
                                   stacks=2, queues=1)
                # sorted vertex lists used to remap each K8's 1..8 vertex
                # numbering onto the combined 1..14 numbering
                l_vertices = copy.copy(left_vertices)
                l_vertices.extend([sv1, sv2])
                l_vertices.sort()
                r_vertices = copy.copy(right_vertices)
                r_vertices.extend([sv1, sv2])
                r_vertices.sort()
                # Create a new layout by combining the layouts of m1 and m2.
                transformation = (
                    (mll.stacks[0], m1.stacks[0], l_vertices),
                    (mll.stacks[1], m1.stacks[1], l_vertices),
                    (mll.queues[0], m1.queues[0], l_vertices),
                    (mll.stacks[0], m2.stacks[0], r_vertices),
                    (mll.stacks[1], m2.stacks[1], r_vertices),
                    (mll.queues[0], m2.queues[0], r_vertices),
                )
                for target_page, source_page, vertex_list in transformation:
                    for edge in source_page:
                        v1 = vertex_list[edge[0] - 1]
                        v2 = vertex_list[edge[1] - 1]
                        if v1 == sv1 and v2 == sv2 \
                                and (v1, v2) in mll.stacks[0] + mll.stacks[1] + mll.queues[0]:
                            # Don't add the edge between the shared vertices twice
                            continue
                        target_page.append((v1, v2))
                if mll.is_valid():
                    # The only way to get a valid layout is if the shared
                    # vertices are placed in the middle
                    if sv1 == 7 and sv2 == 8:
                        mlls.append(mll)
                    else:
                        print('The observation is false!')
    print('%s layouts found' % len(mlls))
    for mll in mlls:
        pass  # Uncomment the next line to show the layouts
        show_linear_layouts([mll])
|
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.contrib.auth.models import User
from django.db import models
from . import models
from gallery.models import Album
from events.models import Event
from bot.models import TelegramUser
import hashlib
def create_user():
    """Create and persist a test user plus its linked TelegramUser.

    The telegram id is derived deterministically from the username's MD5.
    Returns the saved Django User.
    """
    name = 'Superuser'
    new_user = User.objects.create_user(name, '<EMAIL>', 'Password')
    new_user.save()
    tg_id = int(hashlib.md5(new_user.username.encode("utf-8")).hexdigest()[:8], 16)
    TelegramUser.create_and_save(user=new_user, telegram_id=tg_id)
    return new_user
class TestHomeView(TestCase):
    """Tests for the home feed view: auth redirects, templates, and the
    AJAX-only feed content."""

    def setUp(self):
        # Every test needs access to the request factory.
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()

    def test_redirect_to_home_works_for_authenticated_user(self):
        self.client.force_login(user=self.user)
        response = self.client.get('/', follow=True)
        self.assertRedirects(response, reverse('home:home'))

    def test_login_needed_before_home(self):
        response = self.client.get('/', follow=True)
        self.assertRedirects(response, reverse('pages:landing'))
        messages = list(response.context['messages'])
        self.assertEqual(len(messages), 1)
        self.assertEqual(str(messages[0]), 'Not logged in.')

    def test_home_shows_template(self):
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home:home'))
        self.assertTemplateUsed(response, 'home/home.html')

    def test_home_view_uses_user_groups(self):
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=1)
        group.users.add(self.user)
        photo = models.Photo.create_and_save(user=self.user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.jpg", thumbnail="TODO")
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home:home'))
        self.assertTrue(response.context['user_groups'])

    def test_home_view_shows_nothing_for_user_in_no_groups(self):
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home:home'))
        self.assertFalse(response.context['feed_list'])

    def test_home_view_returns_no_user_content_when_not_using_ajax(self):
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=2)
        group.users.add(self.user)
        photo = models.Photo.create_and_save(user=self.user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.jpg", thumbnail="TODO")
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home:home'))
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(response.context['feed_list'], [])
        self.assertTemplateUsed(response, 'home/home.html')

    def test_home_view_returns_user_content_when_using_ajax(self):
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=2)
        group.users.add(self.user)
        photo = models.Photo.create_and_save(user=self.user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.jpg", thumbnail="TODO")
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home:home'), {'group': group.id}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTrue(response.context['feed_list'])
        self.assertTemplateUsed(response, 'home/feed.html')

    def test_home_view_uses_audio_player_for_audio(self):
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=2)
        group.users.add(self.user)
        audio = models.Audio.create_and_save(user=self.user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.mp3", duration_sec="300", title="Meteora", interpret="Linkin Park",
                                             telegram_id=2004)
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home:home'), {'group': group.id}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(response, 'assets/audio_player.html')

    def test_ajax_request(self):
        self.client.force_login(user=self.user)
        response = self.client.post('/home', {"page": "2", "querystring_key": "page"},
                                    **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertTemplateUsed('home/feed.html')

    def test_authenticated_user_is_not_redirected_to_landing(self):
        self.client.force_login(user=self.user)
        response = self.client.get('/', follow=True)
        self.assertRedirects(response, reverse('home:home'))

    def test_authenticated_user_is_not_redirected_to_login(self):
        self.client.force_login(user=self.user)
        response = self.client.get('/', follow=True)
        self.assertRedirects(response, reverse('home:home'))

    def test_newest_item_timestamp_in_request(self):
        # Only items newer than the supplied timestamp should be in the feed.
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=2)
        group.users.add(self.user)
        link1 = models.Link.create_and_save(user=self.user, group=group, timestamp="2016-05-22 12:59:10+01:00",
                                            url="https://www.thisOneNot.de/")
        link2 = models.Link.create_and_save(user=self.user, group=group, timestamp="2016-05-24 12:59:10+01:00",
                                            url="https://www.thisOneShouldBeIn.de/")
        self.client.force_login(user=self.user)
        response = self.client.get('/home', {"newest-item-timestamp": "2016-05-23-11:00:41", 'group': group.id},
                                   **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertTrue(len(response.context['feed_list']) == 1)

    def test_newest_item_timestamp_in_request_without_specified_group(self):
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=2)
        group.users.add(self.user)
        link1 = models.Link.create_and_save(user=self.user, group=group, timestamp="2016-05-22 12:59:10+01:00",
                                            url="https://www.thisOneNot.de/")
        link2 = models.Link.create_and_save(user=self.user, group=group, timestamp="2016-05-24 12:59:10+01:00",
                                            url="https://www.thisOneShouldBeIn.de/")
        self.client.force_login(user=self.user)
        response = self.client.get('/home', {"newest-item-timestamp": "2016-05-23-11:00:41"},
                                   **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertTrue(len(response.context['feed_list']) == 1)
class TestHomeGroupView(TestCase):
    """Tests for group filtering of the AJAX home feed."""

    def setUp(self):
        # Every test needs access to the request factory.
        self.factory = RequestFactory()
        self.client = Client()
        self.user = create_user()

    def test_home_view_uses_right_groups(self):
        # Requesting one group must exclude content from the user's other group.
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=3)
        group.users.add(self.user)
        group_2 = models.Group.create_and_save(name="Relegationsspiel", picture="", description="abc", telegram_id=4)
        group_2.users.add(self.user)
        photo = models.Photo.create_and_save(user=self.user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.jpg", thumbnail="TODO")
        photo_2 = models.Photo.create_and_save(user=self.user, group=group_2, timestamp="2016-05-25 12:59:10+01:00",
                                               file="2.jpg", thumbnail="TODO")
        self.client.force_login(user=self.user)
        response = self.client.get('/home', {'group': group.id}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        # FIX: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(len(response.context['feed_list']), 1)

    def test_home_view_uses_all_groups(self):
        # Requesting both groups must aggregate content from each.
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="abc", telegram_id=5)
        group.users.add(self.user)
        group_2 = models.Group.create_and_save(name="Relegationsspiel", picture="", description="abc", telegram_id=6)
        group_2.users.add(self.user)
        photo = models.Photo.create_and_save(user=self.user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.jpg", thumbnail="TODO")
        photo_2 = models.Photo.create_and_save(user=self.user, group=group_2, timestamp="2016-05-25 12:59:10+01:00",
                                               file="2.jpg", thumbnail="TODO")
        self.client.force_login(user=self.user)
        response = self.client.get(reverse('home:home'), {'group': [group.id, group_2.id]},
                                   HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed('home/feed.html')
        self.assertEqual(len(response.context['feed_list']), 2)
class TestGroupModel(TestCase):
    """Unit tests for the Group model."""

    def test_group_create(self):
        """create_and_save must persist one Group with all fields set."""
        group = models.Group.create_and_save(name="Croatia 2016", picture="id", description="Crazy trip",
                                             telegram_id=7)
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.Group.objects.all()), 1)
        self.assertEqual(group.name, "Croatia 2016")
        # `picture` is stored on the chat_photo_file_id field.
        self.assertEqual(group.chat_photo_file_id, "id")
        self.assertEqual(group.description, "Crazy trip")
        self.assertEqual(group.telegram_id, 7)

    def test_string_representation(self):
        """str(group) must be the group name."""
        group = models.Group.create_and_save(name="Croatia 2016", picture="TODO", description="Crazy trip",
                                             telegram_id=8)
        self.assertEqual(str(group), "Croatia 2016")

    def test_many_users(self):
        """A group must be able to hold several users."""
        username1 = 'Superuser1'
        user1 = User.objects.create_user(username1, '<EMAIL>', 'Password')
        user1.save()
        # Derive a stable fake telegram id from the username.
        telegram_id = int(hashlib.md5(username1.encode("utf-8")).hexdigest()[:8], 16)
        TelegramUser.create_and_save(user=user1, telegram_id=telegram_id)
        username2 = 'Superuser2'
        user2 = User.objects.create_user(username2, '<EMAIL>', 'Password')
        user2.save()
        telegram_id = int(hashlib.md5(username2.encode("utf-8")).hexdigest()[:8], 16)
        TelegramUser.create_and_save(user=user2, telegram_id=telegram_id)
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=122)
        group.users.add(user1)
        group.users.add(user2)
        self.assertEqual(len(group.users.all()), 2)
class TestPhotoModel(TestCase):
    """Unit tests for the Photo model."""

    def test_photo_create(self):
        """create_and_save must persist one Photo with all fields set."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=9)
        photo = models.Photo.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.jpg", thumbnail="1_thumb.jpg")
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.Photo.objects.all()), 1)
        self.assertEqual(photo.user, user)
        self.assertEqual(photo.group, group)
        self.assertEqual(photo.timestamp, "2016-05-25 12:59:10+01:00")
        self.assertEqual(photo.file, "1.jpg")
        self.assertEqual(photo.thumbnail, "1_thumb.jpg")
class TestTextModel(TestCase):
    """Unit tests for the Text model."""

    def test_text_create(self):
        """create_and_save must persist one Text with all fields set."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=10)
        text = models.Text.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                           content="When do we start?")
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.Text.objects.all()), 1)
        self.assertEqual(text.user, user)
        self.assertEqual(text.group, group)
        self.assertEqual(text.timestamp, "2016-05-25 12:59:10+01:00")
        self.assertEqual(text.content, "When do we start?")

    def test_string_representation(self):
        """str(text) must be the message content."""
        # User
        user = User.objects.create_user('Superuser', '<EMAIL>', 'Password')
        user.save()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=11)
        text = models.Text.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                           content="When do we start?")
        self.assertEqual(str(text), "When do we start?")
class TestAlbumModel(TestCase):
    """Unit tests for the Album model."""

    def test_album_create(self):
        """create_and_save must persist one Album with all fields set."""
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=111)
        album = Album.create_and_save(name="Croatia 2016 Pag", description="Holidays", group=group)
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(Album.objects.all()), 1)
        self.assertEqual(album.name, "Croatia 2016 Pag")
        self.assertEqual(album.description, "Holidays")

    def test_string_representation(self):
        """str(album) must be the album name."""
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=111)
        album = Album.create_and_save(name="Croatia 2016 Pag", description="Holidays", group=group)
        self.assertEqual(str(album), "Croatia 2016 Pag")
class TestAudioModel(TestCase):
    """Unit tests for the Audio model."""

    def test_audio_create(self):
        """create_and_save must persist one Audio with all fields set."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=12)
        audio = models.Audio.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.mp3", duration_sec="300", title="Meteora", interpret="Linkin Park",
                                             telegram_id=2000)
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.Audio.objects.all()), 1)
        self.assertEqual(audio.user, user)
        self.assertEqual(audio.group, group)
        self.assertEqual(audio.timestamp, "2016-05-25 12:59:10+01:00")
        self.assertEqual(audio.file, "1.mp3")
        self.assertEqual(audio.duration_sec, "300")
        self.assertEqual(audio.title, "Meteora")
        self.assertEqual(audio.interpret, "Linkin Park")
class TestVideoModel(TestCase):
    """Unit tests for the Video model."""

    def test_video_create(self):
        """create_and_save must persist one Video with all fields set."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=13)
        video = models.Video.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                             file="1.mkv", duration_sec="300", width="1920", height="1080",
                                             thumbnail="1_thumb.jpg")
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.Video.objects.all()), 1)
        self.assertEqual(video.user, user)
        self.assertEqual(video.group, group)
        self.assertEqual(video.timestamp, "2016-05-25 12:59:10+01:00")
        self.assertEqual(video.file, "1.mkv")
        self.assertEqual(video.duration_sec, "300")
        self.assertEqual(video.width, "1920")
        self.assertEqual(video.height, "1080")
        self.assertEqual(video.thumbnail, "1_thumb.jpg")
class TestLinkModel(TestCase):
    """Unit tests for the Link model."""

    def test_link_create(self):
        """create_and_save must persist one Link with all fields set."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=14)
        link = models.Link.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                           url="https://www.google.de/")
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.Link.objects.all()), 1)
        self.assertEqual(link.user, user)
        self.assertEqual(link.group, group)
        self.assertEqual(link.timestamp, "2016-05-25 12:59:10+01:00")
        self.assertEqual(link.url, "https://www.google.de/")

    def test_string_representation(self):
        """str(link) must be the URL."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=15)
        link = models.Link.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                           url="https://www.google.de/")
        self.assertEqual(str(link), "https://www.google.de/")
class TestFileModel(TestCase):
    """Unit tests for the File model."""

    def test_file_create(self):
        """create_and_save must persist one File with all fields set."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=16)
        file = models.File.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                           other_file="Overview.pdf", file_name='overview', doc_type="JPEG",
                                           telegram_id='3000')
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.File.objects.all()), 1)
        self.assertEqual(file.user, user)
        self.assertEqual(file.group, group)
        self.assertEqual(file.timestamp, "2016-05-25 12:59:10+01:00")
        self.assertEqual(file.other_file, "Overview.pdf")
        self.assertEqual(file.doc_type, "JPEG")

    def test_string_representation(self):
        """str(file) must be the stored file path."""
        # User
        user = User.objects.create_user('Superuser', '<EMAIL>', 'Password')
        user.save()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=17)
        file = models.File.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                           other_file="Overview.pdf", file_name='overview', doc_type="JPEG",
                                           telegram_id='3001')
        self.assertEqual(str(file), "Overview.pdf")
class TestStickerModel(TestCase):
    """Unit tests for the Sticker model."""

    def test_sticker_create(self):
        """create_and_save must persist one Sticker with all fields set."""
        # User
        user = create_user()
        # Group
        group = models.Group.create_and_save(name="Croatia 2016", picture="", description="", telegram_id=18)
        sticker = models.Sticker.create_and_save(user=user, group=group, timestamp="2016-05-25 12:59:10+01:00",
                                                 file="TODO", telegram_id="18")
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(models.Sticker.objects.all()), 1)
        self.assertEqual(sticker.user, user)
        self.assertEqual(sticker.group, group)
        self.assertEqual(sticker.timestamp, "2016-05-25 12:59:10+01:00")
        self.assertEqual(sticker.file, "TODO")
        self.assertEqual(sticker.telegram_id, "18")
class TestEventModel(TestCase):
    """Unit tests for the Event model."""

    def test_event_create(self):
        """create_and_save must persist one Event with all fields set."""
        # Group
        group = models.Group.create_and_save(name="Rock im Park", picture="", description="", telegram_id=19)
        event = Event.create_and_save(name="Rock im Park", location="Nürnberg", group=group,
                                      allday=True, start="2016-06-04 8:00+01:00")
        # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
        self.assertEqual(len(Event.objects.all()), 1)
        self.assertEqual(event.name, "Rock im Park")
        self.assertEqual(event.location, "Nürnberg")
        self.assertEqual(event.group, group)
        self.assertEqual(event.allday, True)
        self.assertEqual(event.start, "2016-06-04 8:00+01:00")

    def test_string_representation(self):
        """str(event) must be the event name."""
        # Group
        group = models.Group.create_and_save(name="Rock im Park", picture="", description="", telegram_id=20)
        event = Event.create_and_save(name="Rock im Park", group=group, location="Nürnberg",
                                      allday=True, start="2016-06-04 8:00+01:00")
        self.assertEqual(str(event), "Rock im Park")
|
<reponame>salman-ahmed-sheikh/gpt-2
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
from google_trans_new import google_translator
import csv
import random
import model, sample, encoder
def translate(items):
    """Translate a string, or a list of strings, to Hungarian.

    :param items: a single string or a list of strings.
    :return: a list of translations when *items* is a list, otherwise one
        translated string with any "<|endoftext|>" markers removed first.
    """
    translator = google_translator()
    if isinstance(items, list):
        # BUG FIX: the original tested `type(items) == "list"`, comparing a
        # type object to the *string* "list", which is never true -- so the
        # list branch was dead code.
        ret = []
        for item in items:
            ret.append(translator.translate(item, lang_tgt='hu'))
        return ret
    else:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, so the marker was never actually stripped.
        items = items.replace("<|endoftext|>", "")
        return translator.translate(items, lang_tgt='hu')
def selectRandom(items, minm, maxm):
    """Pick a uniformly random number of elements (between *minm* and *maxm*,
    inclusive) from *items*, sampled without replacement."""
    how_many = random.randint(minm, maxm)
    return random.sample(items, how_many)
def addImages(txt, imgs):
    """Insert two randomly chosen <img> tags roughly a quarter and
    three-quarters of the way through *txt* (split on newlines).

    On any failure the error is printed and the original text is returned
    unchanged.
    """
    try:
        lines = txt.split("\n")
        first_img = random.choice(imgs)
        second_img = random.choice(imgs)
        quarter = (len(lines) // 2) // 2
        three_quarter = quarter + (len(lines) // 2)
        pieces = [
            "\n".join(lines[:quarter]),
            " <img src=" + first_img + ">",
            "\n".join(lines[quarter:three_quarter]),
            " <img src=" + second_img + ">",
            "\n".join(lines[three_quarter:]),
        ]
        return "".join(pieces)
    except Exception as err:
        print(err)
        return txt
def highlight_Article(art, high):
    """Wrap every highlight term longer than three characters in <b></b> tags.

    :param art: article text to annotate.
    :param high: iterable of terms to bold.
    :return: the annotated article text.
    """
    for term in high:
        # Very short terms would bold fragments of unrelated words; skip them.
        if len(term) > 3:
            art = art.replace(term, "<b>" + term + "</b>")
    return art
def interact_model(
    #file1,file2,file3,
    model_name='1558M',
    seed=None,
    nsamples=1,
    batch_size=1,
    length=None,
    temperature=1,
    top_k=40,
    top_p=1,
    models_dir='models',
):
    """
    Interactively run the model
    :model_name=124M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1 : Number of samples to return total
    :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :models_dir : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)
    """
    # True: for same keyword in all headings, False: for random keyword for each heading
    sameKeyword = True
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    if batch_size is None:
        batch_size = 3
    assert nsamples % batch_size == 0
    # Load the BPE encoder and model hyper-parameters for the chosen model.
    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    # Opening/closing tags for randomly sized HTML headings inserted below.
    st_head = ["<h1>", "<h2>", "<h3>"]
    en_head = ["</h1>", "</h2>", "</h3>"]
    if length is None:
        length = 300#hparams.n_ctx - 2
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        # Sampling graph: generates up to `length` tokens continuing `context`.
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k, top_p=top_p
        )
        # Restore model weights from the latest checkpoint.
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)
        # Start each run from a clean output file.
        try:
            os.remove("output.csv")
        except:
            pass
        outpt = csv.writer(open('output.csv', 'w', encoding='utf-8'))
        outpt.writerow(["keyword", "GUID", "Description", "Tags", "Article","Article-english", "Category"])
        # open text file (seed sentences, one article's worth per line)
        with open('u\\tx.txt') as f0:#open('tx654.txt') as f0:#open('u\\te.txt') as f0:#open('tx654.txt') as f0:
            txt = f0.readlines()
        # open title file
        with open('u\\ti.txt') as f1:#open('ttt165.txt') as f1:#open('u\\ti.txt') as f1: #open('ttt165.txt') as f1:
            titles = f1.readlines()
        # open keywords file
        with open('u\\k.txt') as f2:# open('kk654.txt') as f2:#open('u\\k.txt') as f2: #open('kk654.txt') as f2:
            keywords = f2.readlines()
        # open images file (image URLs woven into the article body)
        with open('u\\i.txt') as f3:# open('im95.txt') as f3:#open('u\\i.txt') as f3: #open('im95.txt') as f3:
            images = f3.readlines()
        # One generated article per (title, seed text) pair.
        for xm, (title,tt) in enumerate (zip(titles,txt)):
            # Keywords are cycled when there are fewer keywords than titles.
            keyword = translate(keywords[xm % len(keywords)])
            print("=" * 20)
            # Trim the seed text back to its last complete sentence.
            tt = tt[0:tt.rindex(".")]
            usd_titles = []
            #tt= tt.replace("\n","")
            title = title.replace("\n","")
            usd_titles.append(title)
            title = translate(title)
            # Collect the words that should be bold-highlighted in the article.
            highlight = title.split(" ")
            highlight.extend(keyword.split(" "))
            print("Generating text for: ", title)
            print("Input Sentence: ", tt)
            print("=" * 20)
            # One generation round per seed sentence.
            inps = tt.split(".")
            imgs = random.sample(images, min(len(inps)-1,len(images)))
            tits = random.sample(titles, min(len(inps)-1,len(titles)))
            tmp2 = random.sample(keywords, min(len(inps)-1,len(keywords)))
            kkw = [translate(k.replace("\n",'')) for k in tmp2]
            temp = [translate(t.replace("\n","")).split(" ") for t in tits]
            [highlight.extend(tt) for tt in temp]
            article = ""      # translated article with HTML markup
            art_eng = ""      # English (untranslated) version of the article
            for enm,inp in enumerate(inps):
                # Re-sample until the completion contains no end-of-text marker.
                while True:
                    context_tokens = enc.encode(inp)
                    out = sess.run(output, feed_dict={context: [context_tokens for _ in range(batch_size)]})[:, len(context_tokens):]
                    if not "<|endoftext|>" in enc.decode(out[0]):
                        break
                amb = inp + enc.decode(out[0])
                # Cut the completion back to its last complete sentence.
                amb = amb[0:amb.rindex(".")] + "."
                # NOTE(review): `amb` already starts with `inp`, so `inp`
                # appears twice in art_eng here -- confirm this is intended.
                art_eng += inp + amb
                article += highlight_Article(translate(inp + amb),highlight)
                # Between segments: insert an image and a random-level heading.
                if enm < len(inps)-1:
                    img = imgs[enm].replace("\n","")
                    article += "\n <img src=" + img + " alt = " + keyword + "> \n"
                    art_eng += "\n <img src=" + img + " alt = " + keyword + "> \n"
                    t2 = tits[enm].replace("\n","")
                    hd = random.randint(0,2)
                    if sameKeyword:
                        kk = keyword
                    else:
                        kk = kkw[enm]
                    article += st_head[hd] + kk + " - " + translate(t2) + en_head[hd] + "\n"
            title = keyword +" - "+ title
            print(art_eng)
            #article = article.replace(" <| Endoftext |>", "") #
            #article = article.replace("<|endoftext|>", "")
            #article = translate(article)
            #article = highlight_Article(article,highlight)
            # Random tag/category selections, translated like the body.
            tags = translate(",".join(selectRandom(keywords,3,4)))
            categories = translate(",".join(selectRandom(keywords,1,2)))
            #article = addImages(article,images)
            outpt.writerow([keyword, xm+1, title, tags, article,art_eng, categories])
if __name__ == '__main__':
    # Expose interact_model's keyword arguments as a command-line interface.
    fire.Fire(interact_model)
    #interact_model()
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 <NAME> <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from pyrogram.api.core import Object
from pyrogram.api.types import (
KeyboardButtonUrl, KeyboardButtonCallback,
KeyboardButtonSwitchInline
)
class InlineKeyboardButton(Object):
    """One button of an inline keyboard.

    Exactly one of the optional fields must be used.

    Args:
        text (``str``):
            Label text on the button.

        callback_data (``str``, *optional*):
            Data to be sent in a callback query to the bot when button is pressed, 1-64 bytes.

        url (``str``, *optional*):
            HTTP url to be opened when button is pressed.

        switch_inline_query (``str``, *optional*):
            If set, pressing the button prompts the user to select one of their
            chats, opens it, and inserts the bot's username plus this inline
            query in the input field (may be empty, inserting the username
            only). Handy for getting users to start the bot in inline mode
            from a private chat with it.

        switch_inline_query_current_chat (``str``, *optional*):
            If set, pressing the button inserts the bot's username plus this
            inline query in the *current* chat's input field (may be empty,
            inserting the username only).

        callback_game (:obj:`CallbackGame <pyrogram.CallbackGame>`, *optional*):
            Description of the game launched when the user presses the button.
            NOTE: must always be the first button in the first row.

        pay (``bool``, *optional*):
            Specify True, to send a Pay button.
            NOTE: must always be the first button in the first row.
    """

    ID = 0xb0700019

    def __init__(
        self,
        text: str,
        callback_data: str = None,
        url: str = None,
        switch_inline_query: str = None,
        switch_inline_query_current_chat: str = None,
        callback_game=None,
        pay: bool = None
    ):
        self.text = text
        self.callback_data = callback_data
        self.url = url
        self.switch_inline_query = switch_inline_query
        self.switch_inline_query_current_chat = switch_inline_query_current_chat
        self.callback_game = callback_game
        self.pay = pay

    @staticmethod
    def read(b, *args):
        # Map each raw MTProto keyboard button type onto the high-level object.
        if isinstance(b, KeyboardButtonUrl):
            return InlineKeyboardButton(text=b.text, url=b.url)
        if isinstance(b, KeyboardButtonCallback):
            return InlineKeyboardButton(text=b.text, callback_data=b.data.decode())
        if isinstance(b, KeyboardButtonSwitchInline):
            # same_peer distinguishes "insert into the current chat" from
            # "let the user pick a chat first".
            if b.same_peer:
                return InlineKeyboardButton(text=b.text, switch_inline_query_current_chat=b.query)
            return InlineKeyboardButton(text=b.text, switch_inline_query=b.query)

    def write(self):
        # Field priority mirrors the order checks were made historically:
        # callback_data wins over url, which wins over the switch variants.
        if self.callback_data:
            return KeyboardButtonCallback(self.text, self.callback_data.encode())
        if self.url:
            return KeyboardButtonUrl(self.text, self.url)
        if self.switch_inline_query:
            return KeyboardButtonSwitchInline(self.text, self.switch_inline_query)
        if self.switch_inline_query_current_chat:
            return KeyboardButtonSwitchInline(self.text, self.switch_inline_query_current_chat, same_peer=True)
|
<filename>retro_data_structures/formats/mrea.py<gh_stars>0
"""
Wiki: https://wiki.axiodl.com/w/MREA_(Metroid_Prime_2)
"""
import hashlib
import io
import construct
from construct import (
Int32ub, Struct, Const, Float32b, Array, Aligned, GreedyBytes, ListContainer, Container, Rebuild,
Tell, Computed, FocusedSeq, IfThenElse, Prefixed, Pointer, Subconstruct, Switch
)
from retro_data_structures import game_check
from retro_data_structures.common_types import AssetId32
from retro_data_structures.compression import LZOCompressedBlock
from retro_data_structures.data_section import DataSectionSizes, DataSectionSizePointer
class PrefixedWithPaddingBefore(construct.Subconstruct):
    """Like ``construct.Prefixed`` but with padding *before* the payload.

    The length field gives the payload size; ``32 - (length % 32)`` zero-pad
    bytes are read/written before the payload itself (none when the length is
    already a multiple of 32), keeping MREA data 32-byte aligned.
    """

    def __init__(self, length_field, subcon):
        super().__init__(subcon)
        # Alignment unit in bytes; MREA sections are 32-byte aligned.
        self.padding = 32
        self.length_field = length_field

    def _parse(self, stream, context, path):
        # Layout on disk: [length field] [pad bytes] [payload].
        length = self.length_field._parsereport(stream, context, path)
        bytes_to_pad = self.padding - (length % self.padding)
        if bytes_to_pad < self.padding:
            # Skip the alignment padding; it carries no data.
            construct.stream_read(stream, bytes_to_pad, path)
        data = construct.stream_read(stream, length, path)
        if self.subcon is GreedyBytes:
            # Fast path: raw bytes need no re-parsing through a BytesIO.
            return data
        return self.subcon._parsereport(io.BytesIO(data), context, path)

    def _build(self, obj, stream, context, path):
        # Build into a scratch buffer first so the payload size is known
        # before the length field and padding are emitted.
        stream2 = io.BytesIO()
        buildret = self.subcon._build(obj, stream2, context, path)
        data = stream2.getvalue()
        length = len(data)
        self.length_field._build(length, stream, context, path)
        bytes_to_pad = self.padding - (length % self.padding)
        if bytes_to_pad < self.padding:
            construct.stream_write(stream, b"\x00" * bytes_to_pad, bytes_to_pad, path)
        construct.stream_write(stream, data, len(data), path)
        return buildret
class DataSectionInGroup(Subconstruct):
    """Splits one (possibly compressed) MREA block group into its individual
    data sections using the file's global section-size table.

    Parsing returns a Container with ``compressed`` (bool) and ``sections``,
    where each section carries its raw bytes plus a SHA-256 hex digest.
    """

    def _parse(self, stream, context, path):
        group = self.subcon._parsereport(stream, context, path)
        # Each parse of the size pointer yields the next entry of the global
        # section-size table (it advances a shared section index).
        size_pointer = DataSectionSizePointer()
        sections = []
        offset = 0
        for i in range(group.section_count):
            section_size = size_pointer._parsereport(stream, context, path)
            # Slice this section's bytes out of the decompressed group blob.
            data = group.data[offset:offset + section_size]
            sections.append(Container(
                data=data,
                # Digest stored alongside the bytes so edits can be detected.
                hash=hashlib.sha256(data).hexdigest(),
            ))
            offset += section_size
        return Container(
            compressed=group.header.value.compressed_size > 0,
            sections=ListContainer(sections),
        )

    def _build(self, sections, stream, context, path):
        # NOTE(review): building is effectively unimplemented -- any non-empty
        # input raises, and the empty case emits an empty group list.
        # Presumably re-compression support is still TODO; confirm intent.
        if sections:
            raise NotImplementedError
        compressed_blocks = []
        obj2 = ListContainer(compressed_blocks)
        buildret = self.subcon._build(obj2, stream, context, path)
        return obj2
def create(version: int, asset_id):
    """Build the ``construct`` Struct describing an MREA file.

    :param version: expected value of the header version field
        (e.g. 0x19 for Metroid Prime 2: Echoes).
    :param asset_id: construct type for asset ids
        (NOTE(review): currently unused in the field list -- confirm).
    :return: a ``construct.Struct`` for parsing/building the format.
    """
    fields = [
        "magic" / Const(0xDEADBEEF, Int32ub),
        "version" / Const(version, Int32ub),

        # Matrix that represents the area's transform from the origin.
        # Most area data is pre-transformed, so this matrix is only used occasionally.
        "area_transform" / Array(12, Float32b),

        # Number of world models in this area.
        "world_model_count" / Int32ub,

        # Number of script layers in this area.
        "script_layer_count" / Int32ub,

        # Number of data sections in the file.
        "data_section_count" / Int32ub,

        # Section index for world geometry data. Always 0; starts on materials.
        "geometry_section" / Int32ub,

        # Section index for script layer data.
        "script_layers_section" / Int32ub,

        # Section index for generated script object data.
        "generated_script_objects_section" / Int32ub,

        # Section index for collision data.
        "collision_section" / Int32ub,

        # Section index for first unknown section.
        "unknown_section_1" / Int32ub,

        # Section index for light data.
        "lights_section" / Int32ub,

        # Section index for visibility tree data.
        "visibility_tree_section" / Int32ub,

        # Section index for path data.
        "path_section" / Int32ub,

        # Section index for second unknown section.
        "unknown_section_2" / Int32ub,

        # Section index for portal area data.
        "portal_area_section" / Int32ub,

        # Section index for static geometry map data.
        "static_geometry_map_section" / Int32ub,

        # Number of compressed data blocks in the file.
        "_compressed_block_count" / Aligned(16, Rebuild(Int32ub, construct.len_(construct.this.section_groups))),

        # Array containing the size of each data section in the file. Every size is always a multiple of 32.
        "_data_section_sizes" / Aligned(32, DataSectionSizes(construct.this._root.data_section_count)),

        # Running index used by DataSectionSizePointer while parsing groups.
        "_current_section" / construct.Computed(lambda this: 0),

        # Sections. Each group is compressed separately
        "section_groups" / FocusedSeq(
            "groups",
            # Per-group headers: buffer/uncompressed/compressed sizes and the
            # number of data sections contained in the group.
            headers=Aligned(32, Array(construct.this._._compressed_block_count, Struct(
                address=Tell,
                value=Struct(
                    "buffer_size" / Int32ub,
                    "uncompressed_size" / Int32ub,
                    "compressed_size" / Int32ub,
                    "section_count" / Int32ub,
                ),
            ))),
            groups=Aligned(32, Array(
                construct.this._._compressed_block_count,
                DataSectionInGroup(Struct(
                    header=Computed(lambda this: this._.headers[this._index]),
                    section_count=Pointer(lambda this: this.header.address + 12, Int32ub),
                    # compressed_size == 0 marks an uncompressed group.
                    data=IfThenElse(
                        lambda this: this.header.value.compressed_size > 0,
                        PrefixedWithPaddingBefore(
                            Computed(lambda this: this.header.value.compressed_size),
                            LZOCompressedBlock(lambda this: this.header.value.uncompressed_size),
                        ),
                        Prefixed(Pointer(lambda this: this.header.address + 4, Int32ub), GreedyBytes),
                    ),
                )),
            )),
        ),
    ]

    return Struct(*fields)
# MREA format for Metroid Prime 2: Echoes (version 0x19, 32-bit asset ids).
Prime2MREA = create(0x19, AssetId32)

# Entry point: dispatches to the right per-game MREA construct; games without
# a registered format fail with a construct.Error.
MREA = Switch(
    game_check.get_current_game,
    {
        game_check.Game.ECHOES: Prime2MREA,
    },
    construct.Error,
)
|
# -*- coding: utf-8 -*-
"""Computes symmetrical RCC3 relations: 'dc':disconnected, 'po':partial overlap, 'o': occluded/part of
:Author: <NAME> <<EMAIL>>
:Organization: University of Leeds
:Date: 10 September 2014
:Version: 0.1
:Status: Development
:Copyright: STRANDS default
:Notes: future extension to handle polygons, to do that use matplotlib.path.Path.contains_points
although might want to have a read on the following also...
http://matplotlib.1069221.n5.nabble.com/How-to-properly-use-path-Path-contains-point-td40718.html
"""
from __future__ import print_function, division
from qsrlib_qsrs.qsr_abstractclass import QSR_Abstractclass
from qsrlib_io.world_qsr_trace import *
class QSR_RCC3_Rectangle_Bounding_Boxes_2D(QSR_Abstractclass):
    """Computes symmetrical RCC3 relations between 2D axis-aligned bounding
    boxes: 'dc' (disconnected), 'po' (partial overlap), 'o' (occluded/part of).
    """

    def __init__(self):
        self.qsr_type = "rcc3_rectangle_bounding_boxes_2d"  # must be the same that goes in the QSR_Lib.__const_qsrs_available
        self.qsr_keys = "rcc3"
        self.all_possible_relations = ["dc", "po", "o"]

    def custom_set_from_config_file(self, document):
        # No custom configuration for this QSR.
        pass

    def custom_help(self):
        """Write your own help message function"""
        print("where,\nx1, y2: the xy-coords of the top-left corner of the rectangle\nx2, y2: the xy-coords of the bottom-right corner of the rectangle")

    def custom_checks(self, input_data):
        """Write your own custom checks on top of the default ones

        :return: error code, error message (integer, string), use 10 and above for error code as 1-9 are reserved by system
        """
        return 0, ""

    def custom_checks_for_qsrs_for(self, qsrs_for, error_found):
        """qsrs_for must be tuples of two objects.

        :param qsrs_for: list of strings and/or tuples for which QSRs will be computed
        :param error_found: if an error was found in the qsrs_for that violates the QSR rules
        :return: qsrs_for, error_found
        """
        for p in list(qsrs_for):
            # BUG FIX: the original chained the three tests with `and`, so an
            # entry was only rejected if it was *neither* tuple/list *nor* of
            # length 2 -- e.g. a 3-tuple slipped through. Valid entries must
            # be a tuple/list AND have exactly two elements.
            if not isinstance(p, (tuple, list)) or len(p) != 2:
                qsrs_for.remove(p)
                error_found = True
        return qsrs_for, error_found

    def make(self, *args, **kwargs):
        """Make the QSRs

        :param args: not used at the moment
        :param kwargs:
            - input_data: World_Trace
        :return: World_QSR_Trace
        """
        input_data = kwargs["input_data"]
        include_missing_data = kwargs["include_missing_data"]
        ret = World_QSR_Trace(qsr_type=self.qsr_type)
        for t in input_data.get_sorted_timestamps():
            world_state = input_data.trace[t]
            timestamp = world_state.timestamp
            # Either use the requested object pairs or all ordered pairs.
            if kwargs["qsrs_for"]:
                qsrs_for, error_found = self.check_qsrs_for_data_exist(world_state.objects.keys(), kwargs["qsrs_for"])
            else:
                qsrs_for = self.__return_all_possible_combinations(world_state.objects.keys())
            if qsrs_for:
                for p in qsrs_for:
                    between = str(p[0]) + "," + str(p[1])
                    bb1 = world_state.objects[p[0]].return_bounding_box_2d()
                    bb2 = world_state.objects[p[1]].return_bounding_box_2d()
                    qsr = QSR(timestamp=timestamp, between=between,
                              qsr=self.handle_future(kwargs["future"], self.__compute_qsr(bb1, bb2), self.qsr_keys))
                    ret.add_qsr(qsr, timestamp)
            else:
                if include_missing_data:
                    ret.add_empty_world_qsr_state(timestamp)
        return ret

    # custom functions follow
    def __return_all_possible_combinations(self, objects_names):
        """Return all ordered pairs of distinct object names (or [] if <2)."""
        if len(objects_names) < 2:
            return []
        ret = []
        for i in objects_names:
            for j in objects_names:
                if i != j:
                    ret.append((i, j))
        return ret

    def __compute_qsr(self, bb1, bb2):
        """Return symmetrical RCC3 relation

        :param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
        :param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
        :return: an RCC3 relation from the following: 'dc':disconnected, 'po':partial overlap, 'o': occluded/part of
        """
        bboxes_intercept_v, rabx, raxPrbx, raby, rayPrby = self.__bboxes_intercept(bb1, bb2)
        if bboxes_intercept_v:
            if rabx > 0.0 or raby > 0:
                # Centers differ on at least one axis: ordinary overlap.
                return "po"
            else:
                # Centers coincide: decide containment by counting corners
                # of one box inside the other (all 4 => occluded/part of).
                occluded_points = self.__count_occluded_points(bb1, bb2)
                if occluded_points >= 4:
                    return "o"
                else:
                    return "po"
        else:
            return "dc"

    def __count_occluded_points(self, bb1, bb2):
        """Count corners of each box that fall inside the other box."""
        occluded_points = 0
        bb1_4corners = ((bb1[0], bb1[1]),
                        (bb1[2], bb1[1]),
                        (bb1[2], bb1[3]),
                        (bb1[0], bb1[3]))
        bb2_4corners = ((bb2[0], bb2[1]),
                        (bb2[2], bb2[1]),
                        (bb2[2], bb2[3]),
                        (bb2[0], bb2[3]))
        for p in bb1_4corners:
            if self.__is_point_in_rectangle(p, bb2):
                occluded_points += 1
        for p in bb2_4corners:
            if self.__is_point_in_rectangle(p, bb1):
                occluded_points += 1
        return occluded_points

    def __is_point_in_rectangle(self, p, r, d=0.):
        """Return True if point p=(x, y) lies in rectangle r=(x1, y1, x2, y2),
        expanded by tolerance d on every side."""
        # BUG FIX: the original compared p[1] (a y-coordinate) against r[0]
        # (an x-coordinate) in the lower bound; it must be r[1].
        return r[0]-d <= p[0] <= r[2]+d and r[1]-d <= p[1] <= r[3]+d

    def __bboxes_intercept(self, bb1, bb2):
        """
        https://rbrundritt.wordpress.com/2009/10/03/determining-if-two-bounding-boxes-overlap/

        :param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
        :param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
        :return: (intercept?, rabx, raxPrbx, raby, rayPrby)
        """
        # First bounding box, top left corner, bottom right corner
        ATLx = bb1[0]
        ATLy = bb1[3]
        ABRx = bb1[2]
        ABRy = bb1[1]
        # Second bounding box, top left corner, bottom right corner
        BTLx = bb2[0]
        BTLy = bb2[3]
        BBRx = bb2[2]
        BBRy = bb2[1]
        # Distance between the (doubled) box centers, per axis.
        rabx = abs(ATLx + ABRx - BTLx - BBRx)
        raby = abs(ATLy + ABRy - BTLy - BBRy)
        # rAx + rBx
        raxPrbx = ABRx - ATLx + BBRx - BTLx
        # rAy + rBy
        rayPrby = ATLy - ABRy + BTLy - BBRy
        if(rabx <= raxPrbx) and (raby <= rayPrby):
            return True, rabx, raxPrbx, raby, rayPrby
        else:
            return False, rabx, raxPrbx, raby, rayPrby
|
<filename>wrappers/python/virgil_crypto_lib/foundation/aes256_cbc.py<gh_stars>10-100
# Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <<EMAIL>>
from ctypes import *
from ._c_bridge import VscfAes256Cbc
from ._c_bridge import VscfImplTag
from ._c_bridge import VscfStatus
from virgil_crypto_lib.common._c_bridge import Data
from virgil_crypto_lib.common._c_bridge import Buffer
from .alg import Alg
from .encrypt import Encrypt
from .decrypt import Decrypt
from .cipher_info import CipherInfo
from .cipher import Cipher
class Aes256Cbc(Alg, Encrypt, Decrypt, CipherInfo, Cipher):
    """Implementation of the symmetric cipher AES-256 bit in a CBC mode.

    Note, this implementation contains dynamic memory allocations,
    this should be improved in the future releases.
    """

    # Cipher nonce length or IV length in bytes, or 0 if nonce is not required.
    NONCE_LEN = 16
    # Cipher key length in bytes.
    KEY_LEN = 32
    # Cipher key length in bits.
    KEY_BITLEN = 256
    # Cipher block length in bytes.
    BLOCK_LEN = 16

    def __init__(self):
        """Create underlying C context."""
        self._lib_vscf_aes256_cbc = VscfAes256Cbc()
        self._c_impl = None
        self._ctx = None
        # Assignment goes through the `ctx` property setter, which
        # shallow-copies the new context and caches the C implementation.
        self.ctx = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_new()

    def __del__(self):
        """Destroy underlying C context.

        Bug fix: this finalizer was previously declared as ``__delete__``,
        which belongs to the *descriptor* protocol and is never invoked when
        an ordinary instance is garbage collected -- the underlying C context
        was leaked. ``__del__`` is the finalizer the interpreter actually
        calls on collection.
        """
        # Guard against partially constructed instances (e.g. __init__
        # raised before `ctx` was assigned).
        if getattr(self, '_ctx', None) is not None:
            self._lib_vscf_aes256_cbc.vscf_aes256_cbc_delete(self.ctx)

    def alg_id(self):
        """Provide algorithm identificator."""
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_alg_id(self.ctx)
        return result

    def produce_alg_info(self):
        """Produce object with algorithm information and configuration parameters."""
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_produce_alg_info(self.ctx)
        # Resolve the concrete wrapper class from the implementation tag and
        # hand it ownership of the returned C context.
        instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))
        return instance

    def restore_alg_info(self, alg_info):
        """Restore algorithm configuration from the given object.

        Raises on a non-success status via VscfStatus.handle_status.
        """
        status = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_restore_alg_info(self.ctx, alg_info.c_impl)
        VscfStatus.handle_status(status)

    def encrypt(self, data):
        """Encrypt given data and return the ciphertext bytes."""
        d_data = Data(data)
        out = Buffer(self.encrypted_len(data_len=len(data)))
        status = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_encrypt(self.ctx, d_data.data, out.c_buffer)
        VscfStatus.handle_status(status)
        return out.get_bytes()

    def encrypted_len(self, data_len):
        """Calculate required buffer length to hold the encrypted data."""
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_encrypted_len(self.ctx, data_len)
        return result

    def precise_encrypted_len(self, data_len):
        """Precise length calculation of encrypted data."""
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_precise_encrypted_len(self.ctx, data_len)
        return result

    def decrypt(self, data):
        """Decrypt given data and return the plaintext bytes."""
        d_data = Data(data)
        out = Buffer(self.decrypted_len(data_len=len(data)))
        status = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_decrypt(self.ctx, d_data.data, out.c_buffer)
        VscfStatus.handle_status(status)
        return out.get_bytes()

    def decrypted_len(self, data_len):
        """Calculate required buffer length to hold the decrypted data."""
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_decrypted_len(self.ctx, data_len)
        return result

    def set_nonce(self, nonce):
        """Setup IV or nonce (NONCE_LEN bytes)."""
        d_nonce = Data(nonce)
        self._lib_vscf_aes256_cbc.vscf_aes256_cbc_set_nonce(self.ctx, d_nonce.data)

    def set_key(self, key):
        """Set cipher encryption / decryption key (KEY_LEN bytes)."""
        d_key = Data(key)
        self._lib_vscf_aes256_cbc.vscf_aes256_cbc_set_key(self.ctx, d_key.data)

    def start_encryption(self):
        """Start sequential encryption."""
        self._lib_vscf_aes256_cbc.vscf_aes256_cbc_start_encryption(self.ctx)

    def start_decryption(self):
        """Start sequential decryption."""
        self._lib_vscf_aes256_cbc.vscf_aes256_cbc_start_decryption(self.ctx)

    def update(self, data):
        """Process encryption or decryption of the given data chunk."""
        d_data = Data(data)
        out = Buffer(self.out_len(data_len=len(data)))
        self._lib_vscf_aes256_cbc.vscf_aes256_cbc_update(self.ctx, d_data.data, out.c_buffer)
        return out.get_bytes()

    def out_len(self, data_len):
        """Return buffer length required to hold an output of the methods
        "update" or "finish" in an current mode.

        Pass zero length to define buffer length of the method "finish".
        """
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_out_len(self.ctx, data_len)
        return result

    def encrypted_out_len(self, data_len):
        """Return buffer length required to hold an output of the methods
        "update" or "finish" in an encryption mode.

        Pass zero length to define buffer length of the method "finish".
        """
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_encrypted_out_len(self.ctx, data_len)
        return result

    def decrypted_out_len(self, data_len):
        """Return buffer length required to hold an output of the methods
        "update" or "finish" in an decryption mode.

        Pass zero length to define buffer length of the method "finish".
        """
        result = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_decrypted_out_len(self.ctx, data_len)
        return result

    def finish(self):
        """Accomplish encryption or decryption process."""
        out = Buffer(self.out_len(data_len=0))
        status = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_finish(self.ctx, out.c_buffer)
        VscfStatus.handle_status(status)
        return out.get_bytes()

    @classmethod
    def take_c_ctx(cls, c_ctx):
        """Wrap an existing C context.

        NOTE(review): the assignment below goes through the `ctx` property
        setter, which shallow-copies the passed context -- confirm this is
        the intended "take" semantics of the C library.
        """
        inst = cls.__new__(cls)
        inst._lib_vscf_aes256_cbc = VscfAes256Cbc()
        inst.ctx = c_ctx
        return inst

    @classmethod
    def use_c_ctx(cls, c_ctx):
        """Wrap an existing C context, explicitly sharing (shallow-copying) it."""
        inst = cls.__new__(cls)
        inst._lib_vscf_aes256_cbc = VscfAes256Cbc()
        inst.ctx = inst._lib_vscf_aes256_cbc.vscf_aes256_cbc_shallow_copy(c_ctx)
        return inst

    @property
    def c_impl(self):
        # C implementation handle, refreshed by the `ctx` setter.
        return self._c_impl

    @property
    def ctx(self):
        # Raw C context handle.
        return self._ctx

    @ctx.setter
    def ctx(self, value):
        # Shallow-copy the incoming context and refresh the cached
        # implementation handle.
        self._ctx = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_shallow_copy(value)
        self._c_impl = self._lib_vscf_aes256_cbc.vscf_aes256_cbc_impl(self.ctx)
|
<reponame>jpodivin/pystrand
import multiprocessing as mp
import uuid
from pystrand.populations import BasePopulation
from pystrand.selections import RouletteSelection, ElitismSelection, BaseSelection
from pystrand.mutations import BaseMutation, PointMutation
from pystrand.loggers.csv_logger import CsvLogger
from pystrand.loggers.details import RunDetails
class BaseOptimizer:
    """Base optimizer class.

    Runs a generic evolutionary loop over a population: evaluate fitness,
    select survivors, mutate and (optionally) cross over genotypes, until
    ``max_iterations`` is reached or an individual with fitness 1.0 appears.

    Parameters
    ----------
    fitness_function : BaseFunction
        provides mapping from genotype to a fitness value, [0, 1]
    max_iterations : int
        0 by default
    population : Population
        Seed population, can include known sub-optimal solutions.
    mutation_prob : float
        0.001 by default
    mutation_ops :
        Mutation operator to use on genotypes.
        Uses supplied mutation_prob. If None, defaults to PointMutation.
        None by default.
    crossover_prob : float
        0.0 by default, no crossover will take place
    selection_ops : str, BaseSelection, or list of either
        'roulette' by default.
    selected_fraction : float
        Fraction of the population a selection operator keeps.
        0.1 by default.
    log_path : str or None
        If set, run statistics are written there as CSV; pass
        ``save_details=True`` in kwargs to also record run details.
    parallelize : bool
        Use multiprocessing to evaluate genomes in parallel?

    Raises
    ------
    TypeError
        If supplied wrong selection method type.
        If supplied mutation_op not subclassing BaseMutation.
    ValueError
        If a selection method name string is not recognized.
    """

    def __init__(self,
                 population,
                 max_iterations=0,
                 fitness_function=None,
                 mutation_prob=0.001,
                 mutation_ops=None,
                 crossover_prob=0.0,
                 selection_ops='roulette',
                 selected_fraction=0.1,
                 log_path=None,
                 parallelize=False,
                 **kwargs):
        """For each element in list of selection methods we check the type.

        Only Selection and string are accepted; other types raise TypeError.
        The strings must be recognized as names of an algorithm, any other
        string will result in ValueError.
        """
        self._optimizer_uuid = str(uuid.uuid1())
        self._fitness_function = fitness_function
        # Normalize mutation operators to a list: a bare operator is wrapped,
        # anything that is neither a list nor a BaseMutation is rejected.
        if mutation_ops:
            if isinstance(mutation_ops, list):
                self._mutation_ops = mutation_ops
            elif issubclass(type(mutation_ops), BaseMutation):
                self._mutation_ops = [mutation_ops]
            else:
                raise TypeError(
                    'Invalid mutation operator.',
                    type(mutation_ops))
        else:
            # Nothing supplied: fall back to point mutation with the given
            # probability.
            self._mutation_ops = [PointMutation(mutation_prob)]
        # Loggers are created only when a log path is supplied.
        if log_path:
            self.logger = CsvLogger(log_path=log_path)
            if kwargs.get('save_details'):
                self.details_logger = RunDetails(log_path=log_path)
            else:
                self.details_logger = None
        else:
            self.logger = None
            self.details_logger = None
        self._crossover_probability = crossover_prob
        self._selection_methods = []
        self._parallelize = parallelize
        self._population = population
        self._max_iterations = max_iterations
        # First we turn selection_methods into list, in case it isn't.
        if not isinstance(selection_ops, list):
            selection_ops = [selection_ops]
        for selection_method in selection_ops:
            if isinstance(selection_method, str):
                # Resolve a selection algorithm by its name.
                if selection_method == 'roulette':
                    self._selection_methods += [RouletteSelection(selected_fraction)]
                elif selection_method == 'elitism':
                    self._selection_methods += [ElitismSelection(selected_fraction)]
                else:
                    raise ValueError(
                        'Unknown selection algorithm name.',
                        selection_method)
            elif isinstance(selection_method, BaseSelection):
                self._selection_methods += [selection_method]
            else:
                raise TypeError(
                    'Invalid selection type.',
                    type(selection_method))

    def evaluate_individual(self, individual):
        """Return fitness value of the given individual."""
        return self._fitness_function(individual)

    def evaluate_population(self):
        """Apply set fitness function to every individual in _population
        in either sequential or parallel manner depending on value of
        the _parallelize attribute, and store results in the 'fitness' field.
        """
        evaluated_individuals = self._population.individuals
        if self._parallelize:
            with mp.Pool() as worker_pool:
                # NOTE(review): .get(5) imposes a 5 second timeout on the
                # whole batch; fit() catches the resulting TimeoutError.
                result = worker_pool.map_async(
                    self._fitness_function,
                    evaluated_individuals['genotype']).get(5)
                evaluated_individuals['fitness'] = result
        else:
            evaluated_individuals['fitness'] = [
                self._fitness_function(individual)
                for individual
                in evaluated_individuals['genotype']]
        self._population.replace_individuals(evaluated_individuals)

    def select_genomes(self):
        """Create new population by sequentially applying selection operators
        in the order they were given to __init__.
        Expand the new population to match the original one.
        """
        new_population = BasePopulation(
            0,
            self._population.genome_shapes,
            self._population.gene_values)
        for selection_method in self._selection_methods:
            new_population.append_individuals(
                selection_method.select(self._population))
        # Selection typically shrinks the population; refill it back up to
        # the original size.
        new_population.expand_population(
            self._population.population_size)
        self._population = new_population

    def fit(self, fitnes_function=None, verbose=1):
        """Main training loop.

        Return statistics of the run as dictionary of lists.

        Parameters
        ----------
        fitnes_function : BaseFunction, optional
            (sic -- the parameter name is misspelled, kept for backward
            compatibility.) Overrides the fitness function given to
            __init__; required if none was set there.
        verbose : int
            If not '0' outputs statistics using print every generation.
            Default is 1.

        Raises
        ------
        RuntimeError
            If no fitness function is available.
        """
        if fitnes_function:
            self._fitness_function = fitnes_function
        elif not self._fitness_function:
            raise RuntimeError("No fitness function supplied")
        run_id = uuid.uuid1()
        history = {
            "iteration" : [],
            "max_fitness" : [],
            "min_fitness" : [],
            "fitness_avg" : [],
            "fitness_std" : []}
        iteration = 0
        while iteration < self._max_iterations:
            try:
                self.evaluate_population()
            except mp.TimeoutError as timeoutException:
                # Parallel evaluation exceeded its timeout: stop the run and
                # return whatever history was gathered so far.
                print(
                    "Population evaluation timed out, with exception {}.".format(
                        timeoutException))
                break
            history["iteration"].append(iteration)
            history["max_fitness"].append(self._population.max_fitness)
            history["min_fitness"].append(self._population.min_fitness)
            history["fitness_avg"].append(self._population.avg_fitness)
            history["fitness_std"].append(self._population.fitness_std)
            if verbose > 0:
                print(" // ".join(
                    [key + ": " + str(record[-1]) for key, record in history.items()]
                ))
            # A perfect solution was found; no need to keep evolving.
            if self._population.max_fitness == 1.0:
                break
            self.select_genomes()
            self._population.mutate_genotypes(mutation_ops=self._mutation_ops)
            if self._crossover_probability > 0.0:
                self._population.cross_genomes(
                    crossover_prob=self._crossover_probability)
            iteration += 1
        if self.logger:
            self.logger.save_history(history, run_id=run_id)
        if self.details_logger:
            self.details_logger.save_run_details(self)
        return history

    @property
    def population(self):
        """Return optimized population."""
        return self._population

    @property
    def optimizer_uuid(self):
        """Return uuid of the optimizer."""
        return self._optimizer_uuid
|
<reponame>titonbarua/sphotik
import os.path
import sqlite3
import logging
from collections import deque, Counter
class HistoryManager:
    """Track usage history of roman -> bangla text conversions.

    Keeps a bounded in-memory session history plus a persistent sqlite3
    usage-count table; lookups consult recent session history first and fall
    back to disk.
    """

    # Database schema, executed once when the history file is first created.
    SCHEMA = """
    CREATE TABLE history(
        roman_text TEXT NOT NULL,
        bangla_text TEXT NOT NULL,
        usecount INTEGER NOT NULL DEFAULT 1,
        PRIMARY KEY (roman_text, bangla_text)
    );
    """

    def __init__(
            self,
            histfilepath,
            input_generalizer=lambda x: x,
            session_history_size=1000,
            session_history_concern_size=20,):
        """Open (or create) the history database at ``histfilepath``.

        Parameters
        ----------
        histfilepath : path of the sqlite3 history file.
        input_generalizer : callable mapping raw roman input to a
            generalized form used as the history key.
        session_history_size : maximum entries kept in memory.
        session_history_concern_size : number of recent matches used for
            frequency analysis.
        """
        # An input generalizer is a function that may be used to
        # generalize the input data, so that history suggestions can be
        # laxed and fuzzy; trading their accuracy in return.
        self.input_generalizer = input_generalizer

        # Session history is an in-memory sequence of most recently used
        # words and their input texts. The most likely conversion of a word
        # can be deduced by filtering the sequence by respective input text
        # and doing a frequency analysis on the used output texts.
        self.session_history_concern_size = session_history_concern_size
        self.session_history = deque(maxlen=session_history_size)

        if not os.path.isfile(histfilepath):
            # Create the history file with proper permissions.
            with open(histfilepath, "w") as f:
                os.chmod(histfilepath, 0o600)

            # Write database schema.
            self.conn = sqlite3.connect(histfilepath)
            with self.conn:
                self.conn.execute(self.SCHEMA.strip())
        else:
            # Open an existing database.
            try:
                self.conn = sqlite3.connect(histfilepath)
            except sqlite3.DatabaseError as e:
                self.conn = None
                # Bug fix: this message previously referenced the undefined
                # name `dbfilepath`, which raised NameError instead of
                # logging the warning.
                logging.warning(
                    "Failed to open history file '{}': {}"
                    .format(histfilepath, e))

    # Fetch all recorded conversions of a roman text, most used first.
    QUERY_SEARCH = """
    SELECT bangla_text, usecount FROM history
    WHERE roman_text = :roman_text ORDER BY usecount DESC;
    """

    def _split_trailing_punctuations_from_cord(self, cord, puncs):
        """Split `cord` into (head, tail), where tail is the run of trailing
        beads whose values are in `puncs`."""
        split_at = len(cord)
        for bead in reversed(cord):
            if bead.v in puncs:
                split_at += -1
            else:
                break
        return cord[:split_at], cord[split_at:]

    def search_without_punctuation(self, parser):
        """Search history for the parser's input both verbatim and with
        trailing punctuation stripped, re-attaching the punctuation to the
        punctuation-less suggestions."""
        # Collect with-punctuation search results.
        results = self.search(parser.input_text)

        # Split the cord into a punctuation-less head
        # and a punctuation tail.
        head, tail = self._split_trailing_punctuations_from_cord(
            parser.cord, parser.rule.punctuations)

        # Workflow:
        # - Extract strings from punctuation-less head
        #   and punctuation tail.
        # - Find outputs for punctuation-trimmed input.
        # - Join back the punctuation to the suggested output.
        input_t = parser.render_input_text(tail)
        if len(input_t) > 0:
            input_h = parser.render_input_text(head)
            for output, count in self.search(input_h).items():
                results[output + parser.render_text(tail)] = count

        return results

    def search(self, roman_text):
        """Return a Counter mapping bangla conversions of `roman_text` to
        their usage frequency."""
        return self._search(self.input_generalizer(roman_text))

    def _search(self, roman_text):
        # Fetch results from memory. The work flow of the following
        # comprehension is as follows:
        # - Find all the elements having target roman text,
        #   newest first.
        #
        # - Cut the above sequence upto 'concern size' length.
        #   This is done because we only want to do frequency
        #   analysis on most recent data.
        #
        # - Count the converted bangla texts and order them
        #   according to their relative frequency.
        hist = Counter(
            [
                bt for rt, bt
                in reversed(self.session_history)
                if rt == roman_text
            ][:self.session_history_concern_size]
        )
        if hist:
            return hist

        # Fetch results from disk, as we didn't find them in memory.
        if self.conn is None:
            return Counter()

        try:
            hist = Counter()
            with self.conn:
                result = self.conn.execute(
                    self.QUERY_SEARCH, {"roman_text": roman_text})
                for bangla_text, freq in result:
                    hist[bangla_text] = freq
            return hist
        except sqlite3.Error:
            logging.exception("Could not read history from disk.")
            return Counter()

    QUERY_SAVE_NEW = """
    INSERT INTO history (bangla_text, roman_text, usecount)
    VALUES (:bangla_text, :roman_text, 1);
    """

    QUERY_UPDATE_OLD = """
    UPDATE history SET usecount = usecount + 1
    WHERE roman_text = :roman_text AND bangla_text = :bangla_text;
    """

    def _split_trailing_punctuations_from_text(self, text, puncs):
        """Split `text` into (head, tail), where tail is the run of trailing
        characters found in `puncs`."""
        split_at = len(text)
        for c in reversed(text):
            if c in puncs:
                split_at += -1
            else:
                break
        return text[:split_at], text[split_at:]

    def save_without_punctuation(self, parser, bangla_text):
        """Record a conversion both verbatim and with trailing punctuation
        stripped from input and output."""
        # Save with punctuation.
        self.save(parser.input_text, bangla_text)

        # Get punctuationless head from the input.
        inp_head, _ = self._split_trailing_punctuations_from_cord(
            parser.cord, parser.rule.punctuations)

        # Get punctuationless head from the output.
        outp_head, _ = self._split_trailing_punctuations_from_text(
            bangla_text, parser.rule.punctuations)

        # Save without punctuation.
        self.save(parser.render_input_text(inp_head), outp_head)

    def save(self, roman_text, bangla_text):
        """Record that `roman_text` was converted to `bangla_text`."""
        return self._save(self.input_generalizer(roman_text), bangla_text)

    def _save(self, roman_text, bangla_text):
        # Save data to memory.
        self.session_history.append((roman_text, bangla_text))

        # Save data to disk.
        if self.conn is None:
            return

        try:
            with self.conn:
                values = {
                    "roman_text": roman_text,
                    "bangla_text": bangla_text}

                # Try bumping the use count of an existing pair first;
                # insert a fresh row only when nothing was updated.
                result = self.conn.execute(self.QUERY_UPDATE_OLD, values)
                if result.rowcount == 0:
                    self.conn.execute(self.QUERY_SAVE_NEW, values)
        except sqlite3.Error:
            logging.exception("Could not save history to disk.")
|
<gh_stars>1-10
import itertools
import logging
import os
import shutil
from typing import List, Tuple, Dict, TypeVar, Generator
import numpy as np
from .segment_quality_utils import HMMSegmentationQualityCalculator
from .. import types
from ..io import io_consts, io_commons, io_denoising_calling, io_intervals_and_counts
from ..models.model_denoising_calling import DenoisingModelConfig, CopyNumberCallingConfig, \
HHMMClassAndCopyNumberBasicCaller
from ..models.theano_hmm import TheanoForwardBackward, TheanoViterbi
from ..structs.interval import Interval
from ..structs.metadata import IntervalListMetadata
from ..structs.metadata import SampleMetadataCollection
from ..structs.segment import IntegerCopyNumberSegment
_logger = logging.getLogger(__name__)
class ViterbiSegmentationEngine:
    """This class runs the forward-backward and Viterbi algorithm on gCNV model/calls shards for a single sample,
    obtains constant copy-number segments, calculates various quality metrics, and saves the result to disk.

    Note:
        It is assumed that the model and calls shards are provided in order according to the SAM sequence dictionary.
        It is not checked or enforced here.
    """

    def __init__(self,
                 model_shards_paths: List[str],
                 calls_shards_paths: List[str],
                 sample_metadata_collection: SampleMetadataCollection,
                 sample_index: int,
                 output_path: str):
        """Initializer.

        Args:
            model_shards_paths: list of paths to model shards
            calls_shards_paths: list of paths to calls shards
            sample_metadata_collection: sample metadata collection (must contain sample being analyzed)
            sample_index: index of the sample in the callset
            output_path: output path for writing segmentation results
        """
        try:
            self._validate_args(model_shards_paths, calls_shards_paths, sample_metadata_collection, sample_index)
        except AssertionError as ex:
            raise Exception("Inconsistency detected in the provided model and calls shards.") from ex

        self.sample_index = sample_index
        self.output_path = output_path
        self.calls_shards_paths = calls_shards_paths
        self.sample_metadata_collection = sample_metadata_collection
        # Configs are identical across shards (checked in _validate_args),
        # so the first shard is representative.
        self.denoising_config = self._get_denoising_config(model_shards_paths[0])
        self.calling_config = self._get_calling_config(model_shards_paths[0])

        # assemble scattered global entities (interval list, log_q_tau_tk)
        _logger.info("Assembling interval list and copy-number class posterior from model shards...")
        self.interval_list: List[Interval] = []
        log_q_tau_tk_shards: Tuple[np.ndarray] = ()
        for model_path in model_shards_paths:
            self.interval_list += self._get_interval_list_from_model_shard(model_path)
            log_q_tau_tk_shards += (self._get_log_q_tau_tk_from_model_shard(model_path),)
        self.log_q_tau_tk: np.ndarray = np.concatenate(log_q_tau_tk_shards, axis=0)

        # extract SAM header lines from one of the interval lists
        self.interval_list_sam_header_lines = io_intervals_and_counts.extract_sam_header_from_file(
            os.path.join(model_shards_paths[0], io_consts.default_interval_list_filename))

        # sample names
        self.sample_name = self._get_sample_name_from_calls_shard(calls_shards_paths[0], sample_index)

        # interval list metadata
        interval_list_metadata: IntervalListMetadata = IntervalListMetadata(self.interval_list)
        self.ordered_contig_list = interval_list_metadata.ordered_contig_list
        self.contig_interval_indices = interval_list_metadata.contig_interval_indices
        self.contig_interval_lists: Dict[str, List[Interval]] = {
            contig: [self.interval_list[ti] for ti in self.contig_interval_indices[contig]]
            for contig in self.ordered_contig_list}

        # cnv stay probability for each contig
        self.cnv_stay_prob_t_j: Dict[str, np.ndarray] = dict()
        for contig in self.ordered_contig_list:
            contig_interval_list = self.contig_interval_lists[contig]
            dist_t = np.asarray([contig_interval_list[ti + 1].distance(contig_interval_list[ti])
                                 for ti in range(len(contig_interval_list) - 1)], dtype=types.floatX)
            self.cnv_stay_prob_t_j[contig] = np.exp(-dist_t / self.calling_config.cnv_coherence_length)

        # forward-backward algorithm
        _logger.info("Compiling theano forward-backward function...")
        self.theano_forward_backward = TheanoForwardBackward(
            log_posterior_probs_output_tc=None,
            resolve_nans=False,
            do_thermalization=False,
            do_admixing=False,
            include_update_size_output=False,
            include_alpha_beta_output=True)

        # viterbi algorithm
        _logger.info("Compiling theano Viterbi function...")
        self.theano_viterbi = TheanoViterbi()

        # copy-number HMM specs generator
        _logger.info("Compiling theano variational HHMM...")
        self.get_copy_number_hmm_specs = HHMMClassAndCopyNumberBasicCaller\
            .get_compiled_copy_number_hmm_specs_theano_func()

    def _viterbi_segments_generator(self) -> Generator[IntegerCopyNumberSegment, None, None]:
        """Performs Viterbi segmentation and segment quality calculation for a single sample in
        the call-set and returns a generator for segments.

        Returns:
            a generator for segments
        """
        # load copy number log emission for the sample
        copy_number_log_emission_tc_shards = ()
        for calls_path in self.calls_shards_paths:
            copy_number_log_emission_tc_shards += (self._get_log_copy_number_emission_tc_from_calls_shard(
                calls_path, self.sample_index),)
        copy_number_log_emission_tc = np.concatenate(copy_number_log_emission_tc_shards, axis=0)

        # iterate over contigs and perform segmentation
        sample_name = self.sample_name
        for contig_index, contig in enumerate(self.ordered_contig_list):
            _logger.info("Segmenting contig ({0}/{1}) (contig name: {2})...".format(
                contig_index + 1, len(self.ordered_contig_list), contig))

            # copy-number prior probabilities for each class
            contig_baseline_copy_number = self.sample_metadata_collection\
                .get_sample_ploidy_metadata(sample_name)\
                .get_contig_ploidy(contig)
            pi_jkc = HHMMClassAndCopyNumberBasicCaller.get_copy_number_prior_for_sample_jkc(
                self.calling_config.num_copy_number_states,
                self.calling_config.p_alt,
                np.asarray([contig_baseline_copy_number], dtype=types.med_uint))

            # contig interval list and indices
            contig_interval_list = self.contig_interval_lists[contig]
            contig_interval_indices = self.contig_interval_indices[contig]

            # mapping from intervals to contig index (since we have a single contig, all intervals map to index=0)
            t_to_j_map = np.zeros((len(contig_interval_list),), dtype=types.med_uint)

            # copy-number class log probability
            log_q_tau_tk = self.log_q_tau_tk[contig_interval_indices, :]

            # copy-number log emission probability for contig intervals
            copy_number_log_emission_contig_tc = copy_number_log_emission_tc[contig_interval_indices, :]

            # get HMM specs
            hmm_specs = self.get_copy_number_hmm_specs(
                pi_jkc, self.cnv_stay_prob_t_j[contig], log_q_tau_tk, t_to_j_map)
            log_prior_c = hmm_specs[0]
            log_trans_contig_tcc = hmm_specs[1]

            # run forward-back algorithm
            fb_result = self.theano_forward_backward.perform_forward_backward(
                log_prior_c, log_trans_contig_tcc, copy_number_log_emission_contig_tc)
            log_posterior_prob_tc = fb_result.log_posterior_probs_tc
            log_data_likelihood = fb_result.log_data_likelihood
            alpha_tc = fb_result.alpha_tc
            beta_tc = fb_result.beta_tc

            # run viterbi algorithm
            viterbi_path_t_contig = self.theano_viterbi.get_viterbi_path(
                log_prior_c, log_trans_contig_tcc, copy_number_log_emission_contig_tc)

            # initialize the segment quality calculator
            segment_quality_calculator: HMMSegmentationQualityCalculator = HMMSegmentationQualityCalculator(
                copy_number_log_emission_contig_tc, log_trans_contig_tcc,
                alpha_tc, beta_tc, log_posterior_prob_tc, log_data_likelihood)

            # coalesce into piecewise constant copy-number segments, calculate qualities
            for call_copy_number, start_index, end_index in self._coalesce_seq_into_segments(viterbi_path_t_contig):
                num_points = end_index - start_index + 1
                segment = IntegerCopyNumberSegment(contig,
                                                   contig_interval_list[start_index].start,
                                                   contig_interval_list[end_index].end,
                                                   num_points,
                                                   call_copy_number,
                                                   contig_baseline_copy_number)
                if num_points > 1:
                    segment.quality_some_called = segment_quality_calculator.get_segment_quality_some_called(
                        start_index, end_index, call_copy_number)
                    segment.quality_all_called = segment_quality_calculator.get_segment_quality_all_called(
                        start_index, end_index, call_copy_number)
                    segment.quality_start = segment_quality_calculator.get_segment_quality_start(
                        start_index, call_copy_number)
                    segment.quality_end = segment_quality_calculator.get_segment_quality_end(
                        end_index, call_copy_number)
                else:  # for single-interval segments, all qualities must be the same
                    segment.quality_some_called = segment_quality_calculator.get_segment_quality_some_called(
                        start_index, end_index, call_copy_number)
                    segment.quality_all_called = segment.quality_some_called
                    segment.quality_start = segment.quality_some_called
                    segment.quality_end = segment.quality_some_called

                yield segment

    def write_copy_number_segments(self):
        """Performs Viterbi segmentation and segment quality calculation for a single sample in
        the call-set and saves the results to disk.
        """
        sample_name = self.sample_name
        _logger.info("Processing sample index: {0}, sample name: {1}...".format(self.sample_index, sample_name))
        sample_output_path = os.path.join(self.output_path, io_consts.sample_folder_prefix + repr(self.sample_index))
        io_commons.assert_output_path_writable(sample_output_path, try_creating_output_path=True)

        # write configs, gcnvkernel version and sample name to output path
        shutil.copy(os.path.join(self.calls_shards_paths[0], io_consts.default_denoising_config_json_filename),
                    sample_output_path)
        shutil.copy(os.path.join(self.calls_shards_paths[0], io_consts.default_calling_config_json_filename),
                    sample_output_path)
        io_commons.write_gcnvkernel_version(sample_output_path)
        io_commons.write_sample_name_to_txt_file(sample_output_path, sample_name)

        seg_file = os.path.join(sample_output_path, io_consts.default_copy_number_segments_tsv_filename)
        with open(seg_file, 'w') as of:
            # copy SAM header lines from model/calls interval list
            for sam_header_line in self.interval_list_sam_header_lines:
                of.write(sam_header_line + '\n')
            # add sample name header
            of.write('@' + io_consts.sample_name_sam_header_prefix + sample_name + '\n')
            # add table column headers
            of.write(IntegerCopyNumberSegment.get_header_column_string() + '\n')
            # add segments
            for segment in self._viterbi_segments_generator():
                of.write(repr(segment) + '\n')

    @staticmethod
    def _validate_args(model_shards_paths: List[str],
                       calls_shards_paths: List[str],
                       sample_metadata_collection: SampleMetadataCollection,
                       sample_index: int):
        """Assert that model/calls shards are mutually consistent and that the
        analyzed sample has ploidy metadata. Raises AssertionError otherwise."""
        assert len(model_shards_paths) > 0, "At least one model shard must be provided."
        assert len(calls_shards_paths) == len(model_shards_paths),\
            "The number of model shards ({0}) and calls shards ({1}) must match.".format(
                len(model_shards_paths), len(calls_shards_paths))
        assert sample_index >= 0, "Sample index must be an integer non-negative number"

        scattered_sample_names: List[str] = []
        for model_path, calls_path in zip(model_shards_paths, calls_shards_paths):
            # assert interval lists are identical
            model_interval_list_file = os.path.join(model_path, io_consts.default_interval_list_filename)
            calls_interval_list_file = os.path.join(calls_path, io_consts.default_interval_list_filename)
            io_commons.assert_files_are_identical(model_interval_list_file, calls_interval_list_file)

            # assert gcnvkernel versions are identical
            model_gcnvkernel_version_file = os.path.join(model_path, io_consts.default_gcnvkernel_version_json_filename)
            calls_gcnvkernel_version_file = os.path.join(calls_path, io_consts.default_gcnvkernel_version_json_filename)
            try:
                io_commons.assert_files_are_identical(model_gcnvkernel_version_file, calls_gcnvkernel_version_file)
            except AssertionError:
                _logger.warning("Different gcnvkernel versions between model and calls -- proceeding at your own risk!")

            # assert denoising configs are identical
            model_denoising_config_file = os.path.join(model_path, io_consts.default_denoising_config_json_filename)
            calls_denoising_config_file = os.path.join(calls_path, io_consts.default_denoising_config_json_filename)
            try:
                io_commons.assert_files_are_identical(model_denoising_config_file, calls_denoising_config_file)
            except AssertionError:
                _logger.warning("Different denoising configuration between model and calls -- "
                                "proceeding at your own risk!")

            # assert callings configs are identical
            model_calling_config_file = os.path.join(model_path, io_consts.default_calling_config_json_filename)
            calls_calling_config_file = os.path.join(calls_path, io_consts.default_calling_config_json_filename)
            try:
                io_commons.assert_files_are_identical(model_calling_config_file, calls_calling_config_file)
            except AssertionError:
                _logger.warning("Different calling configuration between model and calls -- "
                                "proceeding at your own risk!")

            # extract and store sample names for the current shard
            scattered_sample_names.append(
                ViterbiSegmentationEngine._get_sample_name_from_calls_shard(calls_path, sample_index))

        # all scattered calls have the same set of samples and in the same order
        assert len(set(scattered_sample_names)) == 1,\
            "The calls shards contain different sample names and/or different number of samples."

        # all samples have ploidy calls in the metadata collection.
        # Bug fix: this previously used list(scattered_sample_names[0]), which
        # splits the sample-name *string* into a list of its characters; the
        # ploidy-metadata check needs a one-element list of names instead.
        sample_names = [scattered_sample_names[0]]
        sample_metadata_collection.all_samples_have_ploidy_metadata(sample_names)

    @staticmethod
    def _get_sample_name_from_calls_shard(calls_path: str, sample_index: int) -> str:
        """Read the sample name stored in a calls shard for the given sample index."""
        sample_posteriors_path = io_denoising_calling.get_sample_posterior_path(calls_path, sample_index)
        if not os.path.isdir(sample_posteriors_path):
            raise Exception("Could not find any sample posterior calls in {0} for sample with index {1}.".
                            format(calls_path, sample_index))
        sample_name = io_commons.get_sample_name_from_txt_file(sample_posteriors_path)
        return sample_name

    @staticmethod
    def _get_denoising_config(input_path: str) -> DenoisingModelConfig:
        """Load the denoising model configuration from a shard directory."""
        return DenoisingModelConfig.from_json_file(os.path.join(
            input_path, io_consts.default_denoising_config_json_filename))

    @staticmethod
    def _get_calling_config(input_path: str) -> CopyNumberCallingConfig:
        """Load the copy-number calling configuration from a shard directory."""
        return CopyNumberCallingConfig.from_json_file(os.path.join(
            input_path, io_consts.default_calling_config_json_filename))

    @staticmethod
    def _get_interval_list_from_model_shard(model_path: str) -> List[Interval]:
        """Load the interval list of a model shard."""
        interval_list_file = os.path.join(model_path, io_consts.default_interval_list_filename)
        return io_intervals_and_counts.load_interval_list_tsv_file(interval_list_file)

    @staticmethod
    def _get_log_q_tau_tk_from_model_shard(model_path: str) -> np.ndarray:
        """Load the copy-number class log posterior array of a model shard."""
        return io_commons.read_ndarray_from_tsv(os.path.join(
            model_path, io_consts.default_class_log_posterior_tsv_filename))

    @staticmethod
    def _get_log_copy_number_emission_tc_from_calls_shard(calls_path: str, sample_index: int):
        """Load the copy-number log emission matrix for one sample from a calls shard."""
        return io_denoising_calling.SampleDenoisingAndCallingPosteriorsReader.\
            read_ndarray_tc_with_copy_number_header(
                io_denoising_calling.get_sample_posterior_path(calls_path, sample_index),
                io_consts.default_copy_number_log_emission_tsv_filename)

    @staticmethod
    def _coalesce_seq_into_segments(seq: List[TypeVar('_T')]) \
            -> Generator[Tuple[TypeVar('_T'), int, int], None, None]:
        """Coalesces a sequence of objects into piecewise constant segments, along with start and end indices
        for each constant segment.

        (Annotation fix: this is a generator function, so the return type is a
        Generator, not a List.)

        Example:
            seq = ['a', 'a', 'a', 'a', 'b', 'c', 'c', 'a', 'a', 'a']
            result = [('a', 0, 3), ('b', 4, 4), ('c', 5, 6), ('a', 7, 9)]

        Args:
            seq: a sequence of objects that implement __equals__

        Returns:
            a generator for (object, start_index, end_index)
        """
        for seg in itertools.groupby(enumerate(seq), key=lambda elem: elem[1]):
            seg_const = seg[0]
            grouper = seg[1]
            # The first element of the group carries the start index; drain
            # the grouper to find the last index of the constant run.
            start_index = grouper.__next__()[0]
            end_index = start_index
            try:
                while True:
                    end_index = grouper.__next__()[0]
            except StopIteration:
                pass
            yield (seg_const, start_index, end_index)
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, <NAME>
# All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from functools import partial
import pytest
from statechart import (CompositeState, Event, State, InitialState, Statechart, Transition)
@pytest.fixture
def empty_statechart():
    """Provide a fresh statechart containing no states for each test."""
    return Statechart(name='statechart')
class TestTransition:
    """Behavioural tests for statechart transitions: external, local, cross-hierarchy,
    event consumption and transition actions."""

    class StateSpy(CompositeState):
        """Composite state that counts how many times it is entered and exited."""

        def __init__(self, name, context):
            super().__init__(name=name, context=context)

            # Count state entries and exits.
            self.entries = 0
            self.exits = 0

            init = InitialState(self)
            self.default = State(name='default', context=self)
            self.local = State(name='local', context=self)
            Transition(start=init, end=self.default)

        def entry(self, event):
            self.entries += 1

        def exit(self, event):
            self.exits += 1

    def test_create_transition(self, empty_statechart):
        """A new transition registers itself on its start state and is set up to
        deactivate the start state and activate the end state."""
        initial_state = InitialState(empty_statechart)
        next_state = State(name='next', context=empty_statechart)
        transition = Transition(start=initial_state, end=next_state)

        # The transition should be added to the initial state's list of
        # outgoing transitions.
        assert transition in initial_state.transitions

        # When executed, the transition should be setup to deactivate the
        # initial state and to activate the next state.
        assert initial_state in transition.deactivate
        assert next_state in transition.activate

    def test_create_cyclic_transition(self, empty_statechart):
        """A self-transition deactivates and re-activates its own state."""
        next_state = State(name='next', context=empty_statechart)
        transition = Transition(start=next_state, end=next_state)

        # The transition should be added to the state's list of outgoing
        # transitions.
        assert transition in next_state.transitions

        # When executed, the transition should be setup to deactivate the
        # next state and to re-activate it.
        assert next_state in transition.deactivate
        assert next_state in transition.activate

    def test_external_transition(self, empty_statechart):
        """An external self-transition exits and re-enters the state."""
        init = InitialState(empty_statechart)
        state_spy = self.StateSpy(name='spy', context=empty_statechart)
        Transition(start=init, end=state_spy)
        Transition(start=state_spy, end=state_spy, event='extern')

        empty_statechart.start()

        assert empty_statechart.is_active('spy')
        # NOTE: integer comparisons use == here; `is 1` relied on CPython's
        # small-int cache and raises a SyntaxWarning on Python >= 3.8.
        assert state_spy.entries == 1
        assert state_spy.exits == 0

        empty_statechart.dispatch(Event('extern'))

        # After dispatching the external event from the state spy, the
        # state should be deactivated and activated again.
        assert empty_statechart.is_active('spy')
        assert state_spy.entries == 2
        assert state_spy.exits == 1

    def test_local_transition(self, empty_statechart):
        """A local transition changes the active substate without exiting or
        re-entering the composite state itself."""
        init = InitialState(empty_statechart)
        state_spy = self.StateSpy(name='spy', context=empty_statechart)
        Transition(start=init, end=state_spy)
        Transition(start=state_spy, end=state_spy.local, event=Event('local'))

        empty_statechart.start()

        assert empty_statechart.is_active('spy')
        assert empty_statechart.is_active('default')
        assert state_spy.entries == 1
        assert state_spy.exits == 0

        empty_statechart.dispatch(Event('local'))

        assert empty_statechart.is_active('spy')
        assert not empty_statechart.is_active('default')
        assert empty_statechart.is_active('local')
        # The composite state was never exited, so the counters are unchanged.
        assert state_spy.entries == 1
        assert state_spy.exits == 0

    def test_deep_local_transitions(self, empty_statechart):
        """Local transitions into a nested hierarchy activate the expected
        default substates at every level."""
        sc = empty_statechart
        init = InitialState(sc)
        top = CompositeState(name='top', context=sc)
        top_init = InitialState(top)
        middle_a = CompositeState(name='middle_a', context=top)
        middle_b = CompositeState(name='middle_b', context=top)
        middle_a_init = InitialState(middle_a)
        bottom_a1 = State(name='bottom_a1', context=middle_a)
        bottom_a2 = State(name='bottom_a2', context=middle_a)
        middle_b_init = InitialState(middle_b)
        bottom_b1 = State(name='bottom_b1', context=middle_b)
        bottom_b2 = State(name='bottom_b2', context=middle_b)

        # Setup default transitions
        Transition(start=init, end=top)
        Transition(start=top_init, end=middle_a)
        Transition(start=middle_a_init, end=bottom_a1)
        Transition(start=middle_b_init, end=bottom_b1)

        # Setup events to trigger transitions
        a_to_b = Event('a_to_b')
        a1_to_a2 = Event('a1_to_a2')
        b1_to_b2 = Event('b1_to_b2')
        top_to_middle_a = Event('top_to_middle_a')
        top_to_middle_b = Event('top_to_middle_b')
        middle_a_to_a1 = Event('middle_a_to_a1')
        middle_a_to_a2 = Event('middle_a_to_a2')
        middle_b_to_b1 = Event('middle_b_to_b1')
        middle_b_to_b2 = Event('middle_b_to_b2')

        # Setup external transitions
        Transition(start=middle_a, end=middle_b, event=a_to_b)
        Transition(start=bottom_a1, end=bottom_a2, event=a1_to_a2)
        # FIX: this transition previously started at bottom_a1, which looks like a
        # copy/paste slip; b1_to_b2 belongs to the b-branch. (The event is never
        # dispatched below, so the suite's outcome is unchanged either way.)
        Transition(start=bottom_b1, end=bottom_b2, event=b1_to_b2)

        # Setup local transitions
        Transition(start=top, end=middle_a, event=top_to_middle_a)
        Transition(start=top, end=middle_b, event=top_to_middle_b)
        Transition(start=middle_a, end=bottom_a1, event=middle_a_to_a1)
        Transition(start=middle_a, end=bottom_a2, event=middle_a_to_a2)
        Transition(start=middle_b, end=bottom_b1, event=middle_b_to_b1)
        Transition(start=middle_b, end=bottom_b2, event=middle_b_to_b2)

        sc.start()

        assert sc.is_active('top')
        assert sc.is_active('middle_a')
        assert sc.is_active('bottom_a1')

        sc.dispatch(middle_a_to_a2)

        assert sc.is_active('top')
        assert sc.is_active('middle_a')
        assert sc.is_active('bottom_a2')

        sc.dispatch(top_to_middle_b)

        assert sc.is_active('top')
        assert sc.is_active('middle_b')
        assert sc.is_active('bottom_b1')

        sc.dispatch(top_to_middle_a)

        assert sc.is_active('top')
        assert sc.is_active('middle_a')
        assert sc.is_active('bottom_a1')

        sc.dispatch(a_to_b)

        assert sc.is_active('top')
        assert sc.is_active('middle_b')
        assert sc.is_active('bottom_b1')

        sc.dispatch(middle_b_to_b2)

        assert sc.is_active('top')
        assert sc.is_active('middle_b')
        assert sc.is_active('bottom_b2')

    def test_transition_hierarchy(self, empty_statechart):
        """Transitions that cross composite-state boundaries activate the
        default substates of the entered composite."""
        sc = empty_statechart
        init = InitialState(sc)
        top = CompositeState(name='top', context=sc)
        top_init = InitialState(top)
        middle_a = CompositeState(name='middle_a', context=top)
        middle_b = CompositeState(name='middle_b', context=top)
        middle_a_init = InitialState(middle_a)
        bottom_a1 = State(name='bottom_a1', context=middle_a)
        bottom_a2 = State(name='bottom_a2', context=middle_a)
        middle_b_init = InitialState(middle_b)
        bottom_b1 = State(name='bottom_b1', context=middle_b)

        # Setup default transitions
        Transition(start=init, end=top)
        Transition(start=top_init, end=middle_a)
        Transition(start=middle_a_init, end=bottom_a1)
        Transition(start=middle_b_init, end=bottom_b1)

        # Setup event triggers
        across = Event('across')
        up = Event('up')

        # Setup external transitions
        Transition(start=bottom_a1, end=bottom_a2, event=across)
        Transition(start=bottom_a2, end=middle_b, event=up)
        Transition(start=middle_b, end=middle_a, event=across)

        sc.start()

        assert sc.is_active('top')
        assert sc.is_active('middle_a')
        assert sc.is_active('bottom_a1')

        sc.dispatch(across)

        assert sc.is_active('top')
        assert sc.is_active('middle_a')
        assert sc.is_active('bottom_a2')

        sc.dispatch(up)

        assert sc.is_active('top')
        assert sc.is_active('middle_b')
        assert sc.is_active('bottom_b1')

        sc.dispatch(across)

        assert sc.is_active('top')
        assert sc.is_active('middle_a')
        assert sc.is_active('bottom_a1')

    def test_transition_event_consumed(self, empty_statechart):
        """An event handled by the deepest active state is consumed there and
        does not also fire an outer transition with the same trigger."""
        sc = empty_statechart
        init = InitialState(sc)
        b = State(name='b', context=sc)
        cs = CompositeState(name='cs', context=sc)
        cs_init = InitialState(cs)
        cs_a = State(name='cs a', context=cs)
        cs_b = State(name='cs b', context=cs)

        Transition(start=init, end=cs)
        Transition(start=cs, end=cs_init)
        Transition(start=cs_init, end=cs_a)
        Transition(start=cs_a, end=cs_b, event='home')
        Transition(start=cs, end=b, event='home')

        sc.start()

        assert sc.is_active('cs a')

        sc.dispatch(Event('home'))

        # The inner transition wins; the outer cs -> b transition must not fire.
        assert sc.is_active('cs b')

    def test_transition_action_function(self, empty_statechart):
        """A transition action callable runs when the transition fires."""
        self.state = False

        def set_state(state):
            self.state = bool(state)

        set_true = partial(set_state, True)

        sc = empty_statechart
        initial = InitialState(sc)
        default = State(name='default', context=sc)
        next_state = State(name='next', context=sc)

        Transition(start=initial, end=default)
        Transition(start=default, end=next_state, event='next', action=set_true)

        sc.start()
        sc.dispatch(Event('next'))

        assert self.state

    def test_transition_action_function_with_event(self, empty_statechart):
        """A transition action receives the triggering event and its data."""
        self.state = False

        def set_state(event):
            self.state = event.data['state']

        sc = empty_statechart
        initial = InitialState(sc)
        default = State(name='default', context=sc)
        next_state = State(name='next', context=sc)

        Transition(start=initial, end=default)
        Transition(start=default, end=next_state, event='next', action=set_state)

        sc.start()
        sc.dispatch(Event(name='next', data={'state': True}))

        assert self.state

    def test_transition_action_function_with_metadata(self, empty_statechart):
        """A transition action can read state from the statechart's metadata."""
        sc = empty_statechart
        sc.metadata.state = True
        self.state = False

        def set_state(event):
            self.state = sc.metadata.state

        initial = InitialState(sc)
        default = State(name='default', context=sc)
        next_state = State(name='next', context=sc)

        Transition(start=initial, end=default)
        Transition(start=default, end=next_state, event='next', action=set_state)

        sc.start()
        sc.dispatch(Event('next'))

        assert self.state
|
# -*- coding: utf-8 -*-
# Copyright 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####################################################################################################
### Adapted by GComyn on December 19, 2016
####################################################################################################
from __future__ import absolute_import
''' This adapter will download stories from the site unknowableroom.org '''
import logging
import re
import sys
# py2 vs py3 transition
from ..six import text_type as unicode
from ..six.moves.urllib.error import HTTPError
from .base_adapter import BaseSiteAdapter, makeDate
from .. import exceptions as exceptions
from ..htmlcleanup import stripHTML
logger = logging.getLogger(__name__)
####################################################################################################
def getClass():
    """Module-level hook: expose this module's adapter class to FanFicFare's registry."""
    return UnknowableRoomOrgSiteAdapter
####################################################################################################
class UnknowableRoomOrgSiteAdapter(BaseSiteAdapter):
    """FanFicFare adapter for stories hosted on unknowableroom.org."""

    ################################################################################################
    def __init__(self, config, url):
        BaseSiteAdapter.__init__(self, config, url)

        self.story.setMetadata('siteabbrev','urorg')

        # 1252 is a superset of iso-8859-1. Most sites that claim to be iso-8859-1 (and some that
        # claim to be utf8) are really windows-1252.
        self.decode = ["Windows-1252", "utf8", "iso-8859-1"]

        # Setting the adult status to false initially
        self.is_adult=False

        # get storyId from url
        self.story.setMetadata('storyId',self.parsedUrl.path.split('/',)[1])

        # normalized story URL.
        self._setURL('http://'+self.getSiteDomain()+'/'+self.story.getMetadata('storyId') + '/1')

        # The date format will vary from site to site.
        # http://docs.python.org/library/datetime.html#strftime-strptime-behavior
        self.dateformat = "%B %d, %Y"

    ################################################################################################
    @staticmethod
    def getSiteDomain():
        return 'unknowableroom.org'

    ################################################################################################
    @classmethod
    def getSiteExampleURLs(cls):
        return "http://"+cls.getSiteDomain()+"/1234/1"

    ################################################################################################
    def getSiteURLPattern(self):
        return re.escape("http://"+self.getSiteDomain())+r"/\d+/\d"

    ################################################################################################
    def get_page(self, page):
        '''
        This will download the url from the web and return the data.

        Raises StoryDoesNotExist on a 404; any other HTTP error is re-raised.
        '''
        try:
            page_data = self._fetchUrl(page)
        except HTTPError as e:
            if e.code == 404:
                raise exceptions.StoryDoesNotExist('404 error: {}'.format(page))
            else:
                raise e
        return page_data

    ################################################################################################
    def extractChapterUrlsAndMetadata(self):
        '''Populate chapter list and story metadata from the story and author pages.'''
        ## There is no way to tell if a fic is complete or not, so we can't set the status, which
        # will default to 'Unknown'
        url = self.url
        logger.debug("URL: "+url)
        data = self.get_page(url)

        if "<!DOCTYPE html" not in data:
            raise exceptions.StoryDoesNotExist(url)

        # use BeautifulSoup HTML parser to make everything easier to find.
        soup = self.make_soup(data)

        # Find authorid and URL from... author url.
        a = soup.find('a', {'class':'user'})
        if a:
            self.story.setMetadata('authorId',a['href'].split('/')[-1])
            self.story.setMetadata('authorUrl','http://'+self.host+a['href']+'/fics')
            self.story.setMetadata('author',a.string)
        else:
            # No author link; fall back to parsing the author name out of the <h1> title.
            author = soup.find('h1').string
            author = author[author.rfind('by')+2:].strip()
            self.story.setMetadata('authorId', author)
            self.story.setMetadata('authorUrl', 'http://'+self.getSiteDomain())
            self.story.setMetadata('author', author)

        ## Title
        rawtitle = stripHTML(soup.find('h1')).replace(
            'by '+self.story.getMetadata('author'), '').strip()
        self.story.setMetadata('title',rawtitle)

        # Find the chapters:
        for chapter in soup.find('select').find_all('option', value=re.compile(
                '/'+self.story.getMetadata('storyId')+r'/\d+')):
            # just in case there's tags, like <i> in chapter titles.
            self.add_chapter(chapter,'http://'+self.host+chapter['value'])

        ## One chapter stories do not have a listing for the chapters, so we have to check to make
        ## sure, and if there aren't any chapters, we set it to the Url entered.
        if self.num_chapters() == 0:
            self.add_chapter(self.story.getMetadata('title'), url)

        # Most of the metadata can be gotten from the story page, but it can all be gotten from the
        # author's fic page, so we are going to get it from there. Unless there is no author page,
        # then we have to use what we can get.
        if self.story.getMetadata('authorUrl') != 'http://'+self.getSiteDomain():
            adata = self.get_page(self.story.getMetadata('authorUrl'))
            asoup = self.make_soup(adata)

            story_found = False
            for story in asoup.find('ul', {'id':'fic_list'}).find_all('li'):
                if rawtitle == stripHTML(story.a):
                    story_found = True
                    break
                else:
                    story_found = False

            if not story_found:
                raise exceptions.StoryDoesNotExist("Cannot find story '{}' on author's page '{}'".format(
                    url, self.story.getMetadata('authorUrl')))

            if story_found:
                self.setDescription(url, stripHTML(story.p).strip())

                # The metadata is contained in a <cite> tag, with only a bold tag and seperated by a
                # period (.).
                # It has 6 'elements'
                # 0 = Rating
                # 1 = chapters and words
                # 2 = Genre
                # 3 = Characters
                # 4 = Posted Date
                # 5 = Updated Date
                metad = stripHTML(story.cite).replace('.,', ',').split('.')
                self.story.setMetadata('rating',metad[0])
                self.story.setMetadata('numWords', metad[1].split()[2])
                self.story.setMetadata('genre',metad[2])
                self.story.setMetadata('characters',metad[3])
                # The dates have letters in them, so we have to remove them.
                date_pub = metad[4].replace('Created ','').replace('st,', ',').replace('nd,', ',').replace(
                    'rd,', ',').replace('th,', ',').strip()
                date_upd = metad[5].replace('Updated ','').replace('st,', ',').replace('nd,', ',').replace(
                    'rd,', ',').replace('th,', ',').strip()
                self.story.setMetadata('datePublished', makeDate(date_pub, self.dateformat))
                # BUG FIX: dateUpdated was previously computed from date_pub, so the updated date
                # always silently mirrored the published date.
                self.story.setMetadata('dateUpdated', makeDate(date_upd, self.dateformat))

        # else:
        if not self.story.getMetadata('rating'):
            # There was no author page, so we get what we can from the page
            self.setDescription(url, '>>>>>>>>>> No Summary Found <<<<<<<<<<')

            metad = soup.find('div', {'class':'info'})
            for mdata in metad.find_all('b'):
                if mdata.string == 'Rating:':
                    self.story.setMetadata('rating', mdata.next_sibling)
                elif mdata.string == 'Created:':
                    value = mdata.next_sibling.replace('st,', ',').replace('nd,', ',').replace(
                        'rd,', ',').replace('th,', ',').replace('.', '').strip()
                    self.story.setMetadata('datePublished', makeDate(value, self.dateformat))
                elif mdata.string == 'Updated:':
                    value = mdata.next_sibling.replace('st,', ',').replace('nd,', ',').replace(
                        'rd,', ',').replace('th,', ',').replace('.', '').strip()
                    self.story.setMetadata('dateUpdated', makeDate(value, self.dateformat))

        # I'm going to add the disclaimer
        disclaimer = soup.find('strong', {'id':'disclaimer'})
        if disclaimer:
            self.story.setMetadata('disclaimer', stripHTML(disclaimer).replace(
                'Disclaimer:', '').strip())

    ################################################################################################
    def getChapterText(self, url):
        '''Download one chapter page and return its cleaned-up story HTML.'''
        logger.debug('Getting chapter text from: %s' % url)

        data = self.get_page(url)
        soup = self.make_soup(data)

        story = soup.find('div', {'id' : 'fic'})

        if not story:
            raise exceptions.FailedToDownload(
                "Error downloading Chapter: %s! Missing required element!" % url)

        ## I'm going to take the attributes off all of the tags
        ## because they usually refer to the style that we removed above.
        # NOTE(review): setting attrs to None (instead of {}) works on the bs4 versions
        # this was written against — confirm if bs4 is upgraded.
        for tag in story.findAll('p')+story.findAll('span'):
            tag.attrs = None

        return self.utf8FromSoup(url, story)
|
import os
import torch
import numpy as np
import numpy.random as rd
class ReplayBuffer:
def __init__(self, max_len, state_dim, action_dim, if_use_per, gpu_id=0, state_type=torch.float32):
"""Experience Replay Buffer
save environment transition in a continuous RAM for high performance training
we save trajectory in order and save state and other (action, reward, mask, ...) separately.
`int max_len` the maximum capacity of ReplayBuffer. First In First Out
`int state_dim` the dimension of state
`int action_dim` the dimension of action (action_dim==1 for discrete action)
`bool if_on_policy` on-policy or off-policy
`bool if_gpu` create buffer space on CPU RAM or GPU
`bool if_per` Prioritized Experience Replay for sparse reward
"""
self.now_len = 0
self.next_id = 0
self.if_full = False
self.max_len = max_len
self.data_type = torch.float32
self.action_dim = action_dim
self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
self.per_tree = BinarySearchTree(max_len) if if_use_per else None
other_dim = 1 + 1 + self.action_dim
self.buf_other = torch.empty((max_len, other_dim), dtype=torch.float32, device=self.device)
buf_state_shape = (max_len, state_dim) if isinstance(state_dim, int) else (max_len, *state_dim)
self.buf_state = torch.empty(buf_state_shape, dtype=state_type, device=self.device)
def append_buffer(self, state, other): # CPU array to CPU array
self.buf_state[self.next_id] = state
self.buf_other[self.next_id] = other
if self.per_tree:
self.per_tree.update_id(self.next_id)
self.next_id += 1
if self.next_id >= self.max_len:
self.if_full = True
self.next_id = 0
def extend_buffer(self, state, other):
size = len(other)
next_idx = self.next_id + size
if self.per_tree:
self.per_tree.update_ids(data_ids=np.arange(self.next_id, next_idx) % self.max_len)
if next_idx > self.max_len:
self.buf_state[self.next_id:self.max_len] = state[:self.max_len - self.next_id]
self.buf_other[self.next_id:self.max_len] = other[:self.max_len - self.next_id]
self.if_full = True
next_idx = next_idx - self.max_len
self.buf_state[0:next_idx] = state[-next_idx:]
self.buf_other[0:next_idx] = other[-next_idx:]
else:
self.buf_state[self.next_id:next_idx] = state
self.buf_other[self.next_id:next_idx] = other
self.next_id = next_idx
def sample_batch(self, batch_size) -> tuple:
"""randomly sample a batch of data for training
:int batch_size: the number of data in a batch for Stochastic Gradient Descent
:return torch.Tensor reward: reward.shape==(now_len, 1)
:return torch.Tensor mask: mask.shape ==(now_len, 1), mask = 0.0 if done else gamma
:return torch.Tensor action: action.shape==(now_len, action_dim)
:return torch.Tensor state: state.shape ==(now_len, state_dim)
:return torch.Tensor state: state.shape ==(now_len, state_dim), next state
"""
if self.per_tree:
beg = -self.max_len
end = (self.now_len - self.max_len) if (self.now_len < self.max_len) else None
indices, is_weights = self.per_tree.get_indices_is_weights(batch_size, beg, end)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1].type(torch.float32), # reward
r_m_a[:, 1:2].type(torch.float32), # mask
r_m_a[:, 2:].type(torch.float32), # action
self.buf_state[indices].type(torch.float32), # state
self.buf_state[indices + 1].type(torch.float32), # next state
torch.as_tensor(is_weights, dtype=torch.float32, device=self.device)) # important sampling weights
else:
indices = rd.randint(self.now_len - 1, size=batch_size)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1], # reward
r_m_a[:, 1:2], # mask
r_m_a[:, 2:], # action
self.buf_state[indices],
self.buf_state[indices + 1])
def sample_batch_one_step(self, batch_size) -> tuple:
if self.per_tree:
beg = -self.max_len
end = (self.now_len - self.max_len) if (self.now_len < self.max_len) else None
indices, is_weights = self.per_tree.get_indices_is_weights(batch_size, beg, end)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1].type(torch.float32), # reward
r_m_a[:, 2:].type(torch.float32), # action
self.buf_state[indices].type(torch.float32), # state
torch.as_tensor(is_weights, dtype=torch.float32, device=self.device)) # important sampling weights
else:
indices = rd.randint(self.now_len - 1, size=batch_size)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1], # reward
r_m_a[:, 2:], # action
self.buf_state[indices],)
def update_now_len(self):
"""update the a pointer `now_len`, which is the current data number of ReplayBuffer
"""
self.now_len = self.max_len if self.if_full else self.next_id
def print_state_norm(self, neg_avg=None, div_std=None): # non-essential
"""print the state norm information: state_avg, state_std
We don't suggest to use running stat state.
We directly do normalization on state using the historical avg and std
eg. `state = (state + self.neg_state_avg) * self.div_state_std` in `PreprocessEnv.step_norm()`
neg_avg = -states.mean()
div_std = 1/(states.std()+1e-5) or 6/(states.max()-states.min())
:array neg_avg: neg_avg.shape=(state_dim)
:array div_std: div_std.shape=(state_dim)
"""
max_sample_size = 2 ** 14
'''check if pass'''
state_shape = self.buf_state.shape
if len(state_shape) > 2 or state_shape[1] > 64:
print(f"| print_state_norm(): state_dim: {state_shape} is too large to print its norm. ")
return None
'''sample state'''
indices = np.arange(self.now_len)
rd.shuffle(indices)
indices = indices[:max_sample_size] # len(indices) = min(self.now_len, max_sample_size)
batch_state = self.buf_state[indices]
'''compute state norm'''
if isinstance(batch_state, torch.Tensor):
batch_state = batch_state.cpu().data.numpy()
assert isinstance(batch_state, np.ndarray)
if batch_state.shape[1] > 64:
print(f"| _print_norm(): state_dim: {batch_state.shape[1]:.0f} is too large to print its norm. ")
return None
if np.isnan(batch_state).any(): # 2020-12-12
batch_state = np.nan_to_num(batch_state) # nan to 0
ary_avg = batch_state.mean(axis=0)
ary_std = batch_state.std(axis=0)
fix_std = ((np.max(batch_state, axis=0) - np.min(batch_state, axis=0)) / 6 + ary_std) / 2
if neg_avg is not None: # norm transfer
ary_avg = ary_avg - neg_avg / div_std
ary_std = fix_std / div_std
print(f"print_state_norm: state_avg, state_std (fixed)")
print(f"avg = np.{repr(ary_avg).replace('=float32', '=np.float32')}")
print(f"std = np.{repr(ary_std).replace('=float32', '=np.float32')}")
def td_error_update(self, td_error):
self.per_tree.td_error_update(td_error)
def save_or_load_history(self, cwd, if_save, buffer_id=0): # [ElegantRL.2021.11.11]
save_path = f"{cwd}/buffer_{buffer_id}.npz"
if_load = None
if if_save:
self.update_now_len()
state_dim = self.buf_state.shape[1]
other_dim = self.buf_other.shape[1]
buf_state_data_type = np.float16 \
if self.buf_state.dtype in {np.float, np.float64, np.float32} \
else np.uint8
buf_state = np.empty((self.now_len, state_dim), dtype=buf_state_data_type)
buf_other = np.empty((self.now_len, other_dim), dtype=np.float16)
temp_len = self.now_len - self.next_id
buf_state[0:temp_len] = self.buf_state[self.next_id:self.now_len].cpu().numpy()
buf_other[0:temp_len] = self.buf_other[self.next_id:self.now_len].cpu().numpy()
buf_state[temp_len:] = self.buf_state[:self.next_id].detach().cpu().numpy()
buf_other[temp_len:] = self.buf_other[:self.next_id].detach().cpu().numpy()
np.savez_compressed(save_path, buf_state=buf_state, buf_other=buf_other)
print(f"| ReplayBuffer save in: {save_path}")
elif os.path.isfile(save_path):
buf_dict = np.load(save_path)
buf_state = buf_dict['buf_state']
buf_other = buf_dict['buf_other']
bs = 512
for i in range(0, buf_state.shape[0], bs):
tmp_state = torch.as_tensor(buf_state[i:i + bs], dtype=torch.float32, device=self.device)
tmp_other = torch.as_tensor(buf_other[i:i + bs], dtype=torch.float32, device=self.device)
self.extend_buffer(tmp_state, tmp_other)
self.update_now_len()
print(f"| ReplayBuffer load: {save_path}")
if_load = True
else:
# print(f"| ReplayBuffer FileNotFound: {save_path}")
if_load = False
return if_load
class ReplayBufferMP:
    def __init__(self, state_dim, action_dim, max_len, if_use_per, buffer_num, gpu_id):
        """Experience Replay Buffer for Multiple Processing

        `int max_len` the max_len of ReplayBuffer, not the total len of ReplayBufferMP
        `int buffer_num` the number of rollout-worker sub-buffers
        """
        self.now_len = 0
        self.max_len = max_len
        self.worker_num = buffer_num

        buf_max_len = max_len // buffer_num
        self.buffers = [ReplayBuffer(max_len=buf_max_len, state_dim=state_dim, action_dim=action_dim,
                                     if_use_per=if_use_per, gpu_id=gpu_id)
                        for _ in range(buffer_num)]

    def _sample_and_concat(self, batch_size, one_step):
        """Sample `batch_size // worker_num` items from every sub-buffer and
        concatenate the per-item tensors along dim 0.

        Each sub-buffer returns a tuple (reward, mask, action, state, next_state)
        — plus is_weights under PER, and a shorter tuple for one-step sampling —
        so the list of tuples is transposed before concatenation.
        """
        sub_batch_size = batch_size // self.worker_num
        if one_step:
            list_items = [buf.sample_batch_one_step(sub_batch_size) for buf in self.buffers]
        else:
            list_items = [buf.sample_batch(sub_batch_size) for buf in self.buffers]

        list_items = list(map(list, zip(*list_items)))  # 2D-list transpose
        return [torch.cat(item, dim=0) for item in list_items]

    def sample_batch(self, batch_size) -> list:
        return self._sample_and_concat(batch_size, one_step=False)

    def sample_batch_one_step(self, batch_size) -> list:
        return self._sample_and_concat(batch_size, one_step=True)

    def update_now_len(self):
        """Refresh `now_len` as the sum of every sub-buffer's current length."""
        self.now_len = 0
        for buffer in self.buffers:
            buffer.update_now_len()
            self.now_len += buffer.now_len

    def print_state_norm(self, neg_avg=None, div_std=None):  # non-essential
        # All sub-buffers hold similarly-distributed data; the first is representative.
        self.buffers[0].print_state_norm(neg_avg, div_std)

    def td_error_update(self, td_error):
        """Split the stacked TD errors per worker and update each PER tree."""
        td_errors = td_error.view(self.worker_num, -1, 1)
        for i in range(self.worker_num):
            self.buffers[i].per_tree.td_error_update(td_errors[i])

    def save_or_load_history(self, cwd, if_save):
        for i in range(self.worker_num):
            self.buffers[i].save_or_load_history(cwd, if_save, buffer_id=i)
class SharedReplayBuffer(object):
"""
Buffer to store training data.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param num_agents: (int) number of agents in the env.
:param obs_space: (gym.Space) observation space of agents.
:param cent_obs_space: (gym.Space) centralized observation space of agents.
:param act_space: (gym.Space) action space for agents.
"""
    def __init__(self, args, num_agents, obs_space, cent_obs_space, act_space):
        """Allocate all rollout-storage arrays; see the class docstring for parameters."""
        # Rollout geometry and algorithm hyper-parameters from the arg namespace.
        self.episode_length = args.episode_length
        self.n_rollout_threads = args.n_rollout_threads
        self.hidden_size = args.hidden_size
        self.recurrent_N = args.recurrent_N
        self.gamma = args.gamma
        self.gae_lambda = args.gae_lambda
        self._use_gae = args.use_gae
        self._use_popart = args.use_popart
        self._use_valuenorm = args.use_valuenorm
        self._use_proper_time_limits = args.use_proper_time_limits

        obs_shape = get_shape_from_obs_space(obs_space)
        share_obs_shape = get_shape_from_obs_space(cent_obs_space)

        # A list in the last position presumably marks a composite/image space;
        # keep only the leading dims in that case — TODO confirm against
        # get_shape_from_obs_space.
        if type(obs_shape[-1]) == list:
            obs_shape = obs_shape[:1]

        if type(share_obs_shape[-1]) == list:
            share_obs_shape = share_obs_shape[:1]

        # "Next"-style arrays (observations, RNN states, value preds, masks) hold
        # episode_length + 1 steps; per-step arrays (actions, rewards) hold episode_length.
        self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape),
                                  dtype=np.float32)
        self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)

        self.rnn_states = np.zeros(
            (self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size),
            dtype=np.float32)
        self.rnn_states_critic = np.zeros_like(self.rnn_states)

        self.value_preds = np.zeros(
            (self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
        self.returns = np.zeros_like(self.value_preds)

        # Availability mask only makes sense for discrete action spaces.
        if act_space.__class__.__name__ == 'Discrete':
            self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n),
                                             dtype=np.float32)
        else:
            self.available_actions = None

        act_shape = get_shape_from_act_space(act_space)

        self.actions = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.action_log_probs = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
        self.rewards = np.zeros(
            (self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)

        # masks: 0.0 marks env termination; bad_masks / active_masks default to all-ones.
        self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
        self.bad_masks = np.ones_like(self.masks)
        self.active_masks = np.ones_like(self.masks)

        # Circular write pointer into the per-step arrays.
        self.step = 0
def insert(self, share_obs, obs, rnn_states_actor, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
"""
Insert data into the buffer.
:param share_obs: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param obs: (np.ndarray) local agent observations.
:param rnn_states_actor: (np.ndarray) RNN states for actor network.
:param rnn_states_critic: (np.ndarray) RNN states for critic network.
:param actions:(np.ndarray) actions taken by agents.
:param action_log_probs:(np.ndarray) log probs of actions taken by agents
:param value_preds: (np.ndarray) value function prediction at each step.
:param rewards: (np.ndarray) reward collected at each step.
:param masks: (np.ndarray) denotes whether the environment has terminated or not.
:param bad_masks: (np.ndarray) action space for agents.
:param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
:param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
"""
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states_actor.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
                 value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
    """
    Insert data into the buffer. This insert function is used specifically for Hanabi, which is turn based.

    Unlike :meth:`insert`, observations, active masks and available actions
    are written at index ``step`` (not ``step + 1``) — presumably because in
    a turn-based game they describe the acting agent's own turn. TODO confirm.

    :param share_obs: (np.ndarray) centralized observation shared across agents.
    :param obs: (np.ndarray) local agent observations.
    :param rnn_states: (np.ndarray) RNN states for actor network.
    :param rnn_states_critic: (np.ndarray) RNN states for critic network.
    :param actions: (np.ndarray) actions taken by agents.
    :param action_log_probs: (np.ndarray) log probs of actions taken by agents.
    :param value_preds: (np.ndarray) value function prediction at each step.
    :param rewards: (np.ndarray) reward collected at each step.
    :param masks: (np.ndarray) denotes whether the environment has terminated or not.
    :param bad_masks: (np.ndarray) denotes whether the episode ended in a true
        terminal state or was cut off by the episode time limit.
    :param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
    :param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
    """
    self.share_obs[self.step] = share_obs.copy()
    self.obs[self.step] = obs.copy()
    self.rnn_states[self.step + 1] = rnn_states.copy()
    self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
    self.actions[self.step] = actions.copy()
    self.action_log_probs[self.step] = action_log_probs.copy()
    self.value_preds[self.step] = value_preds.copy()
    self.rewards[self.step] = rewards.copy()
    self.masks[self.step + 1] = masks.copy()
    if bad_masks is not None:
        self.bad_masks[self.step + 1] = bad_masks.copy()
    if active_masks is not None:
        self.active_masks[self.step] = active_masks.copy()
    if available_actions is not None:
        self.available_actions[self.step] = available_actions.copy()
    self.step = (self.step + 1) % self.episode_length
def after_update(self):
    """Copy the last timestep of the finished rollout into slot 0.

    Called after the model update so the next rollout continues from the
    state the previous one ended in.
    """
    carried = ("share_obs", "obs", "rnn_states", "rnn_states_critic",
               "masks", "bad_masks", "active_masks")
    for attr in carried:
        storage = getattr(self, attr)
        storage[0] = storage[-1].copy()
    if self.available_actions is not None:
        self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
    """Carry RNN states and termination masks over to index 0 (Hanabi variant)."""
    for attr in ("rnn_states", "rnn_states_critic", "masks", "bad_masks"):
        storage = getattr(self, attr)
        storage[0] = storage[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
    """
    Compute returns either as discounted sum of rewards, or using GAE.

    Walks the rollout backwards, writing into ``self.returns`` (and, for the
    GAE branches, using ``self.value_preds[-1]`` as the bootstrap value).

    :param next_value: (np.ndarray) value predictions for the step after the last episode step.
    :param value_normalizer: (PopArt) If not None, PopArt value normalizer instance.
        Must be supplied when ``self._use_popart`` or ``self._use_valuenorm``
        is set, since those branches denormalize stored values through it.
    """
    if self._use_proper_time_limits:
        # bad_masks[t] == 0 marks an episode-limit truncation rather than a
        # true terminal state; at such steps the accumulated GAE / return is
        # reset to the raw value estimate instead of bootstrapping through.
        if self._use_gae:
            self.value_preds[-1] = next_value
            gae = 0
            for step in reversed(range(self.rewards.shape[0])):
                if self._use_popart or self._use_valuenorm:
                    # Values are stored normalized; denormalize before mixing
                    # with raw rewards.
                    delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
                        self.value_preds[step + 1]) * self.masks[step + 1] \
                        - value_normalizer.denormalize(self.value_preds[step])
                    gae = delta + self.gamma * self.gae_lambda * gae * self.masks[step + 1]
                    gae = gae * self.bad_masks[step + 1]
                    self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                else:
                    delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
                        self.value_preds[step]
                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                    gae = gae * self.bad_masks[step + 1]
                    self.returns[step] = gae + self.value_preds[step]
        else:
            self.returns[-1] = next_value
            for step in reversed(range(self.rewards.shape[0])):
                # Discounted return; on truncation (bad_masks == 0) fall back
                # to the value prediction for that step.
                if self._use_popart or self._use_valuenorm:
                    self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
                        step]) * self.bad_masks[step + 1] \
                        + (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(
                        self.value_preds[step])
                else:
                    self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
                        step]) * self.bad_masks[step + 1] \
                        + (1 - self.bad_masks[step + 1]) * self.value_preds[step]
    else:
        if self._use_gae:
            self.value_preds[-1] = next_value
            gae = 0
            for step in reversed(range(self.rewards.shape[0])):
                if self._use_popart or self._use_valuenorm:
                    delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
                        self.value_preds[step + 1]) * self.masks[step + 1] \
                        - value_normalizer.denormalize(self.value_preds[step])
                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                    self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
                else:
                    delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
                        self.value_preds[step]
                    gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
                    self.returns[step] = gae + self.value_preds[step]
        else:
            self.returns[-1] = next_value
            for step in reversed(range(self.rewards.shape[0])):
                # Plain discounted return, zeroed across episode boundaries
                # by masks.
                self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
    """
    Yield training data for MLP policies.

    Flattens the [T, N, M, ...] rollout into T*N*M independent samples and
    yields ``num_mini_batch`` randomly-permuted minibatches.

    :param advantages: (np.ndarray) advantage estimates; may be None, in which
        case ``adv_targ`` is yielded as None.
    :param num_mini_batch: (int) number of minibatches to split the batch into.
    :param mini_batch_size: (int) number of samples in each minibatch.
    """
    episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
    batch_size = n_rollout_threads * episode_length * num_agents

    if mini_batch_size is None:
        assert batch_size >= num_mini_batch, (
            "PPO requires the number of processes ({}) "
            "* number of steps ({}) * number of agents ({}) = {} "
            "to be greater than or equal to the number of PPO mini batches ({})."
            "".format(n_rollout_threads, episode_length, num_agents,
                      n_rollout_threads * episode_length * num_agents,
                      num_mini_batch))
        mini_batch_size = batch_size // num_mini_batch

    # Random permutation of the flattened sample indices, split into chunks.
    rand = torch.randperm(batch_size).numpy()
    sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]

    # Drop the trailing bootstrap step ([:-1]) where present and flatten
    # [T, N, M, ...] -> [T*N*M, ...].
    share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
    obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])
    rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])
    rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])
    actions = self.actions.reshape(-1, self.actions.shape[-1])
    if self.available_actions is not None:
        available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
    value_preds = self.value_preds[:-1].reshape(-1, 1)
    returns = self.returns[:-1].reshape(-1, 1)
    masks = self.masks[:-1].reshape(-1, 1)
    active_masks = self.active_masks[:-1].reshape(-1, 1)
    action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
    # Bug fix: the original called advantages.reshape unconditionally, which
    # raised AttributeError on None before the `advantages is None` check in
    # the loop below could ever run.
    if advantages is not None:
        advantages = advantages.reshape(-1, 1)

    for indices in sampler:
        # obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
        share_obs_batch = share_obs[indices]
        obs_batch = obs[indices]
        rnn_states_batch = rnn_states[indices]
        rnn_states_critic_batch = rnn_states_critic[indices]
        actions_batch = actions[indices]
        if self.available_actions is not None:
            available_actions_batch = available_actions[indices]
        else:
            available_actions_batch = None
        value_preds_batch = value_preds[indices]
        return_batch = returns[indices]
        masks_batch = masks[indices]
        active_masks_batch = active_masks[indices]
        old_action_log_probs_batch = action_log_probs[indices]
        if advantages is None:
            adv_targ = None
        else:
            adv_targ = advantages[indices]

        yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
              value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
              adv_targ, available_actions_batch
def naive_recurrent_generator(self, advantages, num_mini_batch):
    """
    Yield training data for non-chunked RNN training.

    Each sample is one full-length episode stream: the batch dimension is
    the N*M env/agent streams (not timesteps), and only the stream's initial
    RNN state is kept — the trainer re-runs the RNN over the whole sequence.

    :param advantages: (np.ndarray) advantage estimates.
    :param num_mini_batch: (int) number of minibatches to split the batch into.
    """
    episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
    batch_size = n_rollout_threads * num_agents
    assert n_rollout_threads * num_agents >= num_mini_batch, (
        "PPO requires the number of processes ({})* number of agents ({}) "
        "to be greater than or equal to the number of "
        "PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch))
    num_envs_per_batch = batch_size // num_mini_batch
    # Random permutation over env/agent streams.
    perm = torch.randperm(batch_size).numpy()
    # Collapse [T(+1), N, M, ...] -> [T(+1), N*M, ...]; time stays intact.
    share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])
    obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])
    rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])
    rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])
    actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
    if self.available_actions is not None:
        available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])
    value_preds = self.value_preds.reshape(-1, batch_size, 1)
    returns = self.returns.reshape(-1, batch_size, 1)
    masks = self.masks.reshape(-1, batch_size, 1)
    active_masks = self.active_masks.reshape(-1, batch_size, 1)
    action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])
    advantages = advantages.reshape(-1, batch_size, 1)
    for start_ind in range(0, batch_size, num_envs_per_batch):
        share_obs_batch = []
        obs_batch = []
        rnn_states_batch = []
        rnn_states_critic_batch = []
        actions_batch = []
        available_actions_batch = []
        value_preds_batch = []
        return_batch = []
        masks_batch = []
        active_masks_batch = []
        old_action_log_probs_batch = []
        adv_targ = []
        for offset in range(num_envs_per_batch):
            ind = perm[start_ind + offset]
            # [:-1] drops the bootstrap step; [0:1] keeps only the initial
            # RNN state of the stream.
            share_obs_batch.append(share_obs[:-1, ind])
            obs_batch.append(obs[:-1, ind])
            rnn_states_batch.append(rnn_states[0:1, ind])
            rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
            actions_batch.append(actions[:, ind])
            if self.available_actions is not None:
                available_actions_batch.append(available_actions[:-1, ind])
            value_preds_batch.append(value_preds[:-1, ind])
            return_batch.append(returns[:-1, ind])
            masks_batch.append(masks[:-1, ind])
            active_masks_batch.append(active_masks[:-1, ind])
            old_action_log_probs_batch.append(action_log_probs[:, ind])
            adv_targ.append(advantages[:, ind])
        # [N[T, dim]]
        T, N = self.episode_length, num_envs_per_batch
        # These are all from_numpys of size (T, N, -1)
        share_obs_batch = np.stack(share_obs_batch, 1)
        obs_batch = np.stack(obs_batch, 1)
        actions_batch = np.stack(actions_batch, 1)
        if self.available_actions is not None:
            available_actions_batch = np.stack(available_actions_batch, 1)
        value_preds_batch = np.stack(value_preds_batch, 1)
        return_batch = np.stack(return_batch, 1)
        masks_batch = np.stack(masks_batch, 1)
        active_masks_batch = np.stack(active_masks_batch, 1)
        old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
        adv_targ = np.stack(adv_targ, 1)
        # States is just a (N, dim) from_numpy [N[1,dim]]
        rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
        rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
        # Flatten the (T, N, ...) from_numpys to (T * N, ...)
        share_obs_batch = _flatten(T, N, share_obs_batch)
        obs_batch = _flatten(T, N, obs_batch)
        actions_batch = _flatten(T, N, actions_batch)
        if self.available_actions is not None:
            available_actions_batch = _flatten(T, N, available_actions_batch)
        else:
            available_actions_batch = None
        value_preds_batch = _flatten(T, N, value_preds_batch)
        return_batch = _flatten(T, N, return_batch)
        masks_batch = _flatten(T, N, masks_batch)
        active_masks_batch = _flatten(T, N, active_masks_batch)
        old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
        adv_targ = _flatten(T, N, adv_targ)
        yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
              value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
              adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
    """
    Yield training data for chunked RNN training.

    The rollout is first reordered to [N, M, T, ...] (so each env/agent
    stream is contiguous in time), then cut into chunks of length
    ``data_chunk_length``; each sample is one such chunk together with the
    RNN state at the chunk's first step.

    :param advantages: (np.ndarray) advantage estimates.
    :param num_mini_batch: (int) number of minibatches to split the batch into.
    :param data_chunk_length: (int) length of sequence chunks with which to train RNN.
    """
    episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
    batch_size = n_rollout_threads * episode_length * num_agents
    data_chunks = batch_size // data_chunk_length  # [C=r*T*M/L]
    mini_batch_size = data_chunks // num_mini_batch

    # Random permutation over chunks.
    rand = torch.randperm(data_chunks).numpy()
    sampler = [rand[i * mini_batch_size:(i + 1) * mini_batch_size] for i in range(num_mini_batch)]

    # Image-like observations (extra H/W/C axes) need the explicit 6-axis
    # transpose; flat observations go through the _cast helper.
    if len(self.share_obs.shape) > 4:
        share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
        obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])
    else:
        share_obs = _cast(self.share_obs[:-1])
        obs = _cast(self.obs[:-1])

    actions = _cast(self.actions)
    action_log_probs = _cast(self.action_log_probs)
    advantages = _cast(advantages)
    value_preds = _cast(self.value_preds[:-1])
    returns = _cast(self.returns[:-1])
    masks = _cast(self.masks[:-1])
    active_masks = _cast(self.active_masks[:-1])
    # rnn_states = _cast(self.rnn_states[:-1])
    # rnn_states_critic = _cast(self.rnn_states_critic[:-1])
    rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])
    rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1,
                                                                                     *self.rnn_states_critic.shape[
                                                                                         3:])

    if self.available_actions is not None:
        available_actions = _cast(self.available_actions[:-1])

    for indices in sampler:
        share_obs_batch = []
        obs_batch = []
        rnn_states_batch = []
        rnn_states_critic_batch = []
        actions_batch = []
        available_actions_batch = []
        value_preds_batch = []
        return_batch = []
        masks_batch = []
        active_masks_batch = []
        old_action_log_probs_batch = []
        adv_targ = []

        for index in indices:
            # First flat sample index of this chunk.
            ind = index * data_chunk_length
            # size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
            share_obs_batch.append(share_obs[ind:ind + data_chunk_length])
            obs_batch.append(obs[ind:ind + data_chunk_length])
            actions_batch.append(actions[ind:ind + data_chunk_length])
            if self.available_actions is not None:
                available_actions_batch.append(available_actions[ind:ind + data_chunk_length])
            value_preds_batch.append(value_preds[ind:ind + data_chunk_length])
            return_batch.append(returns[ind:ind + data_chunk_length])
            masks_batch.append(masks[ind:ind + data_chunk_length])
            active_masks_batch.append(active_masks[ind:ind + data_chunk_length])
            old_action_log_probs_batch.append(action_log_probs[ind:ind + data_chunk_length])
            adv_targ.append(advantages[ind:ind + data_chunk_length])
            # size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
            rnn_states_batch.append(rnn_states[ind])
            rnn_states_critic_batch.append(rnn_states_critic[ind])

        L, N = data_chunk_length, mini_batch_size

        # These are all from_numpys of size (L, N, Dim)
        share_obs_batch = np.stack(share_obs_batch, axis=1)
        obs_batch = np.stack(obs_batch, axis=1)
        actions_batch = np.stack(actions_batch, axis=1)
        if self.available_actions is not None:
            available_actions_batch = np.stack(available_actions_batch, axis=1)
        value_preds_batch = np.stack(value_preds_batch, axis=1)
        return_batch = np.stack(return_batch, axis=1)
        masks_batch = np.stack(masks_batch, axis=1)
        active_masks_batch = np.stack(active_masks_batch, axis=1)
        old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)
        adv_targ = np.stack(adv_targ, axis=1)

        # States is just a (N, -1) from_numpy
        rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
        rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])

        # Flatten the (L, N, ...) from_numpys to (L * N, ...)
        share_obs_batch = _flatten(L, N, share_obs_batch)
        obs_batch = _flatten(L, N, obs_batch)
        actions_batch = _flatten(L, N, actions_batch)
        if self.available_actions is not None:
            available_actions_batch = _flatten(L, N, available_actions_batch)
        else:
            available_actions_batch = None
        value_preds_batch = _flatten(L, N, value_preds_batch)
        return_batch = _flatten(L, N, return_batch)
        masks_batch = _flatten(L, N, masks_batch)
        active_masks_batch = _flatten(L, N, active_masks_batch)
        old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
        adv_targ = _flatten(L, N, adv_targ)

        yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
              value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
              adv_targ, available_actions_batch
class BinarySearchTree:
    """Binary Search Tree for PER (a sum-tree).

    Leaves hold per-transition priorities; every internal node stores the sum
    of its children, so sampling by prefix sum runs in O(log n).
    Contributor: Github GyChou, Github mississippiu
    Reference: https://github.com/kaixindelele/DRLib/tree/main/algos/pytorch/td3_sp
    Reference: https://github.com/jaromiru/AI-blog/blob/master/SumTree.py
    """

    def __init__(self, memo_len):
        self.memo_len = memo_len  # replay buffer len
        self.prob_ary = np.zeros((memo_len - 1) + memo_len)  # parent_nodes_num + leaf_nodes_num
        self.max_len = len(self.prob_ary)
        self.now_len = self.memo_len - 1  # pointer
        self.indices = None  # data indices of the last sampled batch
        self.depth = int(np.log2(self.max_len))

        # PER.  Prioritized Experience Replay. Section 4
        # alpha, beta = 0.7, 0.5 for rank-based variant
        # alpha, beta = 0.6, 0.4 for proportional variant
        self.per_alpha = 0.6  # alpha = (Uniform:0, Greedy:1)
        self.per_beta = 0.4  # beta = (PER:0, NotPER:1)

    def update_id(self, data_id, prob=10):  # 10 is max_prob
        """Set the priority of one transition and propagate the change up."""
        tree_id = data_id + self.memo_len - 1
        if self.now_len == tree_id:
            self.now_len += 1

        delta = prob - self.prob_ary[tree_id]
        self.prob_ary[tree_id] = prob

        while tree_id != 0:  # propagate the change through tree
            tree_id = (tree_id - 1) // 2  # faster than the recursive loop
            self.prob_ary[tree_id] += delta

    def update_ids(self, data_ids, prob=10):  # 10 is max_prob
        """Set the priorities of a batch of transitions, then rebuild the
        affected parent sums level by level (vectorized over the batch)."""
        ids = data_ids + self.memo_len - 1
        self.now_len += (ids >= self.now_len).sum()

        upper_step = self.depth - 1
        self.prob_ary[ids] = prob  # here, ids means the indices of given children (maybe the right ones or left ones)
        p_ids = (ids - 1) // 2

        while upper_step:  # propagate the change through tree
            ids = p_ids * 2 + 1  # in this while loop, ids means the indices of the left children
            self.prob_ary[p_ids] = self.prob_ary[ids] + self.prob_ary[ids + 1]
            p_ids = (p_ids - 1) // 2
            upper_step -= 1

        self.prob_ary[0] = self.prob_ary[1] + self.prob_ary[2]
        # because we take depth-1 upper steps, ps_tree[0] need to be updated alone

    def get_leaf_id(self, v):
        """Find the leaf whose cumulative priority interval contains ``v``.

        Tree structure and array storage:
        Tree index:
              0       -> storing priority sum
            |  |
          1     2
         | |   | |
        3  4  5  6    -> storing priority for transitions
        Array type for storing: [0, 1, 2, 3, 4, 5, 6]
        """
        parent_idx = 0
        while True:
            l_idx = 2 * parent_idx + 1  # the leaf's left node
            r_idx = l_idx + 1  # the leaf's right node
            if l_idx >= (len(self.prob_ary)):  # reach bottom, end search
                leaf_idx = parent_idx
                break
            else:  # downward search, always search for a higher priority node
                if v <= self.prob_ary[l_idx]:
                    parent_idx = l_idx
                else:
                    v -= self.prob_ary[l_idx]
                    parent_idx = r_idx
        # Clamp to the filled region of the tree.
        return min(leaf_idx, self.now_len - 2)  # leaf_idx

    def get_indices_is_weights(self, batch_size, beg, end):
        """Sample ``batch_size`` transitions proportionally to priority and
        return their data indices plus importance-sampling weights.

        beta is annealed towards 1 by 0.001 per call.
        """
        self.per_beta = min(1., self.per_beta + 0.001)

        # get random values for searching indices with proportional prioritization
        # NOTE(review): ``rd`` is presumably ``numpy.random`` imported earlier
        # in the file -- confirm.
        values = (rd.rand(batch_size) + np.arange(batch_size)) * (self.prob_ary[0] / batch_size)

        # get proportional prioritization
        leaf_ids = np.array([self.get_leaf_id(v) for v in values])
        self.indices = leaf_ids - (self.memo_len - 1)

        prob_ary = self.prob_ary[leaf_ids] / self.prob_ary[beg:end].min()
        is_weights = np.power(prob_ary, -self.per_beta)  # important sampling weights
        return self.indices, is_weights

    def td_error_update(self, td_error):  # td_error = (q-q).detach_().abs()
        """Refresh priorities of the last sampled batch from new TD errors
        (expects a torch tensor; clamped to [1e-6, 10], raised to alpha)."""
        prob = td_error.squeeze().clamp(1e-6, 10).pow(self.per_alpha)
        prob = prob.cpu().numpy()
        self.update_ids(self.indices, prob)
|
<gh_stars>1-10
class TreeFragment:
    """(Abstract) empty sentence fragment.

    Thin wrapper around an arbitrary tree payload; subclasses decide how the
    payload expands into concrete sentences.
    """

    def __init__(self, tree):
        """
        Construct a sentence tree fragment which is merely a wrapper for
        a list of Strings

        Args:
            tree (?): Base tree for the sentence fragment, type depends on
                      subclass, refer to those subclasses
        """
        self._tree = tree

    def tree(self):
        """Return the represented sentence tree as raw data."""
        return self._tree

    def expand(self):
        """
        Expanded version of the fragment. In this case an empty sentence.

        Returns:
            List<List<str>>: A list with an empty sentence (= token/string list)
        """
        return [[]]

    def __str__(self):
        return str(self._tree)

    def __repr__(self):
        return repr(self._tree)
class Word(TreeFragment):
    """
    Single word in the sentence tree.
    Construct with a string as argument.
    """

    def expand(self):
        """
        Creates one sentence that contains exactly that word.

        Returns:
            List<List<str>>: A list with the given string as sentence
                             (= token/string list)
        """
        token = self._tree
        return [[token]]
class Sentence(TreeFragment):
    """
    A Sentence made of several concatenations/words.
    Construct with a List<TreeFragment> as argument.
    """

    def expand(self):
        """
        Creates a combination of all sub-sentences.

        Returns:
            List<List<str>>: A list with all subsentence expansions combined in
                             every possible way
        """
        expanded = [[]]
        for fragment in self._tree:
            tails = fragment.expand()
            # Iterating the previous results back-to-front mirrors the
            # original pop()-based loop, keeping the output order identical.
            expanded = [prefix + tail
                        for prefix in reversed(expanded)
                        for tail in tails]
        return expanded
class SentenceTree(TreeFragment):
    """
    A Combination of possible sub-sentences.
    Construct with List<TreeFragment> as argument.
    """

    def expand(self):
        """
        Returns all of its options as separated sub-sentences.

        Returns:
            List<List<str>>: A list containing the sentences created by all
                             expansions of its sub-sentences
        """
        return [sentence
                for option in self._tree
                for sentence in option.expand()]
class SentenceTreeParser:
    """
    Generate sentence token trees from a list of sentence
    ['1', '(', '2', '|', '3, ')'] -> [['1', '2'], ['1', '3']]

    NOTE(review): ``sentence`` is passed through ``str.replace`` in
    ``__init__`` and then indexed element-by-element, so in practice it is a
    plain string iterated character by character -- confirm whether
    pre-tokenized lists are ever passed in.
    """

    def __init__(self, sentence):
        # the syntax for .optionally is square brackets
        # "hello [world]"
        # this is equivalent to using .one_of
        # "hello (world|)
        sentence = sentence.replace("[", "(").replace("]", "|)")
        self.sentence = sentence

    def _parse(self):
        """
        Generate sentence token trees
        ['1', '(', '2', '|', '3, ')'] -> ['1', ['2', '3']]
        """
        self._current_position = 0
        return self._parse_expr()

    def _parse_expr(self):
        """
        Generate sentence token trees from the current position to
        the next closing parentheses / end of the list and return it
        ['1', '(', '2', '|', '3, ')'] -> ['1', [['2'], ['3']]]
        ['2', '|', '3'] -> [['2'], ['3']]
        """
        # List of all generated sentences
        sentence_list = []
        # Currently active sentence
        cur_sentence = []
        sentence_list.append(Sentence(cur_sentence))
        # Determine which form the current expression has
        while self._current_position < len(self.sentence):
            cur = self.sentence[self._current_position]
            self._current_position += 1
            if cur == '(':
                # Parse the subexpression
                subexpr = self._parse_expr()
                # Check if the subexpression only has one branch
                # -> If so, append "(" and ")" and add it as is
                normal_brackets = False
                if len(subexpr.tree()) == 1:
                    normal_brackets = True
                    cur_sentence.append(Word('('))
                # add it to the sentence
                cur_sentence.append(subexpr)
                if normal_brackets:
                    cur_sentence.append(Word(')'))
            elif cur == '|':
                # Begin parsing a new sentence
                cur_sentence = []
                sentence_list.append(Sentence(cur_sentence))
            elif cur == ')':
                # End parsing the current subexpression
                break
            # TODO anything special about {sth}?
            else:
                cur_sentence.append(Word(cur))
        return SentenceTree(sentence_list)

    def expand_parentheses(self):
        """Parse the stored sentence and return every expansion of it."""
        tree = self._parse()
        return tree.expand()
def expand_parentheses(sent):
    """
    Expand altennative/optional groups in a sentence into all variants.

    For example:
        Will it (rain|pour) (today|tomorrow|)?

    ---->

        Will it rain today?
        Will it rain tomorrow?
        Will it rain?
        Will it pour today?
        Will it pour tomorrow?
        Will it pour?

    Args:
        sent (str): sentence containing ``(a|b)`` / ``[optional]`` groups

    Returns:
        list<str>: every possible expanded sentence, joined back into
        whitespace-stripped strings (the token lists produced by the parser
        are concatenated with ``"".join``)
    """
    expanded = SentenceTreeParser(sent).expand_parentheses()
    return ["".join(_).strip() for _ in expanded]
|
# -*- coding: utf-8 -*-
"""
oy.models.mixins.polymorphic_prop
~~~~~~~~~~
Provides helper mixin classes for special sqlalchemy models
:copyright: (c) 2018 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
import sqlalchemy.types as types
from sqlalchemy import String, case, cast, event, literal_column, null
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.declarative import declared_attr

from oy.boot.sqla import db
class ProxiedDictMixin(object):
    """Adds obj[key] access to a mapped class.

    All mapping operations are forwarded to an attribute named ``_proxied``;
    the inheriting class must provide ``_proxied`` pointing at a dictionary.
    """

    def __len__(self):
        """Number of proxied entries."""
        return len(self._proxied)

    def __iter__(self):
        """Iterate over the proxied keys."""
        yield from self._proxied

    def __getitem__(self, key):
        return self._proxied[key]

    def __contains__(self, key):
        return key in self._proxied

    def get(self, key):
        """Return the value for *key*, or None if it is absent."""
        return self._proxied.get(key)

    def __setitem__(self, key, value):
        self._proxied[key] = value

    def __delitem__(self, key):
        del self._proxied[key]
class ImmutableProxiedDictMixin(ProxiedDictMixin):
    """Like :class:`ProxiedDictMixin` but disables the addition of
    new keys and deletion of existing ones
    """

    def __setitem__(self, key, value):
        """Update an existing key; adding new keys is rejected."""
        if key not in self._proxied:
            # Fix: corrected the misspelled error message ("Cann't").
            raise AttributeError("Can't Set Attribute")
        self._proxied[key] = value

    def __delitem__(self, key):
        """Deleting entries is never allowed."""
        raise AttributeError("Deleting is not allowed")
class PolymorphicVerticalProperty(object):
    """A key/value pair with polymorphic value storage.

    The class which is mapped should indicate typing information
    within the "info" dictionary of mapped Column objects; the
    ``mapper_configured`` listener below turns that metadata into
    ``cls.type_map``.
    """

    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value

    @hybrid_property
    def value(self):
        """Return the stored value from whichever typed column matches ``type``."""
        fieldname, discriminator = self.type_map[self.type]
        if fieldname is None:
            return None
        else:
            return getattr(self, fieldname)

    @value.setter
    def value(self, value):
        # Pick the backing column from the Python type of the new value and
        # record the matching discriminator.
        py_type = type(value)
        fieldname, discriminator = self.type_map[py_type]

        self.type = discriminator
        if fieldname is not None:
            setattr(self, fieldname, value)

    @value.deleter
    def value(self):
        # Fix: the original called ``self._set_value(None)``, a method that
        # does not exist anywhere in this class; route through the property
        # setter instead, which resets the discriminator to "none".
        self.value = None

    @value.comparator
    class value(PropComparator):
        """A comparator for .value, builds a polymorphic comparison via CASE.
        """

        def __init__(self, cls):
            self.cls = cls

        def _case(self):
            # Build CASE over every typed column, casting each to String so
            # the branches are comparable.
            pairs = set(self.cls.type_map.values())
            whens = [
                (
                    literal_column("'%s'" % discriminator),
                    cast(getattr(self.cls, attribute), String),
                )
                for attribute, discriminator in pairs
                if attribute is not None
            ]
            return case(whens, self.cls.type, null())

        def __eq__(self, other):
            return self._case() == cast(other, String)

        def __ne__(self, other):
            return self._case() != cast(other, String)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.value)
@event.listens_for(PolymorphicVerticalProperty, "mapper_configured", propagate=True)
def on_new_class(mapper, cls_):
    """Look for Column objects with type info in them, and work up
    a lookup table.

    Builds ``cls_.type_map``, mapping both Python types and discriminator
    strings to ``(column_name, discriminator)`` pairs; ``None`` values are
    handled specially via the "none" discriminator with no backing column.
    """
    info_dict = {}

    # None is storable without any backing column.
    info_dict[type(None)] = (None, "none")
    info_dict["none"] = (None, "none")

    for k in mapper.c.keys():
        col = mapper.c[k]
        if "type" in col.info:
            python_type, discriminator = col.info["type"]
            # A column may declare several Python types, e.g. (int, bool).
            if type(python_type) in (list, tuple):
                for pty in python_type:
                    info_dict[pty] = (k, discriminator)
            else:
                info_dict[python_type] = (k, discriminator)
            # Also allow looking the column up by its discriminator string.
            info_dict[discriminator] = (k, discriminator)
    cls_.type_map = info_dict
class DynamicProp(PolymorphicVerticalProperty):
    """Concrete key/value property with one backing column per supported
    Python type (int, str, bool)."""

    # Property name.
    key = db.Column(db.String(128), nullable=False)
    # Discriminator naming the typed column that currently holds the value.
    type = db.Column(db.String(64))

    int_value = db.Column(db.Integer, info={"type": (int, "integer")})
    str_value = db.Column(db.Unicode(5120), info={"type": (str, "string")})
    bool_value = db.Column(db.Boolean, info={"type": (bool, "boolean")})
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Various utils and helpers used by AS3 Ninja
"""
# pylint: disable=C0330 # Wrong hanging indentation before block
# pylint: disable=C0301 # Line too long
# pylint: disable=C0116 # Missing function or method docstring
import json
import sys
from functools import wraps
from typing import Any, ItemsView, Iterator, KeysView, Optional, Union, ValuesView
import yaml
def deserialize(datasource: str) -> dict:
    """
    deserialize de-serializes JSON or YAML from a file to a python dict.

    JSON is attempted first; on failure the data is re-parsed as YAML.
    A ValueError exception is raised if JSON and YAML de-serialization fails
    or the result is not a dict.

    :param datasource: A file or data to deserialize
    """
    with open(datasource, "r") as jy_file:
        raw = jy_file.read()

    parsed = None
    try:
        parsed = json.loads(raw)
    except (json.JSONDecodeError, TypeError):
        try:
            parsed = yaml.safe_load(raw)
        except (yaml.parser.ParserError, yaml.scanner.ScannerError):
            parsed = None

    if isinstance(parsed, dict):
        return parsed
    raise ValueError("deserialize: Could not deserialize datasource.")
class DictLike:
    """Makes objects `feel` like a dict.

    Implements the dunder methods and common accessor methods used to read
    dict data; everything delegates to the ``_dict`` attribute.
    """

    _dict: dict = {}

    def __iter__(self) -> Iterator[str]:
        yield from self._dict

    def __len__(self) -> int:
        return len(self._dict)

    def __contains__(self, item: Any) -> bool:
        return item in self._dict

    def __eq__(self, other: Any) -> bool:
        return self._dict.items() == other.items()

    def __getitem__(self, key: str) -> Any:
        return self._dict[key]

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._dict})"

    def __str__(self) -> str:
        return f"{self._dict}"

    def get(self, key: Any, default: Any = None) -> Any:
        return self._dict.get(key, default)

    def keys(self) -> KeysView[Any]:
        return self._dict.keys()

    def values(self) -> ValuesView[Any]:
        return self._dict.values()

    def items(self) -> ItemsView[Any, Any]:
        return self._dict.items()
def failOnException(wrapped_function):
    """Decorator: terminate the process with exit code 1 on any exception."""

    @wraps(wrapped_function)
    def _exit_on_error(*args, **kwargs):
        """Call through; convert any exception into sys.exit(1)."""
        try:
            return wrapped_function(*args, **kwargs)
        except Exception:  # pylint: disable=W0703
            sys.exit(1)

    return _exit_on_error
def escape_split(string_to_split: str, seperator: str = ".") -> tuple:
    """Splits a string based on the provided seperator.

    escape_split supports escaping the seperator by prepending a backslash.

    :param string_to_split: String to split
    :param seperator: Seperator to use for splitting (Default: ".")
    """
    # i: scan position, res: completed fields, buffer: partial field carried
    # across escaped separators.
    i, res, buffer = 0, [], ""
    while True:
        # j: next separator occurrence; e: count of backslashes directly before it.
        j, e = string_to_split.find(seperator, i), 0
        if j < 0:
            # No more separators: flush the remainder as the final field.
            return tuple(res + [buffer + string_to_split[i:]])
        while j - e and string_to_split[j - e - 1] == "\\":
            e += 1
        d = e // 2
        if e != d * 2:
            # Odd backslash count -> the separator is escaped: keep it in the
            # field (dropping the escaping backslash) and continue scanning.
            # NOTE(review): this branch resumes at j + 1 while the unescaped
            # branch uses j + len(seperator); looks correct only for 1-char
            # separators -- confirm before using multi-char separators.
            buffer += string_to_split[i : j - d - 1] + string_to_split[j]
            i = j + 1
            continue
        # Even backslash count -> a real separator: emit the field.
        res.append(buffer + string_to_split[i : j - d])
        i = j + len(seperator)
        buffer = ""
class PathAccessError(KeyError, IndexError, TypeError):
    """An amalgamation of KeyError, IndexError, and TypeError,
    representing what can occur when looking up a path in a nested
    object.
    """

    def __init__(self, exc, seg, path):
        # Underlying exception, the offending path segment, and the full path.
        self.exc = exc
        self.seg = seg
        self.path = path

    def __repr__(self):
        cname = self.__class__.__name__
        return f"{cname}({self.exc}, {self.seg}, {self.path})"

    def __str__(self):
        return (
            f"could not access {self.seg} from path {self.path}, got error: {self.exc}"
        )
def dict_filter(
    dict_to_filter: dict, filter: Optional[Union[tuple, str]] = None
) -> Any:
    """Walk *dict_to_filter* along *filter* and return the final value.

    dict_filter will walk the dict keys based on the filter and will return
    the value of the last key. If filter is empty, dict_to_filter is
    returned unchanged.

    Example:
        assert dict_filter({ 'a': { 'b': [1,2,3] } }, filter="a.b") == [1,2,3]

    :param dict_to_filter: Python dict to filter
    :param filter: Filter to apply to the dict. Can be a ``str`` (split on
        `.` via :func:`escape_split`) or a ``tuple`` of segments.
    :raises PathAccessError: when a segment cannot be looked up.
    """
    if filter:
        if isinstance(filter, str):
            filter = escape_split(filter)
        for seg in filter:
            try:
                dict_to_filter = dict_to_filter[seg]
            except (KeyError, IndexError) as exc:
                # Chain the original error for easier debugging (PEP 3134).
                raise PathAccessError(exc, seg, filter) from exc
            except TypeError as exc:
                # Either a string index into a list, or a parent that does
                # not support indexing at all.
                try:
                    seg = int(seg)
                    dict_to_filter = dict_to_filter[seg]
                except (ValueError, KeyError, IndexError, TypeError):
                    try:
                        iter(dict_to_filter)
                    except TypeError:
                        # Parent is not even iterable - report a clearer error.
                        exc = TypeError(
                            f"{type(dict_to_filter).__name__} object is not indexable"
                        )
                    raise PathAccessError(exc, seg, filter) from exc
    return dict_to_filter
# pylint: disable=W0105 # String statement has no effect
"""
PathAccessError and dict_filter are based on boltons iterutils: https://github.com/mahmoud/boltons
boltons license:
Copyright (c) 2013, <NAME>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
|
<reponame>yuxuan-du/Robust-quantum-classifier
import pennylane as qml
from pennylane import numpy as np
import os
# load synthetic dataset based on the paper 'Supervised learning with quantum-enhanced feature spaces'
data_all = np.load('data.npy')
label_all = np.load('label.npy')
# 100-sample train / validation / test splits
data_train, label_train = data_all[:100], label_all[:100]
data_vali, label_vali = data_all[100:200], label_all[100:200]
data_test, label_test = data_all[200:300], label_all[200:300]
# number of qubits in the circuit
nr_qubits = 3
# number of layers in the circuit
nr_layers = 3
# number of encoding repetitions in the circuit
encode_layers = 1
# mini-batch size used by cost_fn
size = 100
# random initial rotation angles, one per (layer, qubit)
params = np.random.uniform(0, np.pi * 2, (nr_layers, nr_qubits))
# NOTE(review): this reshape is a no-op - params already has this shape.
params = np.reshape(params, newshape = (nr_layers, nr_qubits))
# best parameters seen so far (selected by validation accuracy)
params_opt = np.zeros((nr_layers, nr_qubits))
# best validation accuracy seen so far
vali_acc_base = 0
def train_result_record():
    """Return a fresh dict of empty lists for per-epoch training metrics."""
    metric_names = ('loss', 'train_acc', 'valid_acc', 'test_acc')
    return {name: [] for name in metric_names}
# Accumulates per-epoch metrics for the whole run; filled in by cost_fn,
# valid_fn and test_fn below.
records = train_result_record()
# encoder
def encode_layer(feature, j):
    """Angle-encode *feature* as one RY rotation per qubit.

    :param feature: length-``nr_qubits`` sequence of rotation angles.
    :param j: encoding-layer index (unused; kept for interface compatibility).
    """
    for i in range(nr_qubits):
        qml.RY(feature[i], wires=i)
    # Removed a dead local: the original computed
    #   phi = (pi - feature[0].val) * (pi - feature[1].val) * (pi - feature[2].val)
    # but never used it (and the `.val` access breaks on plain numpy inputs).
def layer(params, j):
    """One variational layer: an RY rotation per qubit, then a CNOT ladder
    entangling wires (0, 1) and (1, 2)."""
    for wire in range(nr_qubits):
        qml.RY(params[j, wire], wires=wire)
    qml.CNOT(wires=[0, 1])
    qml.CNOT(wires=[1, 2])
# 3-qubit statevector simulator backing the QNode below
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev)
def circuit( params, feature, A=None):
    """QNode: encode *feature*, apply the variational layers, and return the
    expectation value of Hermitian observable *A* measured on wires [0, 1]."""
    for j in range(encode_layers):
        encode_layer(feature, j)
    for j in range(nr_layers):
        layer(params, j)
    return qml.expval(qml.Hermitian(A, wires=[0, 1]))
opt = qml.AdamOptimizer(0.05)
def cost_fn(params):
    """Mean squared error over the first ``size`` training samples.

    Side effects: reorders the module-level training arrays and appends the
    batch accuracy and loss to ``records``.
    """
    global data_train, label_train
    loss = 0
    # Identity permutation; the shuffled version is left commented out.
    indices = np.arange(data_train.shape[0]) #np.random.permutation(data_train.shape[0])
    data_train = data_train[indices]
    label_train = label_train[indices]
    correct = 0
    for data, label in zip(data_train[:size], label_train[:size]):
        # A = I (x) |0><0| on wires [0, 1]: expectation is presumably the
        # probability of measuring |0> on the second wire - confirm.
        out = circuit( params, data, A=np.kron(np.eye(2), np.array([[1, 0], [0, 0]])))
        loss += (label - out)**2
        if (out < 0.5 and label == 0) or (out > 0.5 and label == 1):
            correct += 1
    loss /= size
    print('loss: {} , acc: {} '.format(loss, correct / size))
    records['train_acc'].append(correct / size)
    # NOTE(review): `loss._value` reaches into the autograd box; assumes this
    # function is always called under the optimizer's tracing - confirm.
    records['loss'].append(loss._value)
    return loss
def test_fn(params):
    """Print and record classification accuracy on the held-out test split."""
    projector = np.kron(np.eye(2), np.array([[1, 0], [0, 0]]))
    hits = 0
    for sample, target in zip(data_test, label_test):
        prediction = circuit(params, sample, A=projector)
        if (prediction < 0.5 and target == 0) or (prediction > 0.5 and target == 1):
            hits += 1
    accuracy = hits / label_test.shape[0]
    print('Test acc: {}'.format(accuracy))
    records['test_acc'].append(accuracy)
def valid_fn(params):
    """Print, record and return classification accuracy on the validation split."""
    projector = np.kron(np.eye(2), np.array([[1, 0], [0, 0]]))
    hits = 0
    for sample, target in zip(data_vali, label_vali):
        prediction = circuit(params, sample, A=projector)
        if (prediction < 0.5 and target == 0) or (prediction > 0.5 and target == 1):
            hits += 1
    accuracy = hits / label_vali.shape[0]
    print('Valid acc: {}'.format(accuracy))
    records['valid_acc'].append(accuracy)
    return accuracy
# Train for 400 epochs, tracking the best parameters by validation accuracy.
for i in range(400):
    print('Epoch {}'.format(i))
    params = opt.step(cost_fn, params)
    valid_acc = valid_fn(params)
    test_fn(params)
    if valid_acc > vali_acc_base:
        # BUG FIX: the baseline was never advanced, so params_opt was
        # overwritten by *any* epoch with non-zero validation accuracy;
        # now params_opt really holds the best-so-far parameters.
        vali_acc_base = valid_acc
        params_opt = params
# Persist the metric history. ('_noiselss' filename kept as-is, typo and
# all, so downstream consumers keep finding the file.)
with open('train_result' + '_noiselss' + '.txt', 'w') as f:
    f.write(str(records))
|
<filename>bot.py
from fuzzywuzzy import process
import logging, os, discord, asyncio,sys,csv
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
# Local selenium/Firefox install locations (Windows deployment).
FIREFOX_PATH = r'C:\Program Files\Mozilla Firefox\firefox.exe'
GECKODRIVER_PATH = r'C:\geckodriver.exe'
# BUG FIX: the original built the path as os.getcwd() + '\cache\\', where
# '\c' is an invalid escape sequence and the hard-coded backslashes break
# on non-Windows hosts; os.path.join + os.sep yields the same directory
# (with trailing separator) portably.
cachefolder = os.path.join(os.getcwd(), 'cache') + os.sep
# When True, downloaded tooltip images are kept on disk for reuse.
cachetrigger = True
# Route discord.py debug logging to a file instead of the console.
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
client = discord.Client()
@client.event
async def on_ready():
    """Log the bot's identity once the Discord connection is established."""
    for line in ('logged in as', client.user.name, client.user.id, '-------'):
        print(line)
@client.event
async def on_message(message):
    """Dispatch chat commands: !help, !finditem, !findplayer."""
    if message.content.startswith('!help'):
        await client.send_message(message.channel, 'Find item\'s tooltip :\n- "!finditem #NAME" - Example -> !finditem thunderfury\n- "!finditem #VANILLAGAMINGITEMID" - Example -> !finditem 18402')
        await client.send_message(message.channel, 'Finding player :\n- "!findplayer #NAME"')
    elif message.content.startswith('!finditem'):
        await client.send_message(message.channel, 'Looking for item...')
        foundFile = False
        delete = True
        try:
            newArgs = message.content.split(' ')
            print(newArgs)
            if len(newArgs) >= 2:
                if len(newArgs) == 2:
                    # Single argument: numeric -> direct item id,
                    # otherwise fuzzy name lookup.
                    if newArgs[1].isdigit():
                        itemid = newArgs[1]
                    else:
                        itemid = finditemidfromname(newArgs[1])
                else:
                    # Multi-word item name.
                    # BUG FIX: the original loop's `if i != len(newArgs)`
                    # was always true, appending a trailing space after the
                    # last word; ' '.join produces the intended name.
                    name = ' '.join(newArgs[1:])
                    itemid = finditemidfromname(name)
                if findimagefromcache(itemid):
                    # Cached image: never delete it afterwards.
                    delete = False
                else:
                    print('Downloading File')
                    takeimage(itemid)
                try:
                    with open(cachefolder + itemid + '.png', 'rb') as f:
                        await client.send_file(message.channel, f, content=str('http://db.vanillagaming.org/?item=' + str(itemid)))
                    foundFile = True
                except:  # NOTE(review): bare except kept - narrows would change behavior
                    await client.send_message(message.channel, 'Error Finding Item, make sure you pass the right item ID')
            else:
                await client.send_message(message.channel, 'Command Error')
        except ValueError:
            await client.send_message(message.channel, 'Error Finding Item, make sure you passed the right parameters')
        # If cache argument has not passed, delete the item after sending it to Discord
        if delete and foundFile and cachetrigger is False:
            os.remove(cachefolder + str(itemid) + '.png')
            print(str(itemid) + '.png removed ')
    elif message.content.startswith('!findplayer'):
        await client.send_message(message.channel, 'Looking for Player...')
        try:
            newArgs = message.content.split(' ')
            if len(newArgs) == 2:
                try:
                    await client.send_message(message.channel, findplayer(newArgs[1]))
                except:
                    await client.send_message(message.channel, "Couldn't find player")
        except:
            await client.send_message(message.channel, "Couldn't find player")
def takeimage(itemID):
    """Screenshot the vanillagaming.org tooltip for *itemID* into the cache folder.

    Launches a headless Firefox via selenium, screenshots the element with
    class 'tooltip', and saves it as ``<cachefolder><itemID>.png``.
    """
    # Set up the browser for running in the background.
    os.environ['MOZ_HEADLESS'] = '1'
    binary = FirefoxBinary(FIREFOX_PATH, log_file=sys.stdout)
    binary.add_command_line_options('-headless')
    browser = webdriver.Firefox(executable_path=GECKODRIVER_PATH, firefox_binary=binary)
    try:
        # Request the item's database page.
        browser.get('http://db.vanillagaming.org/?item=' + itemID)
        try:
            browser.find_element_by_class_name('tooltip').screenshot(cachefolder + str(itemID) + '.png')
            print('Tooltip for item id : %s found at %s\nSaved at %s' % (itemID, str('http://db.vanillagaming.org/?item=' + str(itemID)), str(cachefolder + str(itemID) + '.png')))
        except Exception:
            print('Tooltip for item id : %s not found at %s' % (itemID, str('http://db.vanillagaming.org/?item=' + str(itemID))))
    finally:
        # BUG FIX: close the browser even when browser.get() raises,
        # so a failed page load no longer leaks a Firefox process.
        browser.close()
def findplayer(playerName):
    """Return the realmplayers.com character-viewer URL for *playerName*."""
    base_url = 'http://realmplayers.com/CharacterViewer.aspx?realm=LB&player='
    return base_url + playerName
def findimagefromcache(itemID):
    """Return True if a cached tooltip image ``<itemID>.png`` exists under the cache folder.

    BUG FIX: the original iterated the raw ``(dirpath, dirnames, filenames)``
    tuples from os.walk(), so ``filename in file`` also ran a *substring*
    check against the directory path string, which could report false
    positives; we now test membership in the filenames list only.
    """
    filename = itemID + '.png'
    print('Trying to find ' + filename)
    for _dirpath, _dirnames, filenames in os.walk(cachefolder):
        if filename in filenames:
            print('Item found in cache folder')
            return True
    print('Item not found in cache folder')
    return False
def finditemidfromname(name):
    """Fuzzy-match *name* against the item database and return its item id.

    Uses fuzzywuzzy's ``process.extractOne`` to pick the closest item name,
    then returns the corresponding id from the module-level ``items`` dict.

    NOTE(review): this relies on ``items`` already being bound at module
    level (it is only assigned under ``__main__``); calling this before
    that assignment raises NameError rather than taking the refill branch.
    """
    global items
    if not bool(items):
        items = inititemsdict()
    return items[process.extractOne(name, items.keys())[0]]
def inititemsdict():
    """Load items.csv (columns 'name', 'entry') into a {name: id} dict."""
    with open('items.csv', 'r') as csv_file:
        return {row['name']: row['entry'] for row in csv.DictReader(csv_file)}
if __name__ == '__main__':
    myargs = sys.argv
    # "-nc" (no cache) flag: delete tooltip images after sending them.
    if '-nc' in myargs:
        cachetrigger = False
    # Ensure the cache directory exists before serving any requests.
    if not os.path.exists(os.path.dirname(cachefolder)):
        try:
            os.makedirs(os.path.dirname(cachefolder))
        except:
            print('Error while creating the cache folder')
    print('Cache is {0}'.format(cachetrigger))
    print(myargs)
    # NOTE(review): `global` at module scope is a no-op; `items` is simply
    # a module-level name here.
    global items
    items = inititemsdict()
    # NOTE(review): placeholder bot token - must be replaced to run.
    client.run('token')
|
# -*- coding: utf-8 -*-
import scrapy
from lianjia.items import ResidenceInfoItem
import datetime
from lianjia.Exception.emailSender import emailSender
# Maps Lianjia subdomain prefixes (e.g. 'bj.lianjia') to Chinese city names;
# used by the spider to tag each scraped item with its city.
city_dict = {
    'bj.lianjia': u'北京', 'sh.lianjia': u'上海', 'xm.lianjia': u'厦门', 'nj.lianjia': u'南京', 'cd.lianjia': u'成都', 'qd.lianjia': u'青岛',
    'wh.lianjia': u'武汉', 'jn.lianjia': u'济南', 'hf.lianjia': u'合肥', 'xa.lianjia': u'西安', 'gz.lianjia': u'广州', 'su.lianjia': u'苏州',
    'cq.lianjia': u'重庆', 'lf.lianjia': u'廊坊', 'tj.lianjia': u'天津', 'cs.lianjia': u'长沙', 'wx.lianjia': u'无锡', 'sy.lianjia': u'沈阳',
    'zz.lianjia': u'郑州', 'nb.lianjia': u'宁波', 'hz.lianjia': u'杭州', 'sz.lianjia': u'深圳', 'km.lianjia': u'昆明', 'jh.lianjia': u'金华',
    'lz.lianjia': u'兰州', 'fs.lianjia': u'佛山', 'dl.lianjia': u'大连', 'nn.lianjia': u'南宁', 'dg.lianjia': u'东莞', 'sjz.lianjia': u'石家庄',
    'wz.lianjia': u'温州'
}
class LjXiaoquSpider(scrapy.Spider):
    """Crawl Lianjia residential-community ("xiaoqu") listings across cities.

    Flow: parse() collects district links from each city's /xiaoqu index,
    parse_community() walks the paginated community lists, and
    parse_residence_info() scrapes each community's detail page into a
    ResidenceInfoItem.
    """
    name = "lj_xiaoqu"
    # NOTE(review): Scrapy's attribute is `allowed_domains` (plural, bare
    # domains, no scheme); as written this list is ignored - confirm whether
    # offsite filtering was intended before renaming it.
    allowed_domain = [
        'https://bj.lianjia.com',
        'https://lf.lianjia.com',
        'https://cd.lianjia.com',
        'https://jn.lianjia.com',
        'https://nj.lianjia.com',
        'https://qd.lianjia.com',
        'https://sh.lianjia.com',
        'http://sh.lianjia.com',
        'https://hf.lianjia.com',
        'https://wh.lianjia.com',
        'https://xm.lianjia.com',
        'https://xa.lianjia.com',
        'https://gz.lianjia.com',
        'https://cq.lianjia.com',
        'http://su.lianjia.com'
    ]
    # A set literal, so the duplicated sz entry is harmlessly de-duplicated.
    start_urls = {
        'https://bj.lianjia.com/xiaoqu',
        'https://sh.lianjia.com/xiaoqu',
        'https://tj.lianjia.com/xiaoqu',
        'https://cs.lianjia.com/xiaoqu',
        'https://xa.lianjia.com/xiaoqu',
        'https://wh.lianjia.com/xiaoqu',
        'https://nj.lianjia.com/xiaoqu',
        'https://wx.lianjia.com/xiaoqu',
        'https://cd.lianjia.com/xiaoqu',
        'https://sy.lianjia.com/xiaoqu',
        'https://qd.lianjia.com/xiaoqu',
        'https://zz.lianjia.com/xiaoqu',
        'https://nb.lianjia.com/xiaoqu',
        'https://hz.lianjia.com/xiaoqu',
        'https://sz.lianjia.com/xiaoqu',
        'https://km.lianjia.com/xiaoqu',
        'https://jh.lianjia.com/xiaoqu',
        'https://lz.lianjia.com/xiaoqu',
        'https://fs.lianjia.com/xiaoqu',
        'https://dl.lianjia.com/xiaoqu',
        'https://nn.lianjia.com/xiaoqu',
        'https://dg.lianjia.com/xiaoqu',
        'https://sz.lianjia.com/xiaoqu',
        'https://cq.lianjia.com/xiaoqu',
        'https://sjz.lianjia.com/xiaoqu',
        'https://wz.lianjia.com/xiaoqu',
    }

    def parse(self, response):
        """Collect district-level links from a city's /xiaoqu index page."""
        u = response.url
        URL = u.split(r"/xiaoqu/")[0]
        urls = response.xpath(
            '//div[@class="position"]/dl[2]/dd/div[@data-role="ershoufang"]/div/a/@href').extract()
        for url in urls:
            # Relative district links are resolved against the city root;
            # NOTE(review): absolute ("https...") links are skipped entirely -
            # confirm that is intentional.
            if "https" not in url:
                new_url = URL + url
                yield scrapy.Request(url=new_url, callback=self.parse_community)

    def parse_community(self, response):
        """Walk a (paginated) community list page, yielding detail requests."""
        item = ResidenceInfoItem()
        select = scrapy.Selector(response)
        li = select.xpath("/html/body/div[4]/div[1]/ul//li")
        for l in li:
            url = l.xpath("a/@href").extract_first()
            # district / business circle the community belongs to
            item['district'] = l.xpath(
                "div/div/a[@class='district']/text()").extract_first()
            item['community'] = l.xpath(
                "div/div/a[@class='bizcircle']/text()").extract_first()
            yield scrapy.Request(url, meta={'key': item}, callback=self.parse_residence_info)
        # Follow pagination: the page-box element carries a JSON-ish
        # page-data attribute with totalPage / curPage.
        page_box = select.xpath(
            '//div[@class="page-box house-lst-page-box"]').extract_first()
        if page_box is not None:
            # NOTE(security): eval() on scraped page content - consider
            # json.loads / ast.literal_eval instead.
            totalPage = eval(scrapy.Selector(text=page_box).xpath(
                '//@page-data').extract_first())['totalPage']
            curPage = eval(scrapy.Selector(text=page_box).xpath(
                '//@page-data').extract_first())['curPage']
            if totalPage > curPage:
                yield scrapy.Request(
                    response.url[0:response.url.find(
                        '/', 30) + 1] + 'pg' + str(curPage + 1) + '/',
                    callback=self.parse_community
                )

    def parse_residence_info(self, response):
        """Scrape one community's detail page into the item from request meta."""
        select = scrapy.Selector(response)
        item = response.meta['key']
        # community address (raw detailDesc text; alias splitting was removed)
        name_str = select.xpath(
            '//*[@class="detailDesc"]/text()').extract_first()
        item['address'] = name_str
        # community longitude/latitude
        item['coordinate'] = select.xpath(
            '//*[@class="xiaoquInfoContent"]/span/@xiaoqu').extract_first()
        # year built
        item['build_time'] = select.xpath(
            '//*[@class="xiaoquInfo"]/div[1]/span[2]/text()').extract_first()
        # property-management fee
        item['property_price'] = select.xpath(
            '//*[@class="xiaoquInfo"]/div[3]/span[2]/text()').extract_first()
        # property-management company
        item['property_company'] = select.xpath(
            '//*[@class="xiaoquInfo"]/div[4]/span[2]/text()').extract_first()
        # developer
        item['developer'] = select.xpath(
            '//*[@class="xiaoquInfo"]/div[5]/span[2]/text()').extract_first()
        # total number of buildings
        item['total_buildings'] = select.xpath(
            '//*[@class="xiaoquInfo"]/div[6]/span[2]/text()').extract_first()
        # total number of households
        item['total_houses'] = select.xpath(
            '//*[@class="xiaoquInfo"]/div[7]/span[2]/text()').extract_first()
        # community name
        item['residence_name'] = select.xpath(
            '//*[@class="detailTitle"]/text()').extract_first()
        # business date
        item['bsn_dt'] = str(datetime.date.today())
        # crawl time
        item['crawl_time'] = datetime.datetime.now().strftime('%Y-%m-%d %X')
        # load timestamp
        item['tms'] = datetime.datetime.now().strftime('%Y-%m-%d %X')
        item['webst_nm'] = u'链家'
        # city, derived from the response's subdomain
        for key in city_dict.keys():
            if key in response.url:
                item['city'] = city_dict[key]
        item['url'] = response.url
        yield item

    @staticmethod
    def close(spider, reason):
        """Called by Scrapy when the spider closes; logs shutdown.

        (A commented-out email-notification hook previously lived here;
        see lianjia.Exception.emailSender if it needs restoring.)
        """
        # BUG FIX: was the Python 2 statement `print "closed!!!!!"`, which is
        # a SyntaxError on Python 3; the parenthesized form runs on both.
        print("closed!!!!!")

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES_BASE': {
            'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
            'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
            'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
            'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
            'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
            'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
            'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
            'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
            'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 800,
            'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
            'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'lianjia.filter_url.LjprojectSpiderMiddleware': 310,
        },
        'DOWNLOAD_DELAY': 0.3,
        'ITEM_PIPELINES': {
            'lianjia.pipelines.LjProjectPipeline': 300,
        }
    }
|
<filename>make_style_dataset.py
import os, json, argparse
from threading import Thread
from queue import Queue
import numpy as np
from scipy.misc import imread, imresize
import h5py
"""
Create an HDF5 file of images for training a feedforward style transfer model.
Original file created by <NAME> available at:
https://github.com/jcjohnson/fast-neural-style
"""
parser = argparse.ArgumentParser()
# Source image directories (MS-COCO train/val splits by default).
parser.add_argument('--train_dir', default='data/coco/images/train2014')
parser.add_argument('--val_dir', default='data/coco/images/val2014')
# Destination HDF5 file.
parser.add_argument('--output_file', default='data/ms-coco-256.h5')
# Target size every image is resized to.
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=256)
# Per-split image cap; a non-positive value means "no limit".
parser.add_argument('--max_images', type=int, default=-1)
parser.add_argument('--num_workers', type=int, default=2)
# Non-zero: also process the validation directory.
parser.add_argument('--include_val', type=int, default=1)
# Images are pre-cropped so each side is a multiple of this value.
parser.add_argument('--max_resize', default=16, type=int)
args = parser.parse_args()
def add_data(h5_file, image_dir, prefix, args):
    """Resize every image under *image_dir* and store it in *h5_file*.

    Images are written to the dataset ``<prefix>/images`` with shape
    (N, height, width, 3), dtype uint8. Reading/resizing fans out across
    ``args.num_workers`` daemon threads; one writer thread serialises the
    h5py writes (h5py locks internally).
    """
    # Make a list of all images in the source directory
    image_list = []
    image_extensions = {'.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG'}
    for filename in os.listdir(image_dir):
        ext = os.path.splitext(filename)[1]
        if ext in image_extensions:
            image_list.append(os.path.join(image_dir, filename))
    num_images = len(image_list)
    # Resize all images and copy them into the hdf5 file
    # We'll bravely try multithreading
    dset_name = os.path.join(prefix, 'images')
    # dset_size = (num_images, 3, args.height, args.width)
    dset_size = (num_images, args.height, args.width, 3)
    imgs_dset = h5_file.create_dataset(dset_name, dset_size, np.uint8)
    # input_queue stores (idx, filename) tuples,
    # output_queue stores (idx, resized_img) tuples
    input_queue = Queue()
    output_queue = Queue()
    # Read workers pull images off disk and resize them
    def read_worker():
        while True:
            idx, filename = input_queue.get()
            img = imread(filename)
            try:
                # First crop the image so its size is a multiple of max_resize
                H, W = img.shape[0], img.shape[1]
                H_crop = H - H % args.max_resize
                W_crop = W - W % args.max_resize
                img = img[:H_crop, :W_crop]
                img = imresize(img, (args.height, args.width))
            except (ValueError, IndexError) as e:
                # NOTE(review): on failure the *unresized* image is still
                # queued below and will mismatch the dataset shape - confirm
                # whether such files should be skipped instead.
                print(filename)
                print(img.shape, img.dtype)
                print(e)
            input_queue.task_done()
            output_queue.put((idx, img))
    # Write workers write resized images to the hdf5 file
    def write_worker():
        num_written = 0
        while True:
            idx, img = output_queue.get()
            if img.ndim == 3:
                # RGB image, transpose from H x W x C to C x H x W
                # DO NOT TRANSPOSE
                imgs_dset[idx] = img
            elif img.ndim == 2:
                # Grayscale image; it is H x W so broadcasting to C x H x W will just copy
                # grayscale values into all channels.
                # COPY GRAY SCALE TO CHANNELS DIMENSION
                img_dtype = img.dtype
                imgs_dset[idx] = (img[:, :, None] * np.array([1, 1, 1])).astype(img_dtype)
            output_queue.task_done()
            num_written = num_written + 1
            if num_written % 100 == 0:
                print('Copied %d / %d images' % (num_written, num_images))
    # Start the read workers.
    for i in range(args.num_workers):
        t = Thread(target=read_worker)
        t.daemon = True    # daemon threads die with the process; queues are joined below
        t.start()
    # h5py locks internally, so we can only use a single write worker =(
    t = Thread(target=write_worker)
    t.daemon = True
    t.start()
    for idx, filename in enumerate(image_list):
        if args.max_images > 0 and idx >= args.max_images: break
        input_queue.put((idx, filename))
    input_queue.join()
    output_queue.join()
if __name__ == '__main__':
    # Build the train split, then optionally the val split, into one HDF5 file.
    with h5py.File(args.output_file, 'w') as f:
        add_data(f, args.train_dir, 'train2014', args)
        if args.include_val != 0:
            add_data(f, args.val_dir, 'val2014', args)
|
<reponame>dnabanita7/PySyft
# stdlib
from abc import ABC
from collections import OrderedDict
from collections import UserDict
from collections import UserList
from collections import UserString
from typing import Any
from typing import Optional
from typing import Union
# syft relative
from .. import python
from ...core.common import UID
from ...logger import traceback_and_raise
from .primitive_interface import PyPrimitive
# Alias for type(None); the literal `None` is a value, not a type.
NoneType = type(None)
# Builtin (and collections) types that have PyPrimitive wrapper equivalents;
# membership is tested against type(value) in isprimitive().
# NOTE(review): the list holds both `None` and `NoneType`; since the test
# uses type(value), the literal `None` entry looks redundant - confirm
# before removing.
primitives = [
    bool,
    dict,
    complex,
    float,
    int,
    list,
    tuple,
    set,
    slice,
    None,
    NoneType,
    str,
    UserDict,
    UserList,
    UserString,
    OrderedDict,
]
# Static-typing union mirroring the `primitives` list above.
PrimitiveType = Union[
    bool,
    dict,
    complex,
    float,
    int,
    tuple,
    list,
    set,
    slice,
    None,
    NoneType,
    str,
    UserDict,
    UserList,
    UserString,
    OrderedDict,
]
def isprimitive(value: Any) -> bool:
    """Return True when *value* is a supported builtin primitive that is not
    already wrapped in a PyPrimitive subclass."""
    already_wrapped = issubclass(type(value), PyPrimitive)
    return (not already_wrapped) and type(value) in primitives
class PrimitiveFactory(ABC):
    """Factory that wraps builtin Python values in their syft PyPrimitive types."""

    def upcast(self) -> Union[int, float, bool, complex, list, str, None]:
        """Return the plain-Python equivalent; must be implemented by subclasses."""
        traceback_and_raise(NotImplementedError)

    @staticmethod
    def generate_primitive(
        value: Union[PrimitiveType, type(NotImplemented), PyPrimitive],  # type: ignore
        id: Optional[UID] = None,
        recurse: bool = False,
    ) -> Any:
        """Wrap *value* in the matching PyPrimitive, optionally recursing into
        list/dict contents; values that are already wrapped (or have no
        mapping) are returned as-is / as SyNone.
        """
        # Already wrapped - return unchanged.
        if isinstance(value, PyPrimitive):
            return value
        # NOTE: bool must be checked before int (bool subclasses int).
        if isinstance(value, bool):
            return python.Bool(value=value, id=id)
        if isinstance(value, int):
            return python.Int(value=value, id=id)
        if isinstance(value, float):
            return python.Float(value=value, id=id)
        if isinstance(value, complex):
            return python.Complex(real=value.real, imag=value.imag, id=id)
        if isinstance(value, tuple):
            return python.Tuple(value)
        if isinstance(value, set):
            return python.Set(value)
        if isinstance(value, slice):
            return python.Slice(
                start=value.start, stop=value.stop, step=value.step, id=id
            )
        if type(value) in [list, UserList]:
            if not recurse:
                return python.List(value=value, id=id)
            else:
                # allow recursive primitive downcasting
                new_list = []
                if value is not None:
                    for val in value:
                        if isprimitive(value=val):
                            new_list.append(
                                PrimitiveFactory.generate_primitive(
                                    value=val, recurse=recurse
                                )
                            )
                        else:
                            new_list.append(val)
                return python.List(value=new_list, id=id)
        if type(value) in [dict, UserDict, OrderedDict]:
            # Preserve ordered-dict-ness in the wrapped type.
            constructor = (
                python.collections.OrderedDict
                if type(value) is OrderedDict
                else python.Dict
            )
            if not recurse:
                new_dict = constructor(value)
            else:
                # allow recursive primitive downcasting
                new_dict = constructor()
                if value is not None:
                    items = getattr(value, "items", None)
                    if items is not None:
                        for k, val in items():
                            if isprimitive(value=val):
                                new_dict[k] = PrimitiveFactory.generate_primitive(
                                    value=val, recurse=recurse
                                )
                            else:
                                new_dict[k] = val
            # if we pass id in as a kwargs it ends up in the actual dict
            if id is not None:
                new_dict._id = id
            return new_dict
        if type(value) in [str, UserString]:
            return python.String(value=value, id=id)
        if value is NotImplemented:
            return value
        # Anything unmapped (including None) becomes the SyNone singleton.
        return python.SyNone
|
<filename>src/external/coremltools_wrap/coremltools/coremltools/test/pipeline/test_model_updatable.py
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os, shutil
import numpy as _np
import coremltools.models.datatypes as datatypes
import unittest
import pytest
import tempfile
from coremltools.models.utils import save_spec
from coremltools.models import MLModel
from coremltools.models.neural_network import (
NeuralNetworkBuilder,
AdamParams,
SgdParams,
)
from coremltools.models.pipeline import PipelineRegressor, PipelineClassifier
class MLModelUpdatableTest(unittest.TestCase):
@classmethod
def setUpClass(self):
self.model_dir = tempfile.mkdtemp()
@classmethod
def tearDownClass(self):
if os.path.exists(self.model_dir):
shutil.rmtree(self.model_dir)
    def create_base_builder(self):
        """Build a two-layer (3->3->3) bias-free inner-product network whose
        weights are marked updatable; shared fixture for the tests below."""
        self.input_features = [("input", datatypes.Array(3))]
        self.output_features = [("output", None)]
        self.output_names = ["output"]
        builder = NeuralNetworkBuilder(self.input_features, self.output_features)
        # Random 3x3 weight matrices for the two inner-product layers.
        W1 = _np.random.uniform(-0.5, 0.5, (3, 3))
        W2 = _np.random.uniform(-0.5, 0.5, (3, 3))
        builder.add_inner_product(
            name="ip1",
            W=W1,
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name="input",
            output_name="hidden",
        )
        builder.add_inner_product(
            name="ip2",
            W=W2,
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name="hidden",
            output_name="output",
        )
        builder.make_updatable(["ip1", "ip2"])  # or a dict for weightParams
        return builder
    def test_updatable_model_creation_ce_sgd(self):
        """Round-trip an updatable model with cross-entropy loss + SGD and
        verify the optimizer/loss/epoch parameters survive in the spec.

        NOTE(review): the `... is not None` assertions on protobuf
        submessages are vacuous (proto field access never yields None) -
        consider HasField-style checks.
        """
        builder = self.create_base_builder()
        # Cross-entropy requires a probability input, hence the softmax.
        builder.add_softmax(
            name="softmax", input_name="output", output_name="softmax_output"
        )
        builder.set_categorical_cross_entropy_loss(
            name="cross_entropy", input="softmax_output"
        )
        builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0))
        builder.set_epochs(20, allowed_set=[10, 20, 30, 40])
        # Save to disk and reload to exercise full (de)serialization.
        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(builder.spec, model_path)
        mlmodel = MLModel(model_path)
        self.assertTrue(mlmodel is not None)
        spec = mlmodel.get_spec()
        self.assertTrue(spec.isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)
        self.assertTrue(
            spec.neuralNetwork.updateParams.lossLayers[
                0
            ].categoricalCrossEntropyLossLayer
            is not None
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer is not None
        )
        # Default values configured above must round-trip through the spec.
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.defaultValue,
                1e-2,
                atol=1e-4,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.defaultValue,
                10,
                atol=1e-4,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.defaultValue,
                0,
                atol=1e-8,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
            )
        )
        # Allowed ranges/sets for the updatable parameters.
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.minValue
            == 0
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.learningRate.range.maxValue
            == 1
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.miniBatchSize.set.values
            == [10]
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.minValue
            == 0
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.sgdOptimizer.momentum.range.maxValue
            == 1
        )
    def test_updatable_model_creation_ce_adam(self):
        """Round-trip an updatable model with cross-entropy loss + Adam and
        verify the optimizer/loss/epoch parameters survive in the spec.

        NOTE(review): the `... is not None` assertions on protobuf
        submessages are vacuous (proto field access never yields None) -
        consider HasField-style checks.
        """
        builder = self.create_base_builder()
        # Cross-entropy requires a probability input, hence the softmax.
        builder.add_softmax(
            name="softmax", input_name="output", output_name="softmax_output"
        )
        builder.set_categorical_cross_entropy_loss(
            name="cross_entropy", input="softmax_output"
        )
        # Default AdamParams with a restricted mini-batch-size set.
        adam_params = AdamParams()
        adam_params.set_batch(value=10, allowed_set=[10, 20])
        builder.set_adam_optimizer(adam_params)
        builder.set_epochs(20)
        # Save to disk and reload to exercise full (de)serialization.
        model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
        print(model_path)
        save_spec(builder.spec, model_path)
        mlmodel = MLModel(model_path)
        self.assertTrue(mlmodel is not None)
        spec = mlmodel.get_spec()
        self.assertTrue(spec.isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
        self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)
        self.assertTrue(
            spec.neuralNetwork.updateParams.lossLayers[
                0
            ].categoricalCrossEntropyLossLayer
            is not None
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer is not None
        )
        # Adam defaults (lr, batch, beta1, beta2, eps) must round-trip.
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.defaultValue,
                1e-2,
                atol=1e-4,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.defaultValue,
                10,
                atol=1e-4,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.defaultValue,
                0.9,
                atol=1e-4,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.defaultValue,
                0.999,
                atol=1e-4,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.defaultValue,
                1e-8,
                atol=1e-8,
            )
        )
        self.assertTrue(
            _np.isclose(
                spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
            )
        )
        # Allowed ranges/sets for the updatable parameters.
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.minValue
            == 0
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.learningRate.range.maxValue
            == 1
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.miniBatchSize.set.values
            == [10, 20]
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.minValue
            == 0
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta1.range.maxValue
            == 1
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.minValue
            == 0
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.beta2.range.maxValue
            == 1
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.minValue
            == 0
        )
        self.assertTrue(
            spec.neuralNetwork.updateParams.optimizer.adamOptimizer.eps.range.maxValue
            == 1
        )
        self.assertTrue(spec.neuralNetwork.updateParams.epochs.set.values == [20])
def test_updatable_model_creation_mse_sgd(self):
    """Build an updatable model with an MSE loss and an SGD optimizer and
    verify every training parameter survives a save/load round trip."""
    builder = self.create_base_builder()
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_sgd_optimizer(SgdParams(lr=1e-2, batch=10, momentum=0.0))
    builder.set_epochs(20)

    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)

    mlmodel = MLModel(model_path)
    self.assertIsNotNone(mlmodel)
    spec = mlmodel.get_spec()
    self.assertTrue(spec.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)

    # BUG FIX: this test configures a *mean squared error* loss, but the
    # original asserted `...categoricalCrossEntropyLossLayer is not None`,
    # which is doubly wrong: it names the wrong loss, and a protobuf
    # sub-message accessor never returns None, so the assert was vacuous.
    # Checking the populated oneof verifies the actual loss type.
    self.assertEqual(
        spec.neuralNetwork.updateParams.lossLayers[0].WhichOneof(
            "LossLayerType"),
        "meanSquaredErrorLossLayer",
    )
    # Same vacuous-`is not None` pattern for the optimizer, fixed likewise.
    self.assertEqual(
        spec.neuralNetwork.updateParams.optimizer.WhichOneof("OptimizerType"),
        "sgdOptimizer",
    )

    sgd = spec.neuralNetwork.updateParams.optimizer.sgdOptimizer
    # Default values configured above must survive the round trip.
    self.assertTrue(_np.isclose(sgd.learningRate.defaultValue, 1e-2, atol=1e-4))
    self.assertTrue(_np.isclose(sgd.miniBatchSize.defaultValue, 10, atol=1e-4))
    self.assertTrue(_np.isclose(sgd.momentum.defaultValue, 0, atol=1e-8))
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
        )
    )

    # Allowed ranges/sets written by the builder (assertEqual gives useful
    # failure messages, unlike assertTrue(x == y)).
    self.assertEqual(sgd.learningRate.range.minValue, 0)
    self.assertEqual(sgd.learningRate.range.maxValue, 1)
    self.assertEqual(sgd.miniBatchSize.set.values, [10])
    self.assertEqual(sgd.momentum.range.minValue, 0)
    self.assertEqual(sgd.momentum.range.maxValue, 1)
def test_updatable_model_creation_mse_adam(self):
    """Build an updatable model with an MSE loss and an Adam optimizer and
    verify every training parameter survives a save/load round trip."""
    builder = self.create_base_builder()
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8)
    )
    builder.set_epochs(20, allowed_set=[10, 20, 30])

    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)

    mlmodel = MLModel(model_path)
    self.assertIsNotNone(mlmodel)
    spec = mlmodel.get_spec()
    self.assertTrue(spec.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[0].innerProduct.weights.isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].isUpdatable)
    self.assertTrue(spec.neuralNetwork.layers[1].innerProduct.weights.isUpdatable)

    # BUG FIX: the loss configured above is *mean squared error*, but the
    # original asserted `...categoricalCrossEntropyLossLayer is not None` --
    # the wrong loss, and vacuous anyway because protobuf sub-message
    # accessors never return None.  Check the populated oneof instead.
    self.assertEqual(
        spec.neuralNetwork.updateParams.lossLayers[0].WhichOneof(
            "LossLayerType"),
        "meanSquaredErrorLossLayer",
    )
    self.assertEqual(
        spec.neuralNetwork.updateParams.optimizer.WhichOneof("OptimizerType"),
        "adamOptimizer",
    )

    adam = spec.neuralNetwork.updateParams.optimizer.adamOptimizer
    # Default values configured above must survive the round trip.
    self.assertTrue(_np.isclose(adam.learningRate.defaultValue, 1e-2, atol=1e-4))
    self.assertTrue(_np.isclose(adam.miniBatchSize.defaultValue, 10, atol=1e-4))
    self.assertTrue(_np.isclose(adam.beta1.defaultValue, 0.9, atol=1e-4))
    self.assertTrue(_np.isclose(adam.beta2.defaultValue, 0.999, atol=1e-4))
    self.assertTrue(_np.isclose(adam.eps.defaultValue, 1e-8, atol=1e-8))
    self.assertTrue(
        _np.isclose(
            spec.neuralNetwork.updateParams.epochs.defaultValue, 20, atol=1e-4
        )
    )

    # Allowed ranges/sets written by the builder.
    self.assertEqual(adam.learningRate.range.minValue, 0)
    self.assertEqual(adam.learningRate.range.maxValue, 1)
    self.assertEqual(adam.miniBatchSize.set.values, [10])
    for param in (adam.beta1, adam.beta2, adam.eps):
        self.assertEqual(param.range.minValue, 0)
        self.assertEqual(param.range.maxValue, 1)
    self.assertEqual(
        spec.neuralNetwork.updateParams.epochs.set.values, [10, 20, 30]
    )
def test_nn_set_cce_without_softmax_fail(self):
    """Setting a cross-entropy loss without a softmax layer must be rejected."""
    builder = self.create_base_builder()
    with self.assertRaises(ValueError):
        # There is no softmax in the base graph, so this is invalid.
        builder.set_categorical_cross_entropy_loss(
            name="cross_entropy", input="output"
        )
def test_nn_set_cce_invalid(self):
    """Cross-entropy loss must be attached to the softmax *output*, not its input."""
    builder = self.create_base_builder()
    builder.add_softmax(
        name="softmax", input_name="output", output_name="softmax_output"
    )
    with self.assertRaises(ValueError):
        # "output" feeds the softmax; CCE must consume "softmax_output".
        builder.set_categorical_cross_entropy_loss(
            name="cross_entropy", input="output"
        )
def test_nn_set_softmax_updatable_invalid(self):
    """A softmax layer has no weights, so marking it updatable must fail."""
    builder = self.create_base_builder()
    builder.add_softmax(
        name="softmax", input_name="output", output_name="softmax_output"
    )
    with self.assertRaises(ValueError):
        builder.make_updatable(["softmax"])
def test_nn_set_training_input(self):
    """The saved spec must expose (input, output_true) as training inputs."""
    builder = self.create_base_builder()
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8)
    )
    builder.set_epochs(20, allowed_set=[10, 20, 30])

    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()

    # Both training inputs are multi-arrays, in a fixed order.
    for index, expected_name in enumerate(["input", "output_true"]):
        training_input = spec.description.trainingInput[index]
        self.assertEqual(training_input.name, expected_name)
        self.assertEqual(
            training_input.type.WhichOneof("Type"), "multiArrayType"
        )
def test_nn_builder_with_training_features(self):
    """A builder assembled from scratch exposes the expected training inputs."""
    input_features = [("input", datatypes.Array(3))]
    output_features = [("output", datatypes.Array(3))]
    builder = NeuralNetworkBuilder(input_features, output_features)

    # Two 3x3 inner-product layers chained input -> hidden -> output.
    layer_wiring = [("ip1", "input", "hidden"), ("ip2", "hidden", "output")]
    for layer_name, in_name, out_name in layer_wiring:
        builder.add_inner_product(
            name=layer_name,
            W=_np.random.uniform(-0.5, 0.5, (3, 3)),
            b=None,
            input_channels=3,
            output_channels=3,
            has_bias=False,
            input_name=in_name,
            output_name=out_name,
        )

    builder.make_updatable(["ip1", "ip2"])  # or a dict for weightParams
    builder.set_mean_squared_error_loss(
        name="mse", input_feature=("output", datatypes.Array(3))
    )
    builder.set_adam_optimizer(
        AdamParams(lr=1e-2, batch=10, beta1=0.9, beta2=0.999, eps=1e-8)
    )
    builder.set_epochs(20, allowed_set=[10, 20, 30])

    model_path = os.path.join(self.model_dir, "updatable_creation.mlmodel")
    print(model_path)
    save_spec(builder.spec, model_path)
    mlmodel = MLModel(model_path)
    self.assertTrue(mlmodel is not None)
    spec = mlmodel.get_spec()

    for index, expected_name in enumerate(["input", "output_true"]):
        training_input = spec.description.trainingInput[index]
        self.assertEqual(training_input.name, expected_name)
        self.assertEqual(
            training_input.type.WhichOneof("Type"), "multiArrayType"
        )
def test_pipeline_regressor_make_updatable(self):
    """A pipeline regressor becomes updatable only once it holds an updatable
    sub-model, and then refuses further sub-models."""
    builder = self.create_base_builder()
    builder.spec.isUpdatable = False
    training_input = [("input", datatypes.Array(3)), ("target", "Double")]

    pipeline = PipelineRegressor(
        self.input_features, self.output_names, training_input
    )

    # An empty pipeline cannot be made updatable.
    with self.assertRaises(ValueError):
        pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, False)

    # Neither can one whose only sub-model is not updatable.
    pipeline.add_model(builder.spec)
    with self.assertRaises(ValueError):
        pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, False)

    # Adding an updatable sub-model unlocks make_updatable().
    builder.spec.isUpdatable = True
    pipeline.add_model(builder.spec)
    self.assertEqual(pipeline.spec.isUpdatable, False)
    pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, True)

    self.assertEqual(pipeline.spec.description.trainingInput[0].name, "input")
    self.assertEqual(
        pipeline.spec.description.trainingInput[0].type.WhichOneof("Type"),
        "multiArrayType",
    )
    self.assertEqual(pipeline.spec.description.trainingInput[1].name, "target")
    self.assertEqual(
        pipeline.spec.description.trainingInput[1].type.WhichOneof("Type"),
        "doubleType",
    )

    # Once updatable, the pipeline no longer accepts new sub-models.
    with self.assertRaises(ValueError):
        pipeline.add_model(builder.spec)
    self.assertEqual(pipeline.spec.isUpdatable, True)
def test_pipeline_classifier_make_updatable(self):
    """A pipeline classifier becomes updatable only once it holds an updatable
    sub-model, and then refuses further sub-models."""
    builder = self.create_base_builder()
    builder.spec.isUpdatable = False
    training_input = [("input", datatypes.Array(3)), ("target", "String")]

    pipeline = PipelineClassifier(
        self.input_features, self.output_names, training_features=training_input
    )

    # An empty pipeline cannot be made updatable.
    with self.assertRaises(ValueError):
        pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, False)

    # Neither can one whose only sub-model is not updatable.
    pipeline.add_model(builder.spec)
    with self.assertRaises(ValueError):
        pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, False)

    # Adding an updatable sub-model unlocks make_updatable().
    builder.spec.isUpdatable = True
    pipeline.add_model(builder.spec)
    self.assertEqual(pipeline.spec.isUpdatable, False)
    pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, True)

    self.assertEqual(pipeline.spec.description.trainingInput[0].name, "input")
    self.assertEqual(
        pipeline.spec.description.trainingInput[0].type.WhichOneof("Type"),
        "multiArrayType",
    )
    self.assertEqual(pipeline.spec.description.trainingInput[1].name, "target")
    self.assertEqual(
        pipeline.spec.description.trainingInput[1].type.WhichOneof("Type"),
        "stringType",
    )

    # Once updatable, the pipeline no longer accepts new sub-models.
    with self.assertRaises(ValueError):
        pipeline.add_model(builder.spec)
    self.assertEqual(pipeline.spec.isUpdatable, True)
def test_pipeline_classifier_set_training_inputs(self):
    """set_training_input() after construction behaves like passing the
    training features to the constructor."""
    builder = self.create_base_builder()
    builder.spec.isUpdatable = False
    training_input = [("input", datatypes.Array(3)), ("target", "String")]

    pipeline = PipelineClassifier(self.input_features, self.output_names)
    pipeline.set_training_input(training_input)

    # An empty pipeline cannot be made updatable.
    with self.assertRaises(ValueError):
        pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, False)

    # Neither can one whose only sub-model is not updatable.
    pipeline.add_model(builder.spec)
    with self.assertRaises(ValueError):
        pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, False)

    # Adding an updatable sub-model unlocks make_updatable().
    builder.spec.isUpdatable = True
    pipeline.add_model(builder.spec)
    self.assertEqual(pipeline.spec.isUpdatable, False)
    pipeline.make_updatable()
    self.assertEqual(pipeline.spec.isUpdatable, True)

    self.assertEqual(pipeline.spec.description.trainingInput[0].name, "input")
    self.assertEqual(
        pipeline.spec.description.trainingInput[0].type.WhichOneof("Type"),
        "multiArrayType",
    )
    self.assertEqual(pipeline.spec.description.trainingInput[1].name, "target")
    self.assertEqual(
        pipeline.spec.description.trainingInput[1].type.WhichOneof("Type"),
        "stringType",
    )

    # Once updatable, the pipeline no longer accepts new sub-models.
    with self.assertRaises(ValueError):
        pipeline.add_model(builder.spec)
    self.assertEqual(pipeline.spec.isUpdatable, True)
def test_shuffle_on_by_default(self):
    """Updatable models must default to shuffling the training data."""
    builder = self.create_base_builder()
    # The base builder already marks two layers as updatable.
    shuffle_default = builder.nn_spec.updateParams.shuffle.defaultValue
    self.assertTrue(
        shuffle_default,
        "Shuffle not turned on by default for updatable models",
    )
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The local coshell module.
A coshell is an interactive non-login /bin/bash running as a coprocess. It has
the same stdin, stdout and stderr as the caller and reads command lines from a
pipe. Only one command runs at a time. ^C interrupts and kills the currently
running command but does not kill the coshell. The coshell process exits when
the shell 'exit' command is executed. State is maintained by the coshell across
commands, including the current working directory and local and environment
variables. The "$ENV" file, if it exists, is sourced into the coshell at
startup. This gives the caller the opportunity to set up aliases and default
'set -o ...' shell modes.
Usage:
cosh = coshell.Coshell()
while True:
command = <the next command line to run>
try:
command_exit_status = cosh.Run(command)
except coshell.CoshellExitException:
break
coshell_exit_status = cosh.Close()
This module contains three Coshell implementations:
* _UnixCoshell using /bin/bash
* _MinGWCoshell using MinGW bash or git bash
* _WindowsCoshell using cmd.exe, does not support state across commands
On the first instantiation Coshell.__init__() determines what implementation to
use. All subsequent instantiations will use the same implementation.
"""
from __future__ import unicode_literals
import abc
import os
import re
import signal
import subprocess
import sys
# Bash function injected into the coshell by _GetUserConfigDefaults(): given a
# (partial) command line it prints one completion per line followed by a blank
# line, which Communicate()/GetCompletions() read back.  (Runtime string --
# contents deliberately untouched.)
_GET_COMPLETIONS_SHELL_FUNCTION = r"""
__get_completions__() {
# prints the completions for the (partial) command line "$@" followed by
# a blank line
local command completion_function
local COMP_CWORD COMP_LINE COMP_POINT COMP_WORDS COMPREPLY=()
(( $# )) || {
printf '\n'
return
}
command=$1
shift
COMP_WORDS=("$@")
# load bash-completion if necessary
declare -F _completion_loader &>/dev/null || {
source /usr/share/bash-completion/bash_completion 2>/dev/null || {
_completion_loader() {
return 1
}
return
}
}
# get the command specific completion function
set -- $(complete -p "$command" 2>/dev/null)
if (( $# )); then
shift $(( $# - 2 ))
completion_function=$1
else
# check the _completion_loader
(( $# )) || {
# load the completion function for the command
_completion_loader "$command"
# get the command specific completion function
set -- $(complete -p "$command" 2>/dev/null)
(( $# )) || {
printf '\n'
return
}
shift $(( $# - 2 ))
completion_function=$1
}
fi
# set up the completion call stack -- really, this is the api?
COMP_LINE=${COMP_WORDS[*]}
COMP_POINT=${#COMP_LINE}
# add '' to COMP_WORDS if the last character of the command line is a space
[[ ${COMP_LINE[@]: -1} = ' ' ]] && COMP_WORDS+=('')
# index of the last word
COMP_CWORD=$(( ${#COMP_WORDS[@]} - 1 ))
# execute the completion function
$completion_function
# print the completions to stdout
printf '%s\n' "${COMPREPLY[@]}" ''
}
"""
class CoshellExitException(Exception):
  """Raised when the coshell coprocess has exited."""

  def __init__(self, message, status=None):
    super(CoshellExitException, self).__init__(message)
    # Shell exit status of the defunct coshell, if known.
    self.status = status
class _CoshellBase(object):
"""The local coshell base class.
Attributes:
_edit_mode: The coshell edit mode, one of {'emacs', 'vi'}.
_ignore_eof: True if the coshell should ignore EOF on stdin and not exit.
_state_is_preserved: True if shell process state is preserved across Run().
"""
__metaclass__ = abc.ABCMeta
def __init__(self, state_is_preserved=True):
# Immutable coshell object properties.
self._state_is_preserved = state_is_preserved
# Mutable shell modes controlled by `set -o ...` and `set +o ...`.
self._edit_mode = 'emacs'
self._ignore_eof = False
@property
def edit_mode(self):
return self._edit_mode
@property
def ignore_eof(self):
return self._ignore_eof
@property
def state_is_preserved(self):
return self._state_is_preserved
@staticmethod
def _ShellStatus(status):
"""Returns the shell $? status given a python Popen returncode."""
if status is None:
status = 0
elif status < 0:
status = 256 - status
return status
def Close(self):
"""Closes the coshell connection and release any resources."""
pass
@abc.abstractmethod
def Run(self, command, check_modes=True):
"""Runs command in the coshell and waits for it to complete.
Args:
command: The command line string to run. Must be a sytactically complete
shell statement. Nothing is executed if there is a syntax error.
check_modes: If True runs self._GetModes() after command has executed if
command contains `set -o ...` or `set +o ...`.
"""
pass
@abc.abstractmethod
def Interrupt(self, sig):
"""Sends the interrupt signal to the coshell."""
pass
def GetCompletions(self, args):
"""Returns the list of completion choices for args.
Args:
args: The list of command line argument strings to complete.
"""
del args
return None
def Communicate(self, args):
"""Runs args and returns the list of output lines, up to first empty one.
Args:
args: The list of command line arguments.
Returns:
The list of output lines from command args up to the first empty line.
"""
del args
return []
class _UnixCoshellBase(_CoshellBase):
  """The unix local coshell base class.

  Attributes:
    _shell: The coshell subprocess object.
  """

  __metaclass__ = abc.ABCMeta

  # Status-line protocol shared with the shell fragments sent to the coshell:
  # SHELL_STATUS_EXIT marks "the coshell itself exited"; statuses are written
  # on fd SHELL_STATUS_FD and commands read stdin from fd SHELL_STDIN_FD.
  SHELL_STATUS_EXIT = 'x'
  SHELL_STATUS_FD = 9
  SHELL_STDIN_FD = 8

  def __init__(self):
    super(_UnixCoshellBase, self).__init__()
    self.status = None  # type: int
    self._status_fd = None  # type: int
    self._shell = None  # type: subprocess.Popen

  @staticmethod
  def _Quote(command):
    """Quotes command in single quotes so it can be eval'd in coshell."""
    # Embedded single quotes become '\'' (close, escaped quote, reopen).
    return "'{}'".format(command.replace("'", r"'\''"))

  def _Exited(self):
    """Raises the coshell exit exception."""
    try:
      # Nudge the defunct coprocess so its returncode gets populated.
      self._shell.communicate(':')
    except (IOError, OSError, ValueError):
      # Yeah, ValueError for IO on a closed file.
      pass
    status = self._ShellStatus(self._shell.returncode)
    raise CoshellExitException(
        'The coshell exited [status={}].'.format(status),
        status=status)

  def _SendCommand(self, command):
    """Sends command to the coshell for execution."""
    try:
      self._shell.stdin.write(command + '\n')
    except (IOError, OSError, ValueError):
      # Yeah, ValueError for IO on a closed file.
      self._Exited()

  def _GetStatus(self):
    """Gets the status of the last command sent to the coshell."""
    status_string = ''
    while True:
      # Read one char at a time so we never consume past the status line.
      c = os.read(self._status_fd, 1)
      if c in (None, '\n', self.SHELL_STATUS_EXIT):
        break
      status_string += c
    if not status_string.isdigit() or c == self.SHELL_STATUS_EXIT:
      # SHELL_STATUS_EXIT is appended by the exit trap installed in
      # _GetUserConfigDefaults() -- the coshell itself is gone.
      self._Exited()
    return int(status_string)

  def _GetModes(self):
    """Syncs the user settable modes of interest to the Coshell."""

    # Get the caller $ENV emacs/vi mode.
    if self.Run('set -o | grep -q "^vi.*on"', check_modes=False) == 0:
      self._edit_mode = 'vi'
    else:
      self._edit_mode = 'emacs'

    # Get the caller $ENV ignoreeof setting.
    self._ignore_eof = self.Run(
        'set -o | grep -q "^ignoreeof.*on"', check_modes=False) == 0

  def _GetUserConfigDefaults(self):
    """Consults the user shell config for defaults."""
    self._SendCommand(
        # The $ENV file configures aliases and set -o modes.
        '[ -f "$ENV" ] && . "$ENV";'
        # The exit command hits this trap, reaped by _GetStatus() in Run().
        "trap 'echo $?{exit} >&{fdstatus}' 0;"
        # This catches interrupts so commands die while the coshell stays alive.
        'trap ":" 2;{get_completions}'
        .format(exit=self.SHELL_STATUS_EXIT,
                fdstatus=self.SHELL_STATUS_FD,
                get_completions=_GET_COMPLETIONS_SHELL_FUNCTION))

    # Enable job control if supported.
    if not sys.platform.startswith('darwin'):
      self._SendCommand('set -o monitor 2>/dev/null')

    # Enable alias expansion if supported.
    self._SendCommand('shopt -s expand_aliases 2>/dev/null')

    # Sync the user settable modes to the coshell.
    self._GetModes()

    # Set $? to 0.
    self._SendCommand('true')

  @abc.abstractmethod
  def _Run(self, command, check_modes=True):
    """Runs command in the coshell and waits for it to complete."""
    pass

  def Run(self, command, check_modes=True):
    """Runs command in the coshell and waits for it to complete."""
    status = 130  # assume the worst: 128 (had signal) + 2 (it was SIGINT)
    # Ignore SIGINT here so ^C only kills the command inside the coshell,
    # never this controller process; the handler is restored afterwards.
    sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
      status = self._Run(command, check_modes=check_modes)
    except KeyboardInterrupt:
      pass
    finally:
      signal.signal(signal.SIGINT, sigint)
    return status

  def Interrupt(self):
    """Sends the interrupt signal to the coshell."""
    self._shell.send_signal(signal.SIGINT)
class _UnixCoshell(_UnixCoshellBase):
  """The unix local coshell implementation.

  This implementation preserves coshell process state across Run().

  Attributes:
    _status_fd: The read side of the pipe where the coshell write 1 char status
      lines. The status line is used to mark the exit of the currently running
      command.
  """

  SHELL_PATH = '/bin/bash'

  def __init__(self):
    super(_UnixCoshell, self).__init__()

    # The dup/close/dup dance preserves caller fds that collide with
    # SHELL_*_FD.  Step 1: save any caller fd 9, then alias fd 9 to stdout
    # temporarily.
    try:
      caller_shell_status_fd = os.dup(self.SHELL_STATUS_FD)
    except OSError:
      caller_shell_status_fd = -1
    os.dup2(1, self.SHELL_STATUS_FD)
    # Step 2: save any caller fd 8, then alias fd 8 to stdin so the child
    # inherits a usable stdin on that fd.
    try:
      caller_shell_stdin_fd = os.dup(self.SHELL_STDIN_FD)
    except OSError:
      caller_shell_stdin_fd = -1
    os.dup2(0, self.SHELL_STDIN_FD)
    # Step 3: point fd 9 at the write side of the status pipe; the child
    # inherits it (close_fds=False) and reports command statuses through it.
    self._status_fd, w = os.pipe()
    os.dup2(w, self.SHELL_STATUS_FD)
    os.close(w)
    self._shell = subprocess.Popen(
        [self.SHELL_PATH], stdin=subprocess.PIPE, close_fds=False)
    # Step 4: restore (or close) fds 8 and 9 in this process; the child keeps
    # its inherited copies.
    if caller_shell_status_fd >= 0:
      os.dup2(caller_shell_status_fd, self.SHELL_STATUS_FD)
      os.close(caller_shell_status_fd)
    else:
      os.close(self.SHELL_STATUS_FD)
    if caller_shell_stdin_fd >= 0:
      os.dup2(caller_shell_stdin_fd, self.SHELL_STDIN_FD)
      os.close(caller_shell_stdin_fd)
    else:
      os.close(self.SHELL_STDIN_FD)
    self._GetUserConfigDefaults()

  def Close(self):
    """Closes the coshell connection and release any resources."""
    if self._status_fd >= 0:
      os.close(self._status_fd)
      self._status_fd = -1
    try:
      self._shell.communicate('exit')  # This closes internal fds.
    except (IOError, ValueError):
      # Yeah, ValueError for IO on a closed file.
      pass
    return self._ShellStatus(self._shell.returncode)

  def _Run(self, command, check_modes=True):
    """Runs command in the coshell and waits for it to complete."""
    # The command reads its stdin from fd 8 and its $? is echoed to fd 9 so
    # _GetStatus() can tell when it finished and with what status; the
    # trailing (exit $status) keeps $? correct inside the coshell.
    self._SendCommand(
        'command eval {command} <&{fdin} && echo 0 >&{fdstatus} || '
        '{{ status=$?; echo $status 1>&{fdstatus}; (exit $status); }}'.format(
            command=self._Quote(command),
            fdstatus=self.SHELL_STATUS_FD,
            fdin=self.SHELL_STDIN_FD))
    status = self._GetStatus()

    # Re-check shell shared modes.
    if check_modes and re.search(r'\bset\s+[-+]o\s+\w', command):
      self._GetModes()

    return status

  def Communicate(self, args):
    """Runs args and returns the list of output lines, up to first empty one.

    Args:
      args: The list of command line arguments.

    Returns:
      The list of output lines from command args up to the first empty line.
    """
    # The command's stdout is redirected to the status fd, so its output is
    # read back one char at a time over the status pipe.
    self._SendCommand('{command} >&{fdstatus}\n'.format(
        command=' '.join([self._Quote(arg) for arg in args]),
        fdstatus=self.SHELL_STATUS_FD))
    lines = []
    line = []
    while True:
      try:
        c = os.read(self._status_fd, 1)
      except (IOError, OSError, ValueError):
        # Yeah, ValueError for IO on a closed file.
        self._Exited()
      if c in (None, '\n'):
        if not line:
          # An empty line terminates the output.
          break
        lines.append(''.join(line).rstrip())
        line = []
      else:
        line.append(c)
    return lines

  def GetCompletions(self, args):
    """Returns the list of completion choices for args.

    Args:
      args: The list of command line argument strings to complete.

    Returns:
      The list of completions for args.
    """
    # __get_completions__ is installed in the coshell by
    # _GetUserConfigDefaults(); it prints completions then a blank line.
    return sorted(self.Communicate(['__get_completions__'] + args))
class _MinGWCoshell(_UnixCoshellBase):
  """The MinGW local coshell implementation.

  This implementation preserves coshell process state across Run().

  NOTE: The Windows subprocess module passes fds 0,1,2 to the child process and
  no others. It is possible to pass handles that can be converted to/from fds,
  but the child process needs to know what handles to convert back to fds. Until
  we figure out how to reconstitute handles as fds >= 3 we are stuck with
  restricting fds 0,1,2 to be /dev/tty. Luckily this works for the shell
  interactive prompt. Unfortunately this fails for the test environment.
  """

  SHELL_PATH = None  # Determined by the Coshell dynamic class below.
  STDIN_PATH = '/dev/tty'
  STDOUT_PATH = '/dev/tty'

  def __init__(self):
    super(_MinGWCoshell, self).__init__()
    self._shell = self._Popen()
    self._GetUserConfigDefaults()

  def _Popen(self):
    """Mockable popen+startupinfo so we can test on Unix."""
    startupinfo = subprocess.STARTUPINFO()
    # BUG FIX: this previously assigned `startupinfo.dWflags` -- a typo for
    # `dwFlags` -- so the value was silently dropped on the floor.  Moreover,
    # CREATE_NEW_PROCESS_GROUP is a process *creation* flag, so it belongs in
    # Popen's `creationflags`.  The new process group is what lets Interrupt()
    # deliver CTRL_C_EVENT to the shell without also interrupting us.
    return subprocess.Popen([self.SHELL_PATH],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            startupinfo=startupinfo,
                            creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)

  def Close(self):
    """Closes the coshell connection and release any resources."""
    try:
      self._shell.communicate('exit')  # This closes internal fds.
    except (IOError, ValueError):
      # Yeah, ValueError for IO on a closed file.
      pass
    return self._ShellStatus(self._shell.returncode)

  def _GetStatus(self):
    """Gets the status of the last command sent to the coshell.

    Statuses arrive on the coshell's stdout pipe here (there is no spare fd),
    with SHELL_STATUS_EXIT appended when the coshell itself has exited.
    """
    status_string = self._shell.stdout.readline().strip()
    if status_string.endswith(self.SHELL_STATUS_EXIT):
      c = self.SHELL_STATUS_EXIT
      status_string = status_string[:-1]
    else:
      c = ''
    if not status_string.isdigit() or c == self.SHELL_STATUS_EXIT:
      self._Exited()
    return int(status_string)

  def _Run(self, command, check_modes=True):
    """Runs command in the coshell and waits for it to complete."""
    # NOTE(review): the failure branch reports a literal 1 (not $status) on
    # the status line; (exit $status) still sets $? inside the coshell.
    self._SendCommand(
        "command eval {command} <'{stdin}' >>'{stdout}' && echo 0 || "
        "{{ status=$?; echo 1; (exit $status); }}".format(
            command=self._Quote(command),
            stdin=self.STDIN_PATH,
            stdout=self.STDOUT_PATH,
        ))
    status = self._GetStatus()

    # Re-check shell shared modes.
    if check_modes and re.search(r'\bset\s+[-+]o\s+\w+', command):
      self._GetModes()

    return status

  def Interrupt(self):
    """Sends the interrupt signal to the coshell."""
    self._shell.send_signal(signal.CTRL_C_EVENT)  # pytype: disable=module-attr
class _WindowsCoshell(_CoshellBase):
  """The windows local coshell implementation.

  This implementation does not preserve shell coprocess state across Run().
  """

  def __init__(self):
    super(_WindowsCoshell, self).__init__(state_is_preserved=False)

  def Run(self, command, check_modes=False):
    """Runs command in a fresh cmd.exe shell and returns its exit status."""
    del check_modes  # unused: there is no persistent shell to sync modes with
    return subprocess.call(command, shell=True)

  def Interrupt(self):
    """No-op: each Run() is its own process, so there is nothing to signal."""
    pass
def _RunningOnWindows():
"""Lightweight mockable Windows check."""
try:
return bool(WindowsError) # pytype: disable=name-error
except NameError:
return False
class Coshell(object):
  """The local coshell implementation shim.

  This shim class delays os specific checks until the first instantiation. The
  checks are memoized in the shim class for subsequent instantiations.
  """

  _IMPLEMENTATION = None  # memoized concrete coshell class, chosen once

  def __new__(cls, *args, **kwargs):
    if not cls._IMPLEMENTATION:
      if _RunningOnWindows():
        cls._IMPLEMENTATION = _WindowsCoshell
        # We do an explicit search rather than PATH lookup because:
        # (1) It's not clear that a git or MinGW installation automatically
        #     sets up PATH to point to sh.exe.
        # (2) Picking up any old sh.exe on PATH on a Windows system is dicey.
        for shell in [r'C:\MinGW\bin\sh.exe',
                      r'C:\Program Files\Git\bin\sh.exe']:
          if os.path.isfile(shell):
            cls._IMPLEMENTATION = _MinGWCoshell
            cls._IMPLEMENTATION.SHELL_PATH = shell
            break
      else:
        cls._IMPLEMENTATION = _UnixCoshell
    obj = cls._IMPLEMENTATION.__new__(cls._IMPLEMENTATION, *args, **kwargs)
    # NOTE(review): __init__ is called explicitly because the returned object
    # is not a Coshell instance, so Python will not call it automatically;
    # note that any *args/**kwargs reach __new__ only, not __init__.
    obj.__init__()  # The docs say this is unnecessary.
    return obj
|
# Asset manager provides tools for managing a local database of
# photos, that are tagged with point information. These photos can
# then be used in other commands, without us having to specify via the
# command line.
import pickle
import collections
import os
from pathlib import Path
from typing import List, Tuple
from .cmdlet import Cmdlet
# An (x, y) pixel coordinate pair.
PointType = Tuple[int, int]
# A tuple to represent an entry in our database
# - Name is the name of the file in our asset directory,
# - Points is a vector of 4 points, describing the perspective transform
# on an image
Entry = collections.namedtuple("Entry", "name points")
# On-disk layout: photos are hard-linked into ASSET_FOLDER, and the pickled
# list of Entry records lives alongside them in ASSET_DATA_FILE.
ASSET_FOLDER = "./assets"
ASSET_DATA_FILE = ASSET_FOLDER + "/data.dat"
# Note: not a true singleton -- each instantiation re-reads the pickled index
# from disk, so concurrent instances can overwrite each other's saves.
class AssetManager:
    """Manages the on-disk photo database rooted at ASSET_FOLDER.

    Photos are hard-linked into the asset folder and their transform points
    are recorded in a pickled list of Entry records.
    """

    def __init__(self):
        # Make sure the asset folder exists, then load any saved index.
        if not os.path.exists(ASSET_FOLDER):
            os.mkdir(ASSET_FOLDER)
        if os.path.exists(ASSET_DATA_FILE):
            with open(ASSET_DATA_FILE, "rb") as data_f:
                self._entries = pickle.load(data_f)
        else:
            self._entries = []

    def add(self, photo_path: Path, points: List[PointType]):
        """Hard-link the photo into the asset folder and record its points."""
        if not os.path.exists(photo_path):
            raise ValueError
        # Check the points, ensure they all are in a good format.
        validate_points(points)
        # Hard link photo into asset folder.
        destination = Path(ASSET_FOLDER, photo_path.name)
        os.link(photo_path, destination)
        # Record the new entry and persist the index.
        self._entries.append(Entry(photo_path.name, points))
        self.save()

    def delete(self, n: int):
        """Remove entry n (photo file and index record), if it exists."""
        if 0 <= n < len(self._entries):
            os.remove(Path(ASSET_FOLDER, self._entries[n].name))
            self._entries.pop(n)
            self.save()

    def save(self):
        """Persist the entry list to the pickle data file."""
        with open(ASSET_DATA_FILE, "wb") as data_f:
            pickle.dump(self._entries, data_f)

    def get(self, i: int):
        """Return the PATH (not name) and the transform points for entry i."""
        entry = self._entries[i]
        return (Path(ASSET_FOLDER, entry.name), entry.points)
def validate_points(points: List[Tuple[int, int]]):
    """Validate a perspective-transform quad.

    Args:
        points: exactly four (x, y) pairs; each coordinate must be a
            non-negative int.  Exact types are required, so bool (a subclass
            of int) and floats are rejected.

    Returns:
        True when the points are valid.

    Raises:
        ValueError: with a descriptive message when validation fails
            (previously raised bare, which made failures hard to diagnose).
    """
    if len(points) != 4:
        raise ValueError(f"expected 4 points, got {len(points)}")
    for p in points:
        x = p[0]
        y = p[1]
        # type() rather than isinstance() deliberately excludes bool.
        if type(x) != int or type(y) != int or x < 0 or y < 0:
            raise ValueError(
                f"invalid point {p!r}: coordinates must be non-negative ints"
            )
    return True
def get_points(args):
    """Collect the four (x1, y1)..(x4, y4) argument pairs into a point list."""
    return [
        (getattr(args, f"x{i}"), getattr(args, f"y{i}")) for i in range(1, 5)
    ]
def add(args):
    """Cmdlet handler: add a photo asset with its four transform points."""
    mgr = AssetManager()
    print(args)
    # BUG FIX: this called `assets.get_points(args)`, but no `assets` name
    # exists in this module (NameError at runtime); the helper is the
    # module-level `get_points`.
    mgr.add(Path(args.photopath), get_points(args))
# Register the `add` cmdlet: one positional photo path followed by the four
# corner coordinates (x1..y4, all ints) of the perspective transform.
add_cmd = Cmdlet("add", "Add an asset with set coordinates", add)
add_cmd.add_arg("photopath", help="Path of the photo you want to add").add_arg(
    "x1", help="Pixel coordinates for perspective transform", type=int
).add_arg("y1", type=int).add_arg("x2", type=int).add_arg("y2", type=int).add_arg(
    "x3", type=int
).add_arg(
    "y3", type=int
).add_arg(
    "x4", type=int
).add_arg(
    "y4", type=int
)
def delete_asset(args):
    """Cmdlet handler: drop asset number ``args.n`` from the database."""
    AssetManager().delete(args.n)
# Register the `delete` cmdlet: takes the integer index of the asset to remove.
delete_cmd = Cmdlet("delete", "Remove an asset from the db", delete_asset)
delete_cmd.add_arg("n", type=int, help="The asset number you want to remove")
|
from typing import Dict, Union
import gym
import numpy as np
from stable_baselines3.common.type_aliases import GymObs, GymStepReturn
class TimeFeatureWrapper(gym.Wrapper):
"""
Add remaining, normalized time to observation space for fixed length episodes.
See https://arxiv.org/abs/1712.00378 and https://github.com/aravindr93/mjrl/issues/13.
.. note::
Only ``gym.spaces.Box`` and ``gym.spaces.Dict`` (``gym.GoalEnv``) 1D observation spaces
are supported for now.
:param env: Gym env to wrap.
:param max_steps: Max number of steps of an episode
if it is not wrapped in a ``TimeLimit`` object.
:param test_mode: In test mode, the time feature is constant,
    equal to zero. This allows checking that the agent did not overfit to this
    feature by learning a deterministic, pre-defined sequence of actions.
"""
def __init__(self, env: gym.Env, max_steps: int = 1000, test_mode: bool = False):
assert isinstance(
env.observation_space, (gym.spaces.Box, gym.spaces.Dict)
), "`TimeFeatureWrapper` only supports `gym.spaces.Box` and `gym.spaces.Dict` (`gym.GoalEnv`) observation spaces."
# Add a time feature to the observation
if isinstance(env.observation_space, gym.spaces.Dict):
assert "observation" in env.observation_space.spaces, "No `observation` key in the observation space"
obs_space = env.observation_space.spaces["observation"]
assert isinstance(
obs_space, gym.spaces.Box
), "`TimeFeatureWrapper` only supports `gym.spaces.Box` observation space."
obs_space = env.observation_space.spaces["observation"]
else:
obs_space = env.observation_space
assert len(obs_space.shape) == 1, "Only 1D observation spaces are supported"
low, high = obs_space.low, obs_space.high
low, high = np.concatenate((low, [0.0])), np.concatenate((high, [1.0]))
self.dtype = obs_space.dtype
if isinstance(env.observation_space, gym.spaces.Dict):
env.observation_space.spaces["observation"] = gym.spaces.Box(low=low, high=high, dtype=self.dtype)
else:
env.observation_space = gym.spaces.Box(low=low, high=high, dtype=self.dtype)
super().__init__(env)
# Try to infer the max number of steps per episode
try:
self._max_steps = env.spec.max_episode_steps
except AttributeError:
self._max_steps = None
# Fallback to provided value
if self._max_steps is None:
self._max_steps = max_steps
self._current_step = 0
self._test_mode = test_mode
def reset(self) -> GymObs:
self._current_step = 0
return self._get_obs(self.env.reset())
def step(self, action: Union[int, np.ndarray]) -> GymStepReturn:
self._current_step += 1
obs, reward, done, info = self.env.step(action)
return self._get_obs(obs), reward, done, info
def _get_obs(self, obs: Union[np.ndarray, Dict[str, np.ndarray]]) -> Union[np.ndarray, Dict[str, np.ndarray]]:
"""
Concatenate the time feature to the current observation.
:param obs:
:return:
"""
# Remaining time is more general
time_feature = 1 - (self._current_step / self._max_steps)
if self._test_mode:
time_feature = 1.0
time_feature = np.array(time_feature, dtype=self.dtype)
if isinstance(obs, dict):
obs["observation"] = np.append(obs["observation"], time_feature)
return obs
return np.append(obs, time_feature)
|
<gh_stars>0
#!/usr/bin/env python
# Copyright 2007 The Spitfire Authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import object
import copy
import logging
import optparse
import os.path
import sys
import time
import traceback
import io as StringIO
from spitfire.compiler import compiler
from spitfire.compiler import options
from spitfire.compiler import util
from spitfire.compiler import visitor
from spitfire import runtime
from spitfire.runtime import runner
from spitfire.runtime import udn
# this class let's me check if placeholder caching is working properly by
# tracking the number of accesses for a single key
class ResolveCounter(object):
    """Per-key access counter used to verify placeholder caching.

    Any key/attribute whose name starts with ``'resolve'`` resolves; its value
    embeds how many times that name has been accessed so far.
    """

    def __init__(self):
        self._dict = {}

    @property
    def resolve_x(self):
        return self._get_item('resolve_x')

    @property
    def resolve_y(self):
        return self._get_item('resolve_y')

    def _get_item(self, key):
        # Bump the per-key access counter and echo it back in the value.
        count = self._dict.get(key, 0) + 1
        self._dict[key] = count
        return '%s%s' % (key, count)

    def __contains__(self, key):
        return key.startswith('resolve')

    def __getitem__(self, key):
        if not key.startswith('resolve'):
            raise KeyError(key)
        return self._get_item(key)

    def __getattr__(self, key):
        if not key.startswith('resolve'):
            raise AttributeError(key)
        return self._get_item(key)
# Snapshot of sys.modules taken at import time of this script;
# reset_sys_modules() restores it so each compiled template module is
# re-imported fresh for every test file.
sys_modules = list(sys.modules.keys())
def reset_sys_modules():
    # Drop every module that was imported after the snapshot above.
    for key in list(sys.modules.keys()):
        if key not in sys_modules:
            del sys.modules[key]
class TestRunner(object):
    """Compiles Spitfire templates and diffs their output against golden files.

    Results are accumulated in an in-memory buffer and flushed to stderr at
    the end; ``end()`` exits the process with 0/1 like a test harness.
    """
    def __init__(self, spt_compiler, spt_options, spt_files):
        self.compiler = spt_compiler
        self.options = spt_options
        self.files = spt_files
        # Layered search list exercising nested and top-level placeholder
        # resolution via ResolveCounter.
        self._search_list = [
            {'tier1': {'tier2': ResolveCounter()}},
            {'nest': ResolveCounter()},
            ResolveCounter(),
        ]
        if self.options.test_input:
            self._search_list.append(runner.load_search_list(
                self.options.test_input))
        self.buffer = StringIO.StringIO()
        self.start_time = 0
        self.finish_time = 0
        self.num_tests_run = 0
        self.num_tests_failed = 0
    # return a copy of the search_list for each set of tests
    @property
    def search_list(self):
        return copy.deepcopy(self._search_list)
    def run(self):
        """Run every test file, then report and exit."""
        self.begin()
        for filename in self.files:
            self.process_file(filename)
        self.end()
    def begin(self):
        self.start_time = time.time()
    def end(self):
        # Print the summary and exit with a unittest-style status code.
        self.finish_time = time.time()
        print(file=sys.stderr)
        if self.num_tests_failed > 0:
            sys.stderr.write(self.buffer.getvalue())
        print('-' * 70, file=sys.stderr)
        print('Ran %d tests in %0.3fs' % (
            self.num_tests_run, self.finish_time - self.start_time), file=sys.stderr)
        print(file=sys.stderr)
        if self.num_tests_failed > 0:
            print('FAILED (failures=%d)' % self.num_tests_failed, file=sys.stderr)
            sys.exit(1)
        else:
            print('OK', file=sys.stderr)
            sys.exit(0)
    def process_file(self, filename):
        """Compile one template (optionally), run it, and diff its output
        against ``<test_output>/<classname>.txt``; failures are appended to
        self.buffer and a ``.failed`` file is written for inspection."""
        buffer = StringIO.StringIO()
        reset_sys_modules()
        classname = util.filename2classname(filename)
        modulename = util.filename2modulename(filename)
        test_output_path = os.path.join(self.options.test_output,
                                        classname + '.txt')
        if self.options.verbose:
            sys.stderr.write(modulename + ' ... ')
        compile_failed = False
        if self.options.debug or self.options.compile:
            try:
                self.compiler.compile_file(filename)
            except Exception as e:
                compile_failed = True
                print('=' * 70, file=buffer)
                print('FAIL:', modulename, '(' + filename + ')', file=buffer)
                print('-' * 70, file=buffer)
                traceback.print_exc(None, buffer)
            # Dump any requested intermediate compiler stages.
            if self.options.debug:
                if 'parse_tree' in self.options.debug_flags:
                    print("parse_tree:", file=buffer)
                    visitor.print_tree(self.compiler._parse_tree, output=buffer)
                if 'analyzed_tree' in self.options.debug_flags:
                    print("analyzed_tree:", file=buffer)
                    visitor.print_tree(self.compiler._analyzed_tree,
                                       output=buffer)
                if 'optimized_tree' in self.options.debug_flags:
                    print("optimized_tree:", file=buffer)
                    visitor.print_tree(self.compiler._optimized_tree,
                                       output=buffer)
                if 'hoisted_tree' in self.options.debug_flags:
                    print("hoisted_tree:", file=buffer)
                    visitor.print_tree(self.compiler._hoisted_tree,
                                       output=buffer)
                if 'source_code' in self.options.debug_flags:
                    print("source_code:", file=buffer)
                    for i, line in enumerate(self.compiler._source_code.split(
                            '\n')):
                        print('% 3s' % (i + 1), line, file=buffer)
        test_failed = False
        if not self.options.skip_test:
            import tests
            current_output = None
            raised_exception = False
            try:
                if self.options.debug or self.options.compile:
                    template_module = util.load_module_from_src(
                        self.compiler._source_code, filename, modulename)
                else:
                    template_module = runtime.import_module_symbol(modulename)
            except Exception as e:
                # An exception here means the template is unavailble; the test
                # fails.
                test_failed = True
                raised_exception = True
                current_output = str(e)
            if not test_failed:
                try:
                    template_class = getattr(template_module, classname)
                    template = template_class(search_list=self.search_list)
                    # NOTE(review): encodes to bytes while the expected output
                    # is read back as str below -- on Python 3 this comparison
                    # can never be equal; confirm the intended interpreter.
                    current_output = template.main().encode('utf8')
                except Exception as e:
                    # An exception here doesn't meant that the test fails
                    # necessarily since libraries don't have a class; as long as
                    # the expected output matches the exception, the test
                    # passes.
                    raised_exception = True
                    current_output = str(e)
            if not test_failed:
                if self.options.test_accept_result:
                    # Golden-file update mode: overwrite the expected output.
                    test_file = open(test_output_path, 'w')
                    test_file.write(current_output)
                    test_file.close()
                try:
                    test_file = open(test_output_path)
                except IOError as e:
                    # An excpetion here means that the expected output is
                    # unavailbe; the test fails.
                    test_failed = True
                    raised_exception = True
                    current_output = str(e)
            if test_failed:
                test_output = None
            else:
                test_output = test_file.read()
            if current_output != test_output:
                test_failed = True
                if self.options.debug:
                    print("expected output:", file=buffer)
                    print(test_output, file=buffer)
                    print("actual output:", file=buffer)
                    print(current_output, file=buffer)
        if compile_failed or test_failed:
            self.num_tests_failed += 1
            if self.options.verbose:
                sys.stderr.write('FAIL\n')
            else:
                sys.stderr.write('F')
            # Persist the actual output so it can be diffed by hand.
            current_output_path = os.path.join(self.options.test_output,
                                               classname + '.failed')
            f = open(current_output_path, 'w')
            f.write(current_output)
            f.close()
            print('=' * 70, file=buffer)
            print('FAIL:', modulename, '(' + filename + ')', file=buffer)
            print('-' * 70, file=buffer)
            print('Compare expected and actual output with:', file=buffer)
            print(' '.join([' diff -u', test_output_path,
                            current_output_path]), file=buffer)
            print('Show debug information for the test with:', file=buffer)
            # Rebuild the invocation minus the file arguments, forcing --debug.
            test_cmd = [arg for arg in sys.argv if arg not in self.files]
            if '--debug' not in test_cmd:
                test_cmd.append('--debug')
            test_cmd = ' '.join(test_cmd)
            print(' ', test_cmd, filename, file=buffer)
            if raised_exception:
                print('-' * 70, file=buffer)
                print(current_output, file=buffer)
                traceback.print_exc(None, buffer)
            print(file=buffer)
            self.buffer.write(buffer.getvalue())
        else:
            if self.options.verbose:
                sys.stderr.write('ok\n')
            else:
                sys.stderr.write('.')
        self.num_tests_run += 1
if __name__ == '__main__':
    # CLI entry point: compile and/or run the template regression tests.
    # ``reload(sys)`` / ``sys.setdefaultencoding`` only exist on Python 2;
    # calling them unconditionally raises NameError/AttributeError on
    # Python 3, so guard them by interpreter version.
    if sys.version_info[0] < 3:
        reload(sys)  # noqa: F821 -- Python 2 builtin
        sys.setdefaultencoding('utf8')
    option_parser = optparse.OptionParser()
    options.add_common_options(option_parser)
    option_parser.add_option('-c',
                             '--compile',
                             action='store_true',
                             default=False)
    option_parser.add_option('--skip-test', action='store_true', default=False)
    option_parser.add_option(
        '--test-input',
        default='tests/input/search_list_data.pye',
        help='input data file for templates (.pkl or eval-able file)')
    option_parser.add_option('--test-output',
                             default='tests/output',
                             help="directory for output")
    option_parser.add_option(
        '--test-accept-result',
        action='store_true',
        default=False,
        help='accept current code output as correct for future tests')
    option_parser.add_option('--debug', action='store_true', default=False)
    option_parser.add_option(
        '--debug-flags',
        action='store',
        default='hoisted_tree,source_code',
        help='parse_tree, analyzed_tree, optimized_tree, hoisted_tree, source_code')
    option_parser.add_option('--enable-c-accelerator',
                             action='store_true',
                             default=False)
    (spt_options, spt_args) = option_parser.parse_args()
    # --debug implies verbose output and turns the flag string into a list.
    if spt_options.debug:
        spt_options.verbose = True
        spt_options.debug_flags = getattr(spt_options, 'debug_flags').split(',')
    else:
        spt_options.debug_flags = []
    udn.set_accelerator(spt_options.enable_c_accelerator, enable_test_mode=True)
    spt_compiler_args = compiler.Compiler.args_from_optparse(spt_options)
    spt_compiler = compiler.Compiler(**spt_compiler_args)
    test_runner = TestRunner(spt_compiler, spt_options, spt_args)
    test_runner.run()
|
import math
import torch
from torch import nn
import torch.nn.functional as F
class AAEmbeddings(nn.Module):
    """Embed amino-acid token ids (0..20) into a dense vector space.

    A frozen one-hot lookup table followed by a learned linear projection.
    """

    def __init__(self, embed_dim):
        super(AAEmbeddings, self).__init__()
        self.embed_dim = embed_dim
        # Frozen identity embedding: token id -> one-hot vector of size 21.
        self.onehot = nn.Embedding(21, 21)
        self.onehot.weight.data = torch.eye(21)
        self.onehot.weight.requires_grad = False
        self.aa_embeddings = nn.Linear(21, embed_dim)

    def forward(self, seq_ids):
        """
        seq_ids: (B, L)
        return: (B, L, C)
        """
        one_hot = self.onehot(seq_ids)
        return self.aa_embeddings(one_hot)
class FeedForward(nn.Module):
    """Post-attention residual block: add+norm, then a two-layer MLP with add+norm."""

    def __init__(self, ninp, dim_feedforward, dropout):
        super(FeedForward, self).__init__()
        self.linear1 = nn.Linear(ninp, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, ninp)
        self.norm1 = nn.LayerNorm(ninp)
        self.norm2 = nn.LayerNorm(ninp)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = nn.ReLU()

    def forward_fn(self, x, branch):
        # Residual add of the attention branch, then normalize.
        x = self.norm1(x + self.dropout1(branch))
        # Position-wise feed-forward with its own residual connection.
        hidden = self.activation(self.linear1(x))
        projected = self.linear2(self.dropout(hidden))
        return self.norm2(x + self.dropout2(projected))

    def forward(self, x, branch):
        return self.forward_fn(x, branch)
class X3DAttention(nn.Module):
    """Multi-head attention over node features, biased by pairwise edge features.

    Queries/keys/values come from the 1D track ``x3d``; the 2D track ``x2d``
    contributes an additive attention bias (via efc_k) and edge-gathered
    values (via efc_q).
    """
    def __init__(self, ninp, nhead, dim2d, dropout):
        super(X3DAttention, self).__init__()
        if ninp % nhead != 0:
            raise ValueError(
                "The hidden size is not a multiple of the number of attention heads"
            )
        self.nhead = nhead
        self.ninp = ninp
        self.fc_query = nn.Linear(ninp, ninp)
        self.fc_key = nn.Linear(ninp, ninp)
        self.fc_value = nn.Linear(ninp, ninp)
        # Project edge features into the node feature space before use.
        self.pre_proj = nn.Sequential(
            nn.Linear(dim2d, ninp),
            nn.LayerNorm(ninp),
            nn.ReLU(),
        )
        self.efc_q = nn.Linear(ninp, ninp)
        self.efc_k = nn.Linear(ninp, ninp)
        self.fc = nn.Linear(ninp, ninp)
        self.dropout = nn.Dropout(dropout)
    def transpose_for_scores(self, x, nhead):
        """
        x has shape (*, L, C)
        return shape (*, nhead, L, C/nhead)
        """
        new_shape = x.shape[:-1] + (nhead, -1)
        x = x.view(*new_shape)
        return x.transpose(-3, -2)
    def forward(self, x2d, x3d):
        """
        x2d has shape (B, L, L, C)
        x3d has shape (B, L, C)
        return shape (B, L, C)
        """
        query = self.transpose_for_scores(self.fc_query(x3d), self.nhead)
        key = self.transpose_for_scores(self.fc_key(x3d), self.nhead)
        value = self.transpose_for_scores(self.fc_value(x3d), self.nhead)
        edge = self.pre_proj(x2d)
        # (B, L, nhead, L, C)
        efq = self.transpose_for_scores(self.efc_q(edge), self.nhead)
        efk = self.transpose_for_scores(self.efc_k(edge), self.nhead)
        # Standard dot-product scores plus an edge-derived bias, jointly
        # scaled by sqrt(head_dim).
        attention_scores = torch.matmul(query, key.transpose(-1, -2))
        ex = torch.sum(efk * query.unsqueeze(1), dim=-1).permute(0, 2, 1, 3)
        attention_scores += ex
        attention_scores = attention_scores / math.sqrt(self.ninp / self.nhead)
        attention_weights = F.softmax(attention_scores, dim=-1)
        attention_weights = self.dropout(attention_weights)
        # Node-value aggregation plus an edge-value aggregation.
        # NOTE(review): the edge-value term weights by the *pre-softmax*
        # scores (attention_scores, not attention_weights) -- confirm this
        # is intentional rather than a dropped softmax.
        x1 = torch.matmul(attention_weights, value)
        x2 = torch.sum(attention_scores.unsqueeze(2) * efq.permute(0, 2, 4, 1, 3), dim=-1)
        x = x1 + x2.transpose(-1, -2)
        # Merge heads back into a single channel dimension.
        x = x.transpose(-3, -2)
        x = x.reshape(*x.shape[:-2], -1)
        x = self.dropout(self.fc(x))
        return x
class X3DTransformerLayer(nn.Module):
    """One structure-aware transformer layer: edge-biased attention + feed-forward."""

    def __init__(self, ninp, nhead, dim_feedforward, dropout, dim2d):
        super(X3DTransformerLayer, self).__init__()
        # +6 edge channels: the 3-vector pose feature and its transpose are
        # concatenated onto x2d inside forward().
        self.attention = X3DAttention(
            ninp=ninp, nhead=nhead, dim2d=dim2d + 6, dropout=dropout
        )
        self.feed_forward = FeedForward(
            ninp=ninp, dim_feedforward=dim_feedforward, dropout=dropout
        )

    def forward(self, x2d, x3d, pt, pr):
        """x2d: pair features, x3d: node features, (pt, pr): current pose."""
        # Pairwise translation differences, rotated into each residue's local
        # frame, scaled down for numerical range.
        displacement = (pt.unsqueeze(1) - pt.unsqueeze(2)).unsqueeze(-1)
        pose_feat = torch.matmul(pr.unsqueeze(2), displacement).squeeze(-1)
        pose_feat = pose_feat / 10
        edge_input = torch.cat([x2d, pose_feat, pose_feat.transpose(-2, -3)], dim=-1)
        branch = self.attention(edge_input, x3d)
        x3d = self.feed_forward(x3d, branch)
        return x2d, x3d
class X3DTransformer(nn.Module):
    """Stack of X3DTransformerLayer blocks followed by a pose-update head."""

    def __init__(self, n_layer, **kwargs):
        super(X3DTransformer, self).__init__()
        self.layers = nn.ModuleList(
            [X3DTransformerLayer(**kwargs) for _ in range(n_layer)]
        )
        ninp = kwargs["ninp"]
        # Head predicts 9 numbers per residue: a translation update (3) and
        # two direction vectors (3 + 3) used to rebuild a rotation matrix.
        self.fc = nn.Sequential(nn.Linear(ninp, ninp), nn.ReLU(), nn.Linear(ninp, 9))

    def forward(self, node_feat, x2d, pose, prefix=""):
        """Refine the per-residue pose.

        node_feat: (B, L, C) node features; x2d: (B, L, L, C2) edge features;
        pose: tuple (pt, pr) of translations (B, L, 3) and rotations
        (B, L, 3, 3). ``prefix`` is unused here; kept for interface
        compatibility with callers.
        Returns (x2d, (new_translations, new_rotations)).
        """
        x3d = node_feat
        pt, pr = pose
        # (dropped unused locals ``extra`` and the enumerate index)
        for layer in self.layers:
            x2d, x3d = layer(x2d, x3d, pt, pr)
        x = self.fc(x3d)
        x = x.reshape(*x.shape[:-1], 3, 3)
        # The translation update is expressed in the local frame; rotate it
        # back with the transposed (inverse) rotation before adding.
        nt = pt + torch.matmul(pr.transpose(-1, -2), x[..., :1]).squeeze(-1)
        v1, v2 = x[..., 1], x[..., 2]
        v1 = v1 / torch.norm(v1, dim=-1).unsqueeze(-1)
        v2 = v2 / torch.norm(v2, dim=-1).unsqueeze(-1)
        # Build an orthogonal frame from the two predicted directions via
        # cross products (Gram-Schmidt-like construction).
        e1 = v1
        e2 = torch.cross(e1, v2)
        e3 = torch.cross(e1, e2)
        rot_inv = torch.cat([e1, e2, e3], dim=-1).reshape(*e3.shape[:-1], 3, 3)
        nr = torch.matmul(rot_inv, pr)
        return x2d, (nt, nr)
class Model(nn.Module):
    """End-to-end structure model: sequence/pair embeddings + iterative pose refinement."""

    def __init__(self):
        super(Model, self).__init__()
        self.node_dim = 256
        # Pair feature width: 128 relative-position channels plus the
        # precomputed channels (37 + 25 + 25 + 13) of data["pwt_feat"]
        # -- widths assumed from the data pipeline; confirm there.
        self.edge_dim = 128 + 37 + 25 + 25 + 13
        self.max_pos = 128
        self.aa_embeddings = nn.Sequential(
            AAEmbeddings(self.node_dim), nn.LayerNorm(self.node_dim)
        )
        self.pos_embeddings = nn.Sequential(
            nn.Embedding(self.max_pos * 2 + 1, 128), nn.LayerNorm(128)
        )
        self.x3d_transformer = X3DTransformer(
            n_layer=4,
            ninp=self.node_dim,
            nhead=self.node_dim // 16,
            dim_feedforward=self.node_dim * 4,
            dropout=0.1,
            dim2d=self.edge_dim,
        )

    def forward(self, data):
        """Run iterative pose refinement.

        data: dict with "seq" (B, L) token ids and "pwt_feat" pair features
        (shapes assumed -- confirm against the data loader).
        Returns the final (translation, rotation) pose tuple.
        """
        node_feat = self.aa_embeddings(data["seq"])
        seq = data["seq"]
        # Clipped relative positions, shifted into [0, 2 * max_pos].
        index = torch.arange(seq.shape[1], device=seq.device)[None]
        rp = index.unsqueeze(2) - index.unsqueeze(1) + self.max_pos
        rp = torch.clamp(rp, 0, self.max_pos * 2)
        edge_feat = torch.cat([self.pos_embeddings(rp), data["pwt_feat"]], dim=-1)
        B, L = node_feat.shape[:2]
        # Start from the identity pose (zero translation, identity rotation).
        init_T = torch.zeros(B, L, 3).to(node_feat)
        init_R = torch.eye(3).to(node_feat)[None, None].repeat(B, L, 1, 1)
        pose = (init_T, init_R)
        # Fewer refinement iterations during training; gradients are cut
        # between iterations by detaching the pose. (Removed unused ``ret``.)
        n_iter = 6 if self.training else 20
        for _ in range(n_iter):
            _, pose = self.x3d_transformer(node_feat, edge_feat, pose)
            pose = (pose[0].detach(), pose[1].detach())
        return pose
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013, <NAME>
# Author: <NAME> <<EMAIL>>
import collections
import serial
import threading
import time
import constants
# Override thread quit exception handler
class ThreadQuitException(Exception):
    """Raised inside the read loop to terminate the monitor thread cleanly."""
    pass
class BLEParser(threading.Thread):
    """
    A parser for event packets as defined by the the Texas Instruments Bluetooth
    Low Energy Host-Controller-Interface (HCI).
    Capable of monitoring a serial device, parsing the packets and returning to
    a calling method by callback
    Based heavily on python-xbee library by <NAME>.

    NOTE(review): Python 2 only -- this class relies on the print statement
    and on str.encode('hex'), both removed in Python 3.
    """
    #dictionaries
    #opcodes for command packets
    opcodes = constants.opcodes
    #structure of event packets
    hci_events = constants.hci_events
    #parameter formats for HCI_LE_ExtEvent
    ext_events = constants.ext_events
    def __init__(self, ser=None, callback=None):
        """
        Initialises the class
        @param ser: The file like serial port to use
        @type ser: serial.Serial
        @param callback: The callback method
        @type callback: <function>
        """
        super(BLEParser, self).__init__()
        self.serial_port = ser
        self._callback = None
        self._thread_continue = False
        self._stop = threading.Event()
        # Supplying a callback switches on threaded monitoring immediately.
        if callback:
            self._callback = callback
            self._thread_continue = True
            self.start()
    def run(self):
        """
        Overrides threading.Thread.run() and is automatically
        called when an instance is created with threading enabled
        """
        while True:
            try:
                self._callback(self.wait_read())
            except ThreadQuitException:
                break
    def stop(self):
        """
        Stops the thread and closes the serial port
        """
        self._thread_continue = False
        self.serial_port.close()
        self._stop.set()
    def stopped(self):
        """
        Getter method for isSet variable
        >>> stopped()
        false
        """
        return self._stop.isSet()
    def _wait_for_frame(self):
        """
        Reads from the serial port until a valid HCI packet arrives. It will then
        return the binary data contained within the packet.
        @return: A byte string of the correct length
        """
        #loop forever...
        while True:
            #...unless told not to by setting "_thread_continue" to false
            if self._callback and not self._thread_continue:
                raise ThreadQuitException
            #prevent blocking the port by waiting a given time
            #TODO. Remove this? Asynchronous read-write possible
            if self.serial_port.inWaiting() == 0:
                time.sleep(.01)
                continue
            #length byte is stored as the third byte in an event packet
            # NOTE(review): assumes the stream is aligned on a packet
            # boundary; a dropped byte would desynchronize parsing.
            packet = self.serial_port.read(3)
            #convert this to decimal...
            data_len = int(packet[2].encode('hex'), 16)
            #...and retrieve that many bytes from the serial port
            for x in range(0,data_len):
                packet += self.serial_port.read()
            return packet
    def _split_response(self, data):
        """
        Takes a data packet received from a TI BLE device and parses it.
        >>> _split_response("\\x04\\xFF\\x08\\x7F\\x06\\x00\\x31\\xFE\\x02\\xD0\\x07")
        ('\\x04\\xff\\x08\\x7f\\x06\\x00\\x31\\xfe\\x02\\xd0\\x07', OrderedDict(
        [('type', ('\\x04', 'Event')),
        ('event_code', ('\\xff', 'HCI_LE_ExtEvent')),
        ('data_len', ('\\x02', '02')),
        ('event', ('\\x06\\x7f', 'GAP_HCI_ExtensionCommandStatus')),
        ('status', ('\\x00', '00')),
        ('op_code', ('\\31\\xfe', 'GAP_GetParam')),
        ('param_value', ('\\xd0\\x07', '07d0'))]))
        @param data: The byte string to split and parse
        @type data: hex
        @return: An ordered dictionary (data order is important) containing binary
                 tuples, in which the first piece of data corresponds to the raw byte
                 string value and the second piece corresponds to its parsed
                 "meaning"
        """
        packet_type = data[0]
        event_code = data[1]
        data_len = data[2]
        #check for matching event codes in dictionary and store the matching
        # packet format
        try:
            packet = self.hci_events[event_code.encode('hex')]
        except AttributeError:
            raise NotImplementedError("Error with Attribute")
        except KeyError:
            raise KeyError("Unrecognized response packet with event" +
                           " type {0}".format(event_code.encode('hex')))
        packet_type_parsed = "Event"
        event_code_parsed = packet['name']
        data_len_parsed = int(data_len.encode('hex'),16)
        #packet match found, hence start storing result
        parsed_packet = collections.OrderedDict()
        parsed_packet['type'] = (packet_type, packet_type_parsed)
        parsed_packet['event_code'] = (event_code, event_code_parsed)
        parsed_packet['data_len'] = (data_len, data_len_parsed)
        #store the packet structure for working with in next step
        packet_structure = packet['structure']
        #special handler for HCI_LE_ExtEvent
        if event_code_parsed == 'HCI_LE_ExtEvent':
            #current byte index in the data stream
            index = 6
            #event_subcode is two-bytes given in reverse (endian mismatch?)
            event_subcode = data[3:5][::-1] #reverse byte order [::-1]
            event_status = data[5]
            try:
                subpacket = self.ext_events[event_subcode.encode('hex')]
            except AttributeError:
                raise NotImplementedError("Error with Attribute")
            except KeyError:
                print data.encode('hex')
                raise KeyError("Unrecognized response packet with event" +
                               " type {0}".format(data[3:5][::-1]))
            event_subcode_parsed = subpacket['name']
            event_status_parsed = event_status.encode('hex')
            #subpacket match found, hence store result
            parsed_packet['event'] = (event_subcode, event_subcode_parsed)
            parsed_packet['status'] = (event_status, event_status_parsed)
            #store the subpacket structure for working with in next step
            subpacket_structure = subpacket['structure']
            #parse the subpacket in the order specified, by processing each
            # required value as needed
            for field in subpacket_structure:
                field_name = field['name']
                #if the data field has a fixed length, process it normally
                if field['len'] is not None:
                    #store the number of bytes specified in the dictionary
                    field_data = data[index:(index + field['len'])]
                    field_data_parsed = field_data[::-1].encode('hex')
                    #store result
                    parsed_packet[field_name] = (field_data, field_data_parsed)
                    #increment index for next field
                    index += field['len']
                #if the data field has no length specified, store any leftover
                # bytes and quit
                else:
                    field_data = data[index:]
                    #were there any remaining bytes?
                    if field_data:
                        #if so, store them
                        field_data_parsed = field_data[::-1].encode('hex')
                        parsed_packet[field_name] = (field_data, field_data_parsed)
                        index += len(field_data)
                    break
            # NOTE(review): dead no-op branch (subcode 0x0580) -- looks like a
            # leftover debugging hook; confirm whether it can be removed.
            if event_subcode.encode('hex') == "0580":
                pass
            #check if there are remaining bytes. If so, raise an exception
            # NOTE(review): the condition fires when *fewer* bytes were parsed
            # than declared, yet the message says "longer than expected" --
            # confirm which direction was intended.
            if index < data_len_parsed:
                raise ValueError("Response packet was longer than expected;" +
                                 "expected: %d, got: %d bytes" % (index, data_len_parsed))
            #check for parsing rules and apply them if they exist
            if 'parsing' in subpacket:
                for parse_rule in subpacket['parsing']:
                    #only apply a rule if relevant (raw data available)
                    parse_rule_name = parse_rule[0]
                    parse_rule_def = parse_rule[1]
                    if parse_rule_name in parsed_packet:
                        #apply the parse function to the indicated field
                        # and replace the raw data with the result
                        parsed_packet[parse_rule_name] = parse_rule_def(self,parsed_packet)
        return (data, parsed_packet)
    def _parse_opcodes(self, parsed_packet):
        """
        Functions as a special parsing routine for the "GAP HCI Extention Command
        Status" HCI LE ExtEvent.
        >>> _parse_opcodes(("\\x04\\xFE", "fe04"))
        ("\\x04\\xFE", "GAP_DeviceDiscoveryRequest")
        @param parsed_packet: A tuple of a byte string and the ascii encoded copy
        @type parsed_packet: (hex, string)
        @return: An ordered dictionary (data order is important) containing binary
                 tuples, in which the first piece of data corresponds to the raw
                 byte string value and the second piece corresponds to its parsed
                 "meaning" - the command name sourced by lookup of the command dict
        """
        value = self.opcodes[parsed_packet[1]]
        return (parsed_packet[0], value)
    def _parse_devices(self, orig_devices):
        """
        Functions as a special parsing routine for the "GAP Device Discovery
        Done" HCI LE ExtEvent.
        >>> _parse_devices(("\\x00\\x00\\x57\\x6A\\xE4\\x31\\x18\\x00", "0000576AE4311800"))
        [OrderedDict([
        ('event_type', ('\\x00', '00')),
        ('addr_type', ('\\x00', '00')),
        ('addr', ('\\x57\\x6a\\xe4\\x31\\x18\\x00', '001831e46a57'))
        ])]
        @param orig_devices: A tuple of a byte string and the ascii encoded copy
        @type orig_devices: (hex, string)
        @return: An ordered dictionary (data order is important) containing binary
                 tuples, in which the first piece of data corresponds to the raw
                 byte string value and the second piece corresponds to its parsed
                 "meaning" - currently just the hex encoded version of the string
        """
        parsed_devices = []
        #seperate the byte string containing the devices into groups of eight
        # bytes
        for idx, device in enumerate([orig_devices[0][i:i+8] for i in
                                      range(0, len(orig_devices[0]), 8)]):
            event_type = device[0]
            addr_type = device[1]
            addr = device[2:9]
            event_type_parsed = event_type.encode('hex')
            addr_type_parsed = addr_type.encode('hex')
            # Address bytes arrive little-endian; reverse for display.
            addr_parsed = addr[::-1].encode('hex')
            #store the parsed device as an ordered dictionary (order once again
            # important)
            temp_device = collections.OrderedDict()
            temp_device['event_type'] = (event_type, event_type_parsed)
            temp_device['addr_type'] = (addr_type, addr_type_parsed)
            temp_device['addr'] = (addr, addr_parsed)
            #append the ordered dict containing the parsed device to a list
            parsed_devices.append(temp_device)
        #return the resulting list
        return parsed_devices
    def _parse_read_results(self, results):
        """
        Functions as a special parsing routine for the "ATT Read By Type Rsp" HCI
        LE ExtEvent.
        >>> _parse_read_results(("\\x00\\x00\\x57\\x6A\\xE4\\x31\\x18\\x00", "0000576AE4311800"))
        TODO
        @param results: A tuple of a byte string and the ascii encoded copy
        @type results: (hex, string)
        @return: An ordered dictionary (data order is important) containing binary
                 tuples, in which the first piece of data corresponds to the raw
                 byte string value and the second piece corresponds to its parsed
                 "meaning" - currently just the hex encoded version of the string
        """
        parsed_results = []
        #seperate the byte string containing the results into groups of eight
        # bytes
        for idx, result in enumerate([results[0][i:i+8] for i in
                                      range(0, len(results[0]), 8)]):
            handle = result[0:2]
            data = result[2:9]
            handle_parsed = handle[::-1].encode('hex')
            data_parsed = data[::-1].encode('hex')
            #store the parsed result as an ordered dictionary (order once again
            # important)
            temp_result = collections.OrderedDict()
            temp_result['handle'] = (handle, handle_parsed)
            temp_result['data'] = (data, data_parsed)
            #append the ordered dict containing the parsed result to a list
            parsed_results.append(temp_result)
        #return the resulting list
        return parsed_results
    def _parse_find_info_results(self, results, rsp_format):
        """
        Functions as a special parsing routine for the "ATT_FindInfoRsp" HCI
        LE ExtEvent.
        >>> _parse_find_info_results(("\\x00\\x00\\x57\\x6A\\xE4\\x31\\x18\\x00", "0000576AE4311800"))
        TODO
        @param results: A tuple of a byte string and the ascii encoded copy
        @type results: (hex, string)
        @return: An ordered dictionary (data order is important) containing binary
                 tuples, in which the first piece of data corresponds to the raw
                 byte string value and the second piece corresponds to its parsed
                 "meaning" - currently just the hex encoded version of the string
        """
        parsed_results = []
        # rsp_format = results[0][0]
        # rsp_format_parsed = rsp_format.encode('hex')
        # parsed_results.append({'format': (rsp_format, rsp_format_parsed)})
        data = results[0][0::]
        # Format 0x01: pairs of (handle, 16-bit UUID); 0x02: one handle
        # followed by a long (128-bit) UUID.
        if rsp_format[1] == "01":
            #A List Of 1 Or More Handles With Their 16-bit Bluetooth UUIDs
            for idx, result in enumerate([data[i:i+4] for i in
                                          range(0, len(data), 4)]):
                handle = result[0:2]
                uuid = result[2:4]
                handle_parsed = handle[::-1].encode('hex')
                uuid_parsed = uuid[::-1].encode('hex')
                #store the parsed result as an ordered dictionary (order once again
                # important)
                temp_result = collections.OrderedDict()
                temp_result['handle'] = (handle, handle_parsed)
                temp_result['uuid'] = (uuid, uuid_parsed)
                #append the ordered dict containing the parsed result to a list
                parsed_results.append(temp_result)
        elif rsp_format[1] == "02":
            handle = data[0:2]
            handle_parsed = handle[::-1].encode('hex')
            uuid = data[2::]
            uuid_parsed = uuid[::-1].encode('hex')
            temp_result = collections.OrderedDict()
            temp_result['handle'] = (handle, handle_parsed)
            temp_result['uuid'] = (uuid, uuid_parsed)
            parsed_results.append(temp_result)
        else:
            raise Exception("Unrecognized data format: %s" % rsp_format[1])
        #return the resulting list
        return parsed_results
    def _parse_find_by_type_results(self, results):
        # Returns the first (start_handle, end_handle) pair found.
        # NOTE(review): bare except swallows all errors and falls back to the
        # full handle range -- deliberate best-effort, but worth narrowing.
        try:
            data = results[0][0::]
            for idx, result in enumerate([data[i:i+4] for i in
                                          range(0, len(data), 4)]):
                start_handle = result[0:2]
                end_handle = result[2:4]
                return start_handle, end_handle
        except:
            return "\x01\x00", "\xfe\xff" #if error in parsing, return full range
    def _parse_read_by_type_results(self, results):
        # Returns the first (handle, value bytes) pair found, or (None, None).
        try:
            data = results[0][0::]
            for idx, result in enumerate([data[i:i+4] for i in
                                          range(0, len(data), 4)]):
                handle = result[0:2]
                data = result[2::]
                return handle, data
        except:
            return None, None #if error in parsing, return full range
    def wait_read(self):
        """
        Combines both _wait_for_frame (to read a valid packet) and _split_response
        (to parse that packet).
        @return: A parsed version of the packet received on the serial port
        """
        packet = self._wait_for_frame()
        return self._split_response(packet)
#!/usr/bin/env python
# coding: utf-8
import pickle
import numpy as np
import pandas as pds
from pyro.ops.stats import quantile
from scipy.stats import norm
import data_loader
import pyro_model.helper
# ## loading data
# Countries included in the model fit / evaluation.
countries = [
    'United Kingdom',
    'Italy',
    'Germany',
    'Spain',
    'US',
    'France',
    'Belgium',
    'Korea, South',
    'Brazil',
    'Iran',
    'Netherlands',
    'Canada',
    'Turkey',
    'Romania',
    'Portugal',
    'Sweden',
    'Switzerland',
    'Ireland',
    'Hungary',
    'Denmark',
    'Austria',
    'Mexico',
    'India',
    'Ecuador',
    'Russia',
    'Peru',
    'Indonesia',
    'Poland',
    'Philippines',
    'Japan',
    'Pakistan'
]
# Directory holding the pickled per-seed model outputs.
prefix = 'trained_models/'
# prefix = ''
pad = 24
data_dict = data_loader.get_data_pyro(countries, smart_start=False, pad=pad)
data_dict = pyro_model.helper.smooth_daily(data_dict)
# Hold out the last `days` days as the test window.
days = 14
train_len = data_dict['cum_death'].shape[0] - days
test_dates = data_dict['date_list'][train_len:]
# NOTE(review): bare expression below is a no-op (notebook residue);
# its value is discarded.
len(data_dict['date_list'][train_len:])
# ## loading results
# Load predictive samples for every available RNG seed; seeds whose pickle
# is missing are silently skipped.
seed_list = []
predictive_list = []
samples_list = []
for seed in range(15):
    model_id = 'day-{}-rng-{}'.format(days, seed)
    try:
        with open(prefix + 'Loop{}/{}-predictive.pkl'.format(days, model_id), 'rb') as f:
            predictive = pickle.load(f)
    except Exception:
        continue
    predictive_list.append(predictive)
    with open(prefix + 'Loop{}/{}-samples.pkl'.format(days, model_id), 'rb') as f:
        samples = pickle.load(f)
    samples_list.append(samples)
    seed_list.append(seed)
# validation accuracy
# Per-seed validation error: absolute difference between predicted and
# observed total deaths over the last `val_window` training days.
val_window = 14
seir_error_list = []
for i in range(len(predictive_list)):
    seir_train = quantile(predictive_list[i]['prediction'].squeeze(), 0.5, dim=0)[-val_window + 1:, :].numpy()
    seir_train = np.diff(seir_train, axis=0)
    seir_label = data_dict['daily_death'][train_len - val_window:train_len, :].numpy()
    seir_error = np.abs(np.sum(seir_train, axis=0) - np.sum(seir_label, axis=0))
    seir_error_list.append(seir_error)
seir_error = np.stack(seir_error_list, axis=0)
# Pick, per country, the seed with the lowest validation error.
best_model = np.argmin(seir_error, axis=0)
best_seed = [seed_list[x] for x in best_model]
test_len = 14
best_error_list = []
pred_low_list = []
pred_high_list = []
covered_list = []
length_list = []
crps_list = []
# NOTE(review): shifted to 13 so the slices below use ``:test_len + 1`` --
# net effect is a 14-day window; confirm this off-by-one dance is intended.
test_len = test_len - 1
for j, i in zip(range(len(countries)), best_model):
    c = countries[j]
    # get daily death label
    seir_label = data_dict['daily_death'][train_len:, j].numpy()
    samples = samples_list[i]
    sample_daily = np.diff(samples, axis=1)
    model_pred = np.mean(sample_daily, axis=0)[:, j]
    err = np.mean(np.abs(model_pred - seir_label)[:test_len + 1])
    best_error_list.append(err)
    # percentiles
    # Clamp negative daily-death samples to zero before taking quantiles.
    sample_daily[sample_daily < 0] = 0
    model_pred_low = np.quantile(sample_daily, 0.025, axis=0)[:, j]
    model_pred_high = np.quantile(sample_daily, 0.975, axis=0)[:, j]
    covered = np.mean((seir_label >= model_pred_low)[:test_len + 1] & (seir_label <= model_pred_high)[:test_len + 1])
    length = np.mean((model_pred_high - model_pred_low)[:test_len + 1])
    # crps
    # NOTE(review): discrete approximation of CRPS over this quantile grid.
    q = [0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3,
         0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75,
         0.8, 0.85, 0.9, 0.95, 0.975, 0.99]
    model_quantile = np.quantile(sample_daily, q, axis=0)[:, :, j]
    crps_list0 = list()
    for k in range(model_quantile.shape[1]):
        pred = model_quantile[:, k]
        proba = q.copy()
        less_ind = pred < seir_label[k]
        proba_label = np.ones_like(proba)
        proba_label[less_ind] = 0
        crps_list0.append(np.mean((proba_label - proba) ** 2))
    crps_list.append(np.mean(np.array(crps_list0)))
    pred_low_list.append(model_pred_low)
    pred_high_list.append(model_pred_high)
    covered_list.append(covered)
    length_list.append(length)
test_date = data_dict['date_list'][test_len - days].date()
train_end_date = data_dict['date_list'][train_len].date()
# Per-country summary of the best-seed model.
df_ours = pds.DataFrame(
    {'countries': countries, 'best_err': best_error_list, 'best_length': length_list, 'crps': crps_list,
     'best_seed': best_seed, 'best_model': best_model})
# Subset reported in the paper's Table 2.
eval_list = [
    'Denmark',
    'Italy',
    'Germany',
    'Spain',
    'United Kingdom',
    'France',
    'Belgium',
    'Austria',
    'Sweden',
    'Switzerland',
    'Portugal',
    'Netherlands',
    'Brazil',
    'US'
]
df_save = df_ours[df_ours.countries.isin(eval_list)]
df_save.to_csv('tables/Table-2-cgp-countries-14d.csv')
# ## Get benchmarks
# Compare against published COVID-19 Forecast Hub submissions (US only).
from datetime import timedelta
test_len = 14
model_list = []
err_list = []
length_list = []
# NOTE(review): ``cprs_list`` is presumably a typo for ``crps_list``; kept
# as-is because later code may reference this exact name.
cprs_list = []
eval_days = [str(train_end_date + timedelta(days=x)) for x in range(test_len)]
pred_name = [str(x) + ' day ahead inc death' for x in range(1, test_len + 2)]
loc = countries.index('US')
seir_label = data_dict['daily_death'][train_len:, loc].numpy()[:test_len]
# NOTE(review): bare expression (notebook residue); value is discarded.
seir_label
# ### LANL
file_list = ['covid19hub/benchmark-04-25/' + '2020-04-23-LANL-GrowthRate.csv']
f = file_list[0]
m = 'LANL'
model_list.append(m)
df_bench = pds.read_csv(f)
# Sum state-level point forecasts (FIPS 1-56) to a national total per day.
df_test = df_bench[
    (df_bench.type == 'point') & (df_bench.target.isin(pred_name)) & (df_bench.location.isin(range(1, 57)))]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_test = df_test[df_test.target_end_date.isin(eval_days)]
# predictions not available for 2020-05-03
if test_len == 14:
    seir_label2 = np.concatenate([seir_label[:8], seir_label[9:]])
else:
    seir_label2 = seir_label
err = np.mean(np.abs(df_test.value.values - seir_label2))
err_list.append(err)
# 95% interval width from the 0.025 / 0.975 quantile submissions.
df_test = df_bench[
    (df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.025) & (
        df_bench.location.isin(range(1, 57)))]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_low = df_test[df_test.target_end_date.isin(eval_days)]
df_test = df_bench[
    (df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.975) & (
        df_bench.location.isin(range(1, 57)))]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_high = df_test[df_test.target_end_date.isin(eval_days)]
length = np.mean(df_high.value.values - df_low.value.values)
length_list.append(length)
# CRPS not computable from this submission's format.
cprs_list.append(np.nan)
# ### Imperial
file_list = ['covid19hub/benchmark-04-25/' + '2020-04-26-Imperial-ensemble2.csv']
f = file_list[0]
m = 'Imperial'
model_list.append(m)
df_bench = pds.read_csv(f)
df_test = df_bench[(df_bench.type == 'point') & (df_bench.target.isin(pred_name))]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_test = df_test[df_test.target_end_date.isin(eval_days)]
# NOTE(review): labels offset by 2 days, presumably to align with Imperial's
# forecast start date -- confirm against the submission's target_end_date.
if test_len == 7:
    seir_label2 = seir_label[2:]
else:
    seir_label2 = seir_label[2:9]
err = np.mean(np.abs(df_test.value.values - seir_label2))
err_list.append(err)
df_test = df_bench[(df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.025)]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_low = df_test[df_test.target_end_date.isin(eval_days)]
df_test = df_bench[(df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.975)]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_high = df_test[df_test.target_end_date.isin(eval_days)]
length = np.mean(df_high.value.values - df_low.value.values)
length_list.append(length)
## crps
seir_label2 = seir_label[2:9]
df_test = df_bench[(df_bench.type == 'quantile') & (df_bench.target.isin(pred_name))]
df_test = df_test[['target_end_date', 'value', 'quantile']].groupby(['target_end_date', 'quantile']).sum().reset_index()
df_test = df_test[df_test.target_end_date.isin(eval_days)]
df_test.head()  # notebook residue; no effect
avail_days = df_test.target_end_date.unique()
day = avail_days[0]  # leftover single-step debugging values; overwritten by the loop
i = 0
crps_list = []
for i, day in enumerate(avail_days):
    df_sub = df_test[df_test['target_end_date'] == day]
    pred = df_sub.value.values
    proba = df_sub['quantile'].values
    less_ind = pred < seir_label2[i]
    proba_label = np.ones_like(proba)
    proba_label[less_ind] = 0
    crps_list.append(np.mean((proba_label - proba) ** 2))
np.mean(np.array(crps_list))  # notebook residue
cprs_list.append(np.mean(np.array(crps_list)))
# ### IHME
file_list = ['covid19hub/benchmark-04-25/' + '2020-04-27-IHME-CurveFit.csv']
f = file_list[0]
m = 'IHME'
model_list.append(m)
df_bench = pds.read_csv(f)
df_bench['quantile'].unique()  # notebook residue; no effect
df_test = df_bench[(df_bench.type == 'point') & (df_bench.target.isin(pred_name)) & (df_bench.location_name == 'US')]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_test = df_test[df_test.target_end_date.isin(eval_days)]
mean = df_test.value.values
seir_label2 = seir_label[3:]  # NOTE(review): 3-day label offset; presumably aligns forecast dates -- confirm
actual = seir_label2
err = np.mean(np.abs(df_test.value.values - seir_label2))
err_list.append(err)
df_test = df_bench[
    (df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.025) & (
        df_bench.location_name == 'US')]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_low = df_test[df_test.target_end_date.isin(eval_days)]
df_test = df_bench[
    (df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.975) & (
        df_bench.location_name == 'US')]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_high = df_test[df_test.target_end_date.isin(eval_days)]
# Back out a normal std-dev from the 95% interval (z = 1.96 on each side).
sd = (df_high.value.values - df_low.value.values) / (1.96 * 2)
length = np.mean(df_high.value.values - df_low.value.values)
length_list.append(length)
# CRPS approximation assuming a Gaussian forecast evaluated at the quantile grid q.
crps_list0 = list()
for k in range(len(sd)):
    pred = norm.ppf(q, loc=mean[k], scale=sd[k])
    proba = q.copy()
    less_ind = pred < actual[k]
    proba_label = np.ones_like(proba)
    proba_label[less_ind] = 0
    crps_list0.append(np.mean((proba_label - proba) ** 2))
np.mean(np.array(crps_list0))  # notebook residue
cprs_list.append(np.mean(np.array(crps_list0)))
# ### MIT-DELPHI
file_list = ['covid19hub/benchmark-04-25/' + '2020-04-27-MIT_CovidAnalytics-DELPHI.csv']
f = file_list[0]
m = 'MIT-DELPHI'
model_list.append(m)
df_bench = pds.read_csv(f)
# DELPHI reports cumulative deaths; difference the point forecasts to get daily increments.
cum_name = [str(x) + ' day ahead cum death' for x in range(1, test_len + 2)]
df_test = df_bench[(df_bench.type == 'point') & (df_bench.target.isin(cum_name))]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_test = df_test[df_test.target_end_date.isin(eval_days)]
seir_label2 = seir_label[4:]  # NOTE(review): 4-day label offset -- confirm alignment
err = np.mean(np.abs(np.diff(df_test.value.values) - seir_label2))
err_list.append(err)
length_list.append(np.nan)  # no interval width computed for DELPHI
cprs_list.append(np.nan)    # no CRPS computed for DELPHI
# ### YYG
file_list = ['covid19hub/benchmark-04-25/' + '2020-04-27-YYG-ParamSearch.csv']
f = file_list[0]
m = 'YYG'
model_list.append(m)
df_bench = pds.read_csv(f)
df_bench['quantile'].unique()  # notebook residue; no effect
df_test = df_bench[(df_bench.type == 'point') & (df_bench.target.isin(pred_name)) & (df_bench.location_name == 'US')]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_test = df_test[df_test.target_end_date.isin(eval_days)]
# NOTE(review): 3-day label offset; presumably aligns forecast dates -- confirm.
if test_len == 7:
    seir_label2 = seir_label[3:]
else:
    seir_label2 = seir_label[3:10]
err = np.mean(np.abs(df_test.value.values - seir_label2))
err_list.append(err)
df_test = df_bench[
    (df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.025) & (
        df_bench.location_name == 'US')]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_low = df_test[df_test.target_end_date.isin(eval_days)]
df_test = df_bench[
    (df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench['quantile'] == 0.975) & (
        df_bench.location_name == 'US')]
df_test = df_test[['target_end_date', 'value']].groupby('target_end_date').sum().reset_index()
df_high = df_test[df_test.target_end_date.isin(eval_days)]
length = np.mean(df_high.value.values - df_low.value.values)
length_list.append(length)
## crps
# CRPS for YYG, mirroring the Imperial computation above.
seir_label2 = seir_label[3:10]
df_test = df_bench[(df_bench.type == 'quantile') & (df_bench.target.isin(pred_name)) & (df_bench.location_name == 'US')]
df_test = df_test[df_test.target_end_date.isin(eval_days)]
avail_days = df_test.target_end_date.unique()
crps_list = []
# BUG FIX: the original computed the CRPS term for avail_days[0] only (the
# `for i, day in enumerate(avail_days)` loop present in the Imperial section
# was missing here); score every available forecast day.
for i, day in enumerate(avail_days):
    df_sub = df_test[df_test['target_end_date'] == day]
    pred = df_sub.value.values
    proba = df_sub['quantile'].values
    less_ind = pred < seir_label2[i]
    proba_label = np.ones_like(proba)
    proba_label[less_ind] = 0
    crps_list.append(np.mean((proba_label - proba) ** 2))
cprs_list.append(np.mean(np.array(crps_list)))
# Collect all benchmark metrics into one table and persist it next to ours.
err_df = pds.DataFrame({
    'model': model_list,
    'error': err_list,
    'length': length_list,
    'cprs': cprs_list,
    'eval_date': test_date,
    'forecast_date': train_end_date
})
err_df.to_csv('tables/benchmark-US-14d.csv')
|
<reponame>BixinKey/electrum
import abc
import base64
import logging
from typing import Any, Dict, Iterable, List
from pycoin.coins.bitcoin import Tx as pycoin_tx
from trezorlib import btc as trezor_btc
from trezorlib import messages as trezor_messages
from electrum_gui.common.basic import bip44
from electrum_gui.common.coin import data as coin_data
from electrum_gui.common.hardware import interfaces as hardware_interfaces
from electrum_gui.common.provider import data, interfaces
from electrum_gui.common.provider.chains.btc.clients import blockbook
from electrum_gui.common.provider.chains.btc.sdk import transaction
logger = logging.getLogger("app.chain")
class BTCHardwareMixin(interfaces.HardwareSupportingMixin, abc.ABC):
    """Trezor hardware-wallet support for BTC-like chains.

    Concrete chain classes supply network parameters through the abstract
    properties below; this mixin translates between the generic provider
    data types and ``trezorlib`` calls.
    """

    # Injected by the concrete chain class; names the coin in trezorlib calls.
    chain_info: coin_data.ChainInfo

    @property
    @abc.abstractmethod
    def network(self) -> Any:
        """pycoin network object used to parse/serialize raw transactions."""
        pass

    @property
    @abc.abstractmethod
    def tx_version(self) -> int:
        """Transaction version passed to the device when signing."""
        pass

    @property
    @abc.abstractmethod
    def tx_op_return_size_limit(self) -> int:
        """Maximum OP_RETURN payload size accepted for this chain."""
        pass

    @property
    @abc.abstractmethod
    def client(self) -> blockbook.BlockBook:
        """Blockbook client used to fetch previous raw transactions."""
        pass

    def hardware_get_xpub(
        self,
        hardware_client: hardware_interfaces.HardwareClientInterface,
        bip44_path: bip44.BIP44Path,
        confirm_on_device: bool = False,
    ) -> str:
        """Read the extended public key at *bip44_path* from the device."""
        script_type = _get_hardware_input_secret_type_from_bip44_path(bip44_path)
        return trezor_btc.get_public_node(
            hardware_client,
            n=bip44_path.to_bip44_int_path(),
            show_display=confirm_on_device,
            coin_name=self.chain_info.name,
            script_type=script_type,
        ).xpub

    def hardware_get_address(
        self,
        hardware_client: hardware_interfaces.HardwareClientInterface,
        bip44_path: bip44.BIP44Path,
        confirm_on_device: bool = False,
    ) -> str:
        """Derive the address at *bip44_path* on the device."""
        script_type = _get_hardware_input_secret_type_from_bip44_path(bip44_path)
        address = trezor_btc.get_address(
            hardware_client,
            coin_name=self.chain_info.name,
            n=bip44_path.to_bip44_int_path(),
            show_display=confirm_on_device,
            script_type=script_type,
        )
        return address

    def _collect_raw_txs(self, txids: Iterable[str]) -> Dict[str, str]:
        """Fetch the raw hex of every txid via the Blockbook client."""
        raw_txs = {}
        for txid in txids:
            tx = self.client.get_transaction_by_txid(txid)
            raw_txs[txid] = tx.raw_tx
        return raw_txs

    def hardware_sign_transaction(
        self,
        hardware_client: hardware_interfaces.HardwareClientInterface,
        unsigned_tx: data.UnsignedTx,
        bip44_path_of_signers: Dict[str, bip44.BIP44Path],
    ) -> data.SignedTx:
        """Sign *unsigned_tx* on the device and return the serialized result.

        The device needs every previous transaction referenced by the inputs,
        so those are downloaded and re-encoded first.
        """
        prev_txids = set(i.utxo.txid for i in unsigned_tx.inputs)
        prev_raw_txs = self._collect_raw_txs(prev_txids)
        # trezorlib expects previous transactions keyed by raw txid bytes.
        prev_txs = {bytes.fromhex(txid): _build_prev_tx(self.network, raw_tx) for txid, raw_tx in prev_raw_txs.items()}
        inputs = _build_hardware_inputs(unsigned_tx.inputs, bip44_path_of_signers)
        outputs = _build_hardware_outputs(unsigned_tx.outputs, unsigned_tx.payload, self.tx_op_return_size_limit)
        # noinspection PyTypeChecker
        _, raw_tx_bytes = trezor_btc.sign_tx(
            hardware_client, self.chain_info.name, inputs, outputs, prev_txes=prev_txs, version=self.tx_version
        )
        tx: pycoin_tx.Tx = self.network.tx.from_bin(raw_tx_bytes)
        # Attach the spent outputs so the chain-specific post-check can validate.
        spendables = transaction.create_spendables_from_inputs(self.network, unsigned_tx.inputs)
        tx.set_unspents(spendables)
        self._check_tx_after_signed(tx)
        return data.SignedTx(txid=tx.id(), raw_tx=tx.as_hex())

    @abc.abstractmethod
    def _check_tx_after_signed(self, tx: pycoin_tx.Tx):
        """Chain-specific sanity check of the freshly signed transaction."""
        pass

    def hardware_sign_message(
        self,
        hardware_client: hardware_interfaces.HardwareClientInterface,
        message: str,
        signer_bip44_path: bip44.BIP44Path,
    ) -> str:
        """Sign a text message on the device; returns a base64 signature."""
        script_type = _get_hardware_input_secret_type_from_bip44_path(signer_bip44_path)
        signature_bytes = trezor_btc.sign_message(
            hardware_client,
            coin_name=self.chain_info.name,
            n=signer_bip44_path.to_bip44_int_path(),
            message=message,
            script_type=script_type,
        ).signature
        return base64.b64encode(signature_bytes).decode()

    def hardware_verify_message(
        self,
        hardware_client: hardware_interfaces.HardwareClientInterface,
        address: str,
        message: str,
        signature: str,
    ) -> bool:
        """Verify a base64-encoded message signature against *address*."""
        signature_bytes = base64.b64decode(signature)
        return trezor_btc.verify_message(
            hardware_client,
            coin_name=self.chain_info.name,
            address=address,
            signature=signature_bytes,
            message=message,
        )
def _build_hardware_inputs(
    inputs: List[data.TransactionInput], bip44_path_of_signers: Dict[str, bip44.BIP44Path]
) -> List[trezor_messages.TxInputType]:
    """Convert generic transaction inputs into Trezor ``TxInputType`` messages."""
    converted = []
    for tx_input in inputs:
        signer_path = bip44_path_of_signers[tx_input.address]
        converted.append(
            trezor_messages.TxInputType(
                script_type=_get_hardware_input_secret_type_from_bip44_path(signer_path),
                address_n=signer_path.to_bip44_int_path(),
                prev_hash=bytes.fromhex(tx_input.utxo.txid),
                prev_index=int(tx_input.utxo.vout),
                amount=int(tx_input.utxo.value),
            )
        )
    return converted
def _build_hardware_outputs(
    outputs: List[data.TransactionOutput], payload: dict, op_return_size_limit: int
) -> List[trezor_messages.TxOutputType]:
    """Convert generic outputs (plus an optional OP_RETURN payload) into Trezor messages."""
    converted = []
    for output in outputs:
        fields = dict(amount=int(output.value))
        bip44_path_str = output.payload.get("bip44_path", None)
        if output.payload.get("is_change", False) and bip44_path_str:
            # Change goes back to our own wallet: identify it by derivation path.
            change_path = bip44.BIP44Path.from_bip44_path(bip44_path_str)
            fields["script_type"] = _get_hardware_output_secret_type_from_bip44_path(change_path)
            fields["address_n"] = change_path.to_bip44_int_path()
        else:
            fields["script_type"] = trezor_messages.OutputScriptType.PAYTOADDRESS
            fields["address"] = output.address
        converted.append(trezor_messages.TxOutputType(**fields))
    if payload and payload.get("op_return"):
        op_return: bytes = payload["op_return"].encode()
        if len(op_return) > op_return_size_limit:
            # Log then truncate instead of failing the whole signing flow.
            logger.warning(
                f"OP_RETURN exceed limit for hardware. "
                f"op_return_size_limit: {op_return_size_limit}, now got: {len(op_return)}"
            )
            op_return = op_return[:op_return_size_limit]
        converted.append(
            trezor_messages.TxOutputType(
                amount=0, script_type=trezor_messages.OutputScriptType.PAYTOOPRETURN, op_return_data=op_return
            )
        )
    return converted
def _build_prev_tx(network, raw_tx: str) -> trezor_messages.TransactionType:
    """Re-encode a raw hex transaction as the ``TransactionType`` Trezor expects."""
    parsed: pycoin_tx.Tx = network.tx.from_hex(raw_tx)
    prev_tx = trezor_messages.TransactionType()
    prev_tx.version = int(parsed.version)
    prev_tx.lock_time = int(parsed.lock_time)
    converted_inputs = []
    for tx_in in parsed.txs_in:
        converted_inputs.append(
            trezor_messages.TxInputType(
                prev_hash=tx_in.previous_hash[::-1],  # flip byte order for the device
                prev_index=tx_in.previous_index,
                script_sig=tx_in.script,
                sequence=tx_in.sequence,
            )
        )
    prev_tx.inputs = converted_inputs
    prev_tx.bin_outputs = [
        trezor_messages.TxOutputBinType(amount=tx_out.coin_value, script_pubkey=tx_out.script)
        for tx_out in parsed.txs_out
    ]
    return prev_tx
def _get_hardware_input_secret_type_from_bip44_path(bip44_path: bip44.BIP44Path) -> int:
    """Map a BIP44 purpose (44/49/84/48) to the Trezor input script type."""
    purpose = bip44_path.index_of(bip44.BIP44Level.PURPOSE)
    if purpose == 44:
        return trezor_messages.InputScriptType.SPENDADDRESS
    if purpose == 49:
        return trezor_messages.InputScriptType.SPENDP2SHWITNESS
    if purpose in (84, 48):
        return trezor_messages.InputScriptType.SPENDWITNESS
    raise Exception(f"Invalid purpose: {purpose}")
def _get_hardware_output_secret_type_from_bip44_path(bip44_path: bip44.BIP44Path) -> int:
    """Map a BIP44 purpose (44/49/84/48) to the Trezor output script type."""
    purpose = bip44_path.index_of(bip44.BIP44Level.PURPOSE)
    if purpose == 44:
        return trezor_messages.OutputScriptType.PAYTOADDRESS
    if purpose == 49:
        return trezor_messages.OutputScriptType.PAYTOP2SHWITNESS
    if purpose in (84, 48):
        return trezor_messages.OutputScriptType.PAYTOWITNESS
    raise Exception(f"Invalid purpose: {purpose}")
|
<gh_stars>0
from kivy.lang import Builder
from kivy.uix.scrollview import ScrollView
from kivy.uix.label import Label
from kivy.metrics import sp
from kivy.properties import (
StringProperty, ObjectProperty, BooleanProperty,
NumericProperty, ListProperty
)
from json import dumps
from kivy_modules.behavior.textbehavior import Prettify
class FlexLabel(Label):
    """Label with a rounded, colourable background (drawn by the kv rule).

    ``fg_color`` drives the text colour via ``on_fg_color``; ``bg_color``
    is painted by the kv ``canvas.before`` rule.
    """
    font_size = NumericProperty(sp(15))
    bold = BooleanProperty(False)
    radius = ListProperty([0])  # corner radii of the background rectangle
    halign = StringProperty('center')
    valign = StringProperty('middle')
    color = ListProperty([.2,.2,.2,1])
    fg_color = ListProperty([.2,.2,.2,1])
    bg_color = ListProperty([1,1,1,1])

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Re-apply the colour kwargs explicitly, falling back to the current
        # property values when they were not supplied.
        self.fg_color = kwargs.get("fg_color", self.color)
        self.bg_color = kwargs.get("bg_color", self.bg_color)

    def on_fg_color(self, obj, val):
        # Keep the actual Label text colour in sync with fg_color.
        self.color = val
class FlexPrettyLabel(FlexLabel, Prettify):
    """FlexLabel combined with the ``Prettify`` text behavior.

    The original defined an ``__init__`` that only forwarded to ``super()``,
    which is exactly what the inherited constructor already does, so the
    redundant override was removed.
    """
class ScrollableLabel(ScrollView):
    """ScrollView wrapping a single Label (built in kv) with clickable markup refs.

    Touches are hit-tested against the label's ``[ref=...]`` zones and
    dispatched as ``on_ref_press`` events.
    """
    text = StringProperty('')
    ref_press = ObjectProperty(None)
    markup = BooleanProperty(False)
    __events__ = ['on_ref_press']
    radius = ListProperty([0])

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # ~ Clock.schedule_once(self.start, .5)

    # ~ def start(self, *args):
    # ~     self.children[0].ref_press = on_ref_press

    def on_touch_down(self, touch):
        label = self.children[0]
        # Dispatch past Label's own handler (super(Label, label) starts the
        # MRO lookup *after* Label).
        if super(Label, label).on_touch_down(touch):
            return True
        if not len(label.refs):
            return False
        # Convert the touch into texture coordinates (origin at top-left).
        tx, ty = touch.pos
        tx -= label.center_x - label.texture_size[0] / 2.
        ty -= label.center_y - label.texture_size[1] / 2.
        ty = label.texture_size[1] - ty
        for uid, zones in label.refs.items():
            for zone in zones:
                # NOTE(review): despite the names, `w`/`h` are the zone's far
                # corner coordinates (x2, y2), as the comparisons show.
                x, y, w, h = zone
                if x <= tx <= w and y <= ty <= h:
                    self.dispatch('on_ref_press', uid)
                    return True
        return False

    def on_ref_press(self, ref):
        # Default handler: just log the pressed ref id.
        print(ref)
class PrettyLabel(ScrollView):
    """Scrollable label showing a pretty (indented JSON) representation of a
    Python object, inspired by the pprint module."""
    text = StringProperty('')
    padding = ListProperty([5,5])
    radius = ListProperty([0])
    do_scroll_y = BooleanProperty(True)
    do_scroll_x = BooleanProperty(True)
    # scroll_type = ListProperty(['bars'])
    effect_cls = StringProperty('ScrollEffect')
    bar_width = NumericProperty(10)
    bar_color = ListProperty([.2, .2, 1, 1])
    fg_color = ListProperty([.2, .2, .2, 1])
    bg_color = ListProperty([1,1,1,1])
    orientation = StringProperty("vertical")

    def __init__(self, *args, **kwargs):
        # BUG FIX: the constructor was named `init` and called the
        # nonexistent `super(PrettyLabel, self).init(...)`, so it never ran
        # (and raised AttributeError if invoked explicitly).
        super().__init__(*args, **kwargs)
        self.radius = kwargs.get("radius", self.radius)

    def pprint(self, obj, sort_keys = False, indent = 4):
        """Pretty-print *obj* into the label as indented JSON text."""
        self.text = dumps(
            obj, sort_keys = sort_keys,
            indent = indent, separators = (',', ': ')
        )

    def write(self, text):
        """File-like append so the widget can be used as an output stream."""
        self.text += text
Builder.load_string('''
<ScrollableLabel>:
Label:
size_hint_y: None
height: self.texture_size[1]
text_size: self.width, None
text: root.text
markup: True
<FlexLabel>:
fg_color: root.fg_color
bg_color: root.bg_color
text_size: self.size
canvas.before:
Color:
rgba: root.bg_color
RoundedRectangle:
pos: root.pos
size: root.size
radius: root.radius
<FlexPrettyLabel>:
text_size: root.size
<PrettyLabel>:
FlexLabel:
color: root.fg_color
size_hint: [None,None]
size: [0,0]
width: dp(len(self.text)*2) if root.orientation == "horizontal" else root.width #self.texture_size[0]
height: dp(len(self.text)) if root.orientation == "vertical" else root.height #self.texture_size[1]
text_size: [self.width, self.height]
padding: root.padding
text: root.text
markup: True
canvas.before:
Color:
rgba: root.bg_color
RoundedRectangle:
pos: self.pos
size: self.size
radius: root.radius
''')
if __name__ == "__main__":
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
class LabelApp(App):
def build(self):
text = ["hello","world"]*10
box = BoxLayout(orientation="vertical")
box.add_widget(FlexLabel(text=str(text)))
box.add_widget(FlexPrettyLabel(data=text))
box.add_widget(ScrollableLabel(text=str(text)))
box.add_widget(PrettyLabel(text=str(text), radius=[30],
orientation="vertical"))
return box
LabelApp().run() |
<filename>shard/config/database_config.py<gh_stars>10-100
import copy
import urllib.parse
from typing import Dict, List, Optional, Tuple
from dj_database_url import config
from shard.constants import DATABASE_CONFIG_MASTER, DATABASE_CONFIG_SHARD_GROUP, DATABASE_CONFIG_SHARD_NUMBER
__all__ = ('make_shard_configuration', 'make_replication_configuration', )
def make_shard_configuration(
    shard_group: str, database_name: str, logical_count: int, conn_max_age: int, shards: List, options: Dict
) -> Dict:
    """Build Django DATABASES entries for every logical shard.

    Each physical shard hosts ``logical_count / len(shards)`` logical
    databases; every logical database gets a master entry plus one entry per
    replica.
    """
    shard_count = len(shards)
    per_shard = _get_logical_count_per_shard(logical_count=logical_count, shard_count=shard_count)
    configuration: Dict = {}
    for shard_index, shard in enumerate(shards):
        master = shard['master']
        slaves = shard.get('slaves', [])
        first, last = _get_logical_range(shard_index, per_shard)
        for logical_index in range(first, last):
            name = _make_shard_name(
                shard_group=shard_group, shard_count=shard_count, shard_index=shard_index, logical_index=logical_index
            )
            shard_master, shard_slaves = _make_shard_config(master, slaves, database_name, logical_index, options)
            replication = make_replication_configuration(
                key=name, master=shard_master, slaves=shard_slaves, conn_max_age=conn_max_age,
                shard_info={'shard_group': shard_group, 'shard_number': logical_index},
            )
            configuration.update(replication)
    return configuration
def make_replication_configuration(key: str, master: Dict, slaves: List[Dict], conn_max_age: int, shard_info: Optional[Dict]=None) -> Dict:
    """Build the master entry plus ``<key>_slave_<n>`` entries for its replicas."""
    if shard_info is None:
        shard_info = {}
    result = {
        key: _generate_database_config(
            database_url=master['url'], conn_max_age=master.get('conn_max_age', conn_max_age), **shard_info
        )
    }
    for slave_index, slave in enumerate(slaves):
        result['%s_slave_%d' % (key, slave_index)] = _generate_database_config(
            database_url=slave['url'], conn_max_age=slave.get('conn_max_age', conn_max_age), is_replica_of=key
        )
    return result
def _make_shard_config(master_shard: Dict, slave_shards: List, database_name: str, logical_index: int, options: Dict) -> Tuple[Dict, List]:
    """Deep-copy the master/slave configs and point their URLs at this logical DB.

    The inputs are never mutated; per-shard ``options`` override the shared
    ``options`` when both define the same key.
    """
    def _retarget_url(shard: Dict) -> None:
        # Rewrite the copy's URL in place to select the logical database.
        shard['url'] = _make_shard_database_url(
            origin=shard['url'], database_name=database_name, logical_index=logical_index,
            db_options={**options, **shard.get('options', {})},
        )

    master_copy = copy.deepcopy(master_shard)
    _retarget_url(master_copy)
    slave_copies = copy.deepcopy(slave_shards)
    for slave_copy in slave_copies:
        _retarget_url(slave_copy)
    return master_copy, slave_copies
def _generate_database_config(
    database_url: str, conn_max_age: int, is_replica_of: Optional[str]=None, shard_group: Optional[str]=None,
    shard_number: Optional[int]=None
) -> Dict:
    """Parse a database URL into a Django config dict, tagging replica/shard metadata."""
    parsed = config(default=database_url, conn_max_age=conn_max_age)
    if is_replica_of:
        parsed[DATABASE_CONFIG_MASTER] = is_replica_of
    shard_is_known = shard_group is not None and shard_number is not None
    if shard_is_known:
        parsed[DATABASE_CONFIG_SHARD_GROUP] = shard_group
        parsed[DATABASE_CONFIG_SHARD_NUMBER] = shard_number
    return parsed
def _get_logical_count_per_shard(logical_count: int, shard_count: int) -> int:
return int(logical_count / shard_count)
def _get_logical_range(shard_index: int, logical_count_per_shard: int) -> Tuple[int, int]:
start = shard_index * logical_count_per_shard
end = (shard_index + 1) * logical_count_per_shard
return start, end
def _make_shard_database_url(origin: str, database_name: str, logical_index: int, db_options: Optional[Dict]=None) -> str:
if origin == 'sqlite://:memory:':
return origin
database_name = '%s_%d' % (database_name, logical_index)
database_url = '%s%s' % (origin, database_name)
if db_options:
database_url = database_url + '?' + urllib.parse.urlencode(db_options)
return database_url
def _make_shard_name(shard_group: str, shard_count: int, shard_index: int, logical_index: int) -> str:
return '%s_%d_%d_%d' % (shard_group, shard_count, shard_index, logical_index)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 19 06:54:04 2020
@author: <NAME>
## TARNSFERRED TO PYQT GUI
"""
import time, datetime
import requests#, threading
import sys
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow,\
QPushButton, QWidget, QFormLayout, \
QLineEdit, QToolBar, QStatusBar, \
QVBoxLayout, QTextEdit
import math
from twisted.internet import reactor
import numpy as np
import os
import _pickle as cPickle
def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    # PyInstaller unpacks bundled data files into the _MEIPASS temp folder;
    # during development fall back to the current working directory.
    base_path = getattr(sys, "_MEIPASS", None)
    if base_path is None:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
# Absolute path to the bundled application icon (PyInstaller-safe).
Logo = resource_path("logo.png")
#1 EXTERNAL LIBRARIES
from binance_api import BinanceSocketManager
from binance_api import BinanceAPIException
from lib_pump_dump import AnotherWindow, AnotherWindowDynamicBTC,\
    AnotherWindowDynamicFS, AnotherWindowDynamic, \
    AnotherWindowConfig, AnotherWindowpnl, AnotherWindowtrade
BOT_START_TIME = time.time()  # epoch seconds (time.time() returns seconds, not ms as the old comment said)
frame_title = "Binance bot- Alpha"
#%% POST TREATMENT MODULE
class currency_container:
    """Rolling per-symbol price/volume state built from a Binance ticker dict.

    ``currencyArray`` is a raw websocket ticker payload ('s' symbol, 'c' last
    price, 'b' bid, 'a' ask, 'p' price change, 'v' volume).  The class name is
    kept lowercase for backward compatibility with existing callers.
    """

    def __init__(self, currencyArray, candle_len=10, mode='last_price'):
        # v is quote volue (BTC), q is base value (USDT)
        self.symbol = currencyArray['s']
        initial_timestamp = time.time()
        self.time_stamp = initial_timestamp
        self.time_stamp_reset = initial_timestamp
        self.volume24hr = 0.0
        if mode == 'bid_ask':
            # Mid price between bid and ask.  BUG FIX: the original fell
            # through to the key-based lookup with `key` never assigned for
            # this mode and raised NameError before reaching the mid-price code.
            price = (float(currencyArray['b']) + float(currencyArray['a'])) / 2.
        else:
            if mode == 'market':
                key = 'p'
            elif mode == 'last_price':
                key = 'c'
                self.volume24hr = float(currencyArray['v'])
            elif mode == 'bid':
                key = 'b'
            elif mode == 'ask':
                key = 'a'
            else:
                raise ValueError(f"unsupported price mode: {mode}")
            price = float(currencyArray[key])
        self.bid_price = price
        # Seed the rolling candle window with the initial price/timestamp.
        self.price_time = [1.0 * price for _ in range(candle_len)]
        self.time_stamp_period = [1.0 * initial_timestamp for _ in range(candle_len)]
        ### single price changes for different candles
        self.percent_chgsP = 0.0
        self.profit_percentP = 0.0
class Window(QMainWindow):
    """Main Window."""

    def __init__(self, parent=None):
        """Initializer: restore persisted trades, build the GUI, start the
        5-second persistence timer."""
        super().__init__(parent)
        QMainWindow.__init__(self)  # NOTE(review): redundant second base-class init; kept as-is
        app_icon = QtGui.QIcon()
        app_icon.addFile('logo.png', QtCore.QSize(16,16))
        self.setWindowIcon(app_icon)
        self.nb_trades = 0
        self.list_of_trades = []
        # Restore the trade book persisted by update_(); start empty if the
        # pickle is missing or unreadable.
        try:
            with open("active_trade.pickle", "rb") as input_file:
                self.trades_completed = cPickle.load(input_file)
        except:
            self.trades_completed = {}
        # TODO dump trades_completed and load to keep track
        self.new_list = {}
        self.indicator = 'none'  # trade direction: 'none' | 'long' | 'short'
        self.enabledT = False
        self.api = None  # exchange client; set later via the config dialog
        self.state = 0
        self._sockets = {}
        self.popup_cnt = 0
        self.popup_cnt1 = 0
        self.running = False
        self.setWindowTitle(frame_title)
        # self._createMenu()
        self._createToolBar()
        self._createStatusBar()
        # Add box layout, add table to box layout and add box layout to widget
        self.layout = QVBoxLayout()
        self._centralWidget = QWidget(self)
        self.setCentralWidget(self._centralWidget)
        self._centralWidget.setLayout(self.layout)
        self._createDisplay() ## display screen
        self.setDisplayText(frame_title)
        self.setDisplayText("GUI initialized! \nPlease configure bot settings first\n")
        self._formLayout() ## buttons and layout
        self.popups = []  # keeps references to secondary windows
        # Persist the trade book every 5 seconds.
        self.timer1temp = QtCore.QTimer()
        self.timer1temp.setInterval(int(5000))
        self.timer1temp.timeout.connect(self.update_)
        self.timer1temp.start()
    def update_(self,):
        """QTimer slot: persist the in-memory trade book to disk."""
        with open("active_trade.pickle", "wb") as output_file:
            cPickle.dump(self.trades_completed, output_file)
def closeEvent(self, event):
# Return stdout to defaults.
try:
reactor.stop()
self.on_manual_sell()
except:
pass
self.close
QApplication.closeAllWindows()
super().closeEvent(event)
    def _createDisplay(self):
        """Create the display."""
        # Create the display widget
        self.display = QTextEdit()
        self.display.setReadOnly(True)
        # Add the display to the general layout
        self.layout.addWidget(self.display)

    def setDisplayText(self, text):
        """Append *text* to the console view and keep the cursor at the end."""
        self.display.append('%s'%text)
        self.display.moveCursor(QtGui.QTextCursor.End)
        self.display.setFocus()

    def _createMenu(self):
        """Build the (currently unused) menu bar with an Exit action."""
        self.menu = self.menuBar().addMenu("&Menu")
        self.menu.addAction('&Exit', self.close)

    def _createToolBar(self):
        """Build the top toolbar.  Plotting actions start disabled;
        presumably re-enabled once the exchange client is configured -- confirm."""
        self.tools = QToolBar()
        self.addToolBar(self.tools)
        self.trialtoolbar1 = self.tools.addAction('Price change plot', self.show_new_window)
        self.trialtoolbar2 = self.tools.addAction('Dynamic plot', self.show_new_window_dynamic)
        self.trialtoolbar4 = self.tools.addAction('Futures stat', self.show_new_window_dynamicFS)
        self.trialtoolbar3 = self.tools.addAction('Stats (several exchange)', self.show_new_window_dynamicBTC)
        self.trialtoolbar1.setEnabled(False)
        self.trialtoolbar2.setEnabled(False)
        self.trialtoolbar4.setEnabled(False)
        # self.trialtoolbar31.setEnabled(False)
    def show_new_window_nimpl(self):
        """Placeholder for toolbar actions that are not implemented yet."""
        self.write_to_console("Functions not available as of now; in development", to_push=1)

    def show_new_windowtrade(self, trades_completed):
        """Open the trades window for the given trade book."""
        w9 = AnotherWindowtrade(self.api, self.exchange, trades_completed)
        w9.show()
        self.popups.append(w9)  # keep a reference so the window stays alive

    def show_new_window(self):
        """Open the price-change plot window."""
        w = AnotherWindow(self.api, self.exchange)
        w.show()
        self.popups.append(w)

    def show_new_windowpnl(self):
        """Open the PnL window; its socket signals are handled by postprocesspnl."""
        w123 = AnotherWindowpnl(self.api, self.exchange, BOT_START_TIME)
        w123.got_signal_socket.connect(self.postprocesspnl)
        w123.show()
        self.popups.append(w123)

    def show_new_window_dynamicBTC(self):
        """Open the multi-exchange stats window; signals feed postprocessliq."""
        w165 = AnotherWindowDynamicBTC(self.setDisplayText)
        w165.got_signal.connect(self.postprocessliq)
        w165.show()
        self.popups.append(w165)

    def show_new_window_dynamicFS(self):
        """Open the futures-stats window; results are handled by postprocessFS."""
        w1 = AnotherWindowDynamicFS(self.api)
        w1.got_text.connect(self.postprocessFS)
        w1.show()
        self.popups.append(w1)

    def show_new_window_dynamic(self):
        """Open the dynamic plot window."""
        w175 = AnotherWindowDynamic(self.api, self.exchange)
        w175.show()
        self.popups.append(w175)

    def show_new_window_config(self):
        """Open the configuration dialog; results are applied in postprocess."""
        w2 = AnotherWindowConfig(self.api, state=self.state)
        w2.got_password.connect(self.postprocess)
        w2.show()
        self.state = self.state +1
        self.popups.append(w2)
def postprocessFS(self, emit_dict):
coins_ = np.array(emit_dict["coin"]).flatten()
timeframe_ = np.array(emit_dict['TimeFrame']).flatten()
trades1 = np.array(emit_dict['topLongShortAccountRatio']).flatten()
trades2 = np.array(emit_dict['topLongShortPositionRatio']).flatten()
trades3 = np.array(emit_dict['globalLongShortAccountRatio']).flatten()
trades4 = np.array(emit_dict['takerlongshortRatio']).flatten()
# every 5min and 1hour
ind_ = trades1.argsort()[-5:][::-1]
for i in ind_:
line = coins_[i]+" has HIGHEST Long/Short ACCOUNT ratio in "+\
timeframe_[0]+" timeframe with "+\
str(trades1[i])
self.write_to_console(line, to_push=1)
self.write_to_console("\n", to_push=1)
ind_ = trades2.argsort()[-5:][::-1]
for i in ind_:
line = coins_[i]+\
" has HIGHEST Long/Short POSITION ratio in "+\
timeframe_[0]+" timeframe with "+\
str(trades2[i])
self.write_to_console(line, to_push=1)
self.write_to_console("\n", to_push=1)
ind_ = trades3.argsort()[-5:][::-1]
for i in ind_:
line = coins_[i]+\
" has HIGHEST Long/Short GLOBAL ACCOUNT ratio in "+\
timeframe_[0]+" timeframe with "+\
str(trades3[i])
self.write_to_console(line, to_push=1)
self.write_to_console("\n", to_push=1)
ind_ = trades4.argsort()[-5:][::-1]
for i in ind_:
line = coins_[i]+\
" has HIGHEST BUY/SELL volume ratio in "+\
timeframe_[0]+" timeframe with "+\
str(trades4[i])
self.write_to_console(line, to_push=1)
self.write_to_console("\n", to_push=1)
    def postprocessliq(self, emit_dict):
        """Apply settings emitted by the BTC-stats window: raise the trade
        limit, update funds, and lock the strategy direction."""
        self.lim_trades = self.lim_trades + emit_dict["limtrades"]
        self.text1 = ["adding a shift in the market based on BTC movements"]
        self.usdt_addfunds = emit_dict["safety"]
        self.usdt_invest = emit_dict["investment"]
        if emit_dict["direction"] == "LONG":
            self.indicator = 'long'
            self.cb_strategy.setText("LONG")
        else:
            self.indicator = 'short'
            self.cb_strategy.setText("SHORT")
        # Lock the strategy field once a direction has been chosen.
        self.cb_strategy.setReadOnly(True)
        # print status
        for line in self.text1:
            self.write_to_console(line, to_push=1)
def postprocesspnl(self, emit_dict):
symbol = emit_dict["signal"]
if symbol == "SOS":
tempp = self.api.futures_get_open_orders()
ss = []
for j in tempp:
ss.append(j['symbol'])
ss = np.unique(ss)
for i in ss:
try:
_ = self.api.futures_cancel_all_open_orders(symbol=i)
except:
pass
line = "SOS Liquidation approaching; cancelling all open orders"
self.write_to_console(line, to_push=1)
else:
try:
_ = self.api.futures_cancel_all_open_orders(symbol=symbol)
except:
line = "No open orders exists for "+symbol
self.write_to_console(line, to_push=1)
try:
self.trades_completed[symbol]["trade_status"] = "finished"
except:
line = symbol + " doesn't exist in trade database; not managed by bot"
self.write_to_console(line, to_push=1)
self.update_()
self.stop_ticker_symbol(symbol)
    def postprocess(self, emit_dict):
        """Load the settings emitted by the configuration popup.

        Copies every value from *emit_dict* onto the main window, then
        mirrors the derived state (exchange type, order strategy, trading
        currency, masked API credentials) into read-only GUI fields and
        echoes the status lines to the console/Telegram.
        """
        # -- raw settings from the dialog --------------------------------
        self.text1 = emit_dict["text1"]
        self.live_trade = emit_dict["live_trade"]
        self.take_profit = emit_dict["take_profit"]
        self.enabledT = emit_dict["enabledT"]  # Telegram push on/off
        self.bot_chatID = emit_dict["bot_chatID"]
        self.bot_token = emit_dict["bot_token"]
        self.ttp = emit_dict["ttp"]
        self.lim_trades = emit_dict["lim_trades"]
        self.profit_percent = emit_dict["profit_percent"]
        self.take_profit_trailing = emit_dict["take_profit_trailing"]
        self.safety_trade_percent = emit_dict["safety_trade_percent"]
        self.usdt_addfunds = emit_dict["usdt_addfunds"]
        self.usdt_invest = emit_dict["usdt_invest"]
        self.leverage = emit_dict["leverage"]
        self.lim_trades_per_coin = emit_dict["lim_trades_per_coin"]
        self.trade_per_coin = emit_dict["trade_per_coin"]
        self.coins = emit_dict["coins"]
        self.black_list = emit_dict["black_list"]
        self.api = emit_dict["binance_client"]
        bin_key = emit_dict["binance_key"]
        bin_secret = emit_dict["binance_secret"]
        self.price_analysis_mode = emit_dict["price_analysis_mode"]
        self.candlesP = emit_dict["candlesP"]
        self.is_exchange_market = emit_dict["is_exchange_market"]
        self.is_order_market = emit_dict["is_order_market"]
        self.basecurrency = emit_dict["basecurrency"]
        self.mode_analysis = emit_dict["mode_analysis"]
        # Manual analysis lets the user edit the pump/dump threshold fields.
        if self.mode_analysis == "Automatic":
            self.price_pd.setEnabled(False)
            self.price_dp.setEnabled(False)
        else:
            self.price_pd.setEnabled(True)
            self.price_dp.setEnabled(True)
        # -- derived exchange/strategy display state ---------------------
        if self.is_exchange_market:
            self.exchange = "FUTURES"
            if self.is_order_market:
                self.indicator = 'long'
            else:
                self.indicator = 'short'
            self.cb_exchange.setText("Binance Futures")
        else:
            self.exchange = "SPOT"
            self.leverage = 1
            self.indicator = 'long' # only long is allowed in spot
            self.cb_exchange.setText("Binance Spot")
        self.cb_exchange.setReadOnly(True)
        if self.is_order_market:
            self.cb_strategy.setText("LONG")
        else:
            self.cb_strategy.setText("SHORT")
        self.cb_strategy.setReadOnly(True)
        self.base_currencys.setText(self.basecurrency)
        self.base_currencys.setReadOnly(True)
        self.temp01.setText(str(self.live_trade))
        self.temp01.setReadOnly(True)
        self.temp02.setText(str(self.enabledT))
        self.temp02.setReadOnly(True)
        # Show the credentials masked, then lock the fields.
        if bin_key != None and bin_secret != None:
            self.api_key_entry.setText(bin_key)
            self.api_key_entry.setEchoMode(QLineEdit.EchoMode.Password)
            self.api_key_entry.setReadOnly(True)
            self.api_secret_entry.setText(bin_secret)
            self.api_secret_entry.setEchoMode(QLineEdit.EchoMode.Password)
            self.api_secret_entry.setReadOnly(True)
        # print status
        for line in self.text1:
            self.write_to_console(line, to_push=1)
def _createStatusBar(self):
self.status = QStatusBar()
self.status.showMessage("Bot status will be shown here")
self.setStatusBar(self.status)
    def _formLayout(self):
        """Build the main form: read-only status fields, credential entries,
        pump/dump threshold fields and the start/stop buttons, then attach
        the form to the window layout."""
        self.formLayout = QFormLayout()
        # Read-only mirrors of the loaded configuration (filled in postprocess).
        self.temp01 = QLineEdit()
        self.temp02 = QLineEdit()
        self.cb_exchange = QLineEdit()
        self.cb_strategy = QLineEdit()
        self.base_currencys = QLineEdit()
        # button for binance exchange connection
        self.btn = QPushButton('Connect to Exchange')
        self.btn.clicked.connect(self.on_connect_api)
        # self.btn.setEnabled(False)
        # button for bot start
        self.btn_bstart = QPushButton('Start bot')
        self.btn_bstart.clicked.connect(self.on_pump)
        # button for bot stop
        self.btn_bstop = QPushButton('Stop bot')
        self.btn_bstop.clicked.connect(self.on_manual_sell)
        self.btn_bstop.setEnabled(False)
        # Closes the trailing-take-profit price sockets only.
        self.btn_bstoptp = QPushButton('Stop TP')
        self.btn_bstoptp.clicked.connect(self.stop_tp_sockets)
        self.btn_bstoptp.setEnabled(False)
        self.btn_config_trial = QPushButton('Configure bot settings (static and dynamic)')
        self.btn_config_trial.clicked.connect(self.show_new_window_config)
        ## api key and secret Qline
        self.api_key_entry = QLineEdit()
        self.api_secret_entry = QLineEdit()
        # Pump/dump price-change thresholds (percent), with defaults.
        self.price_pd = QLineEdit() # auto_sell_spinbox
        self.price_dp = QLineEdit() # stop_loss_spinbox
        self.price_pd.setText("1.2")
        self.price_dp.setText("10")
        # Row order below defines the on-screen layout.
        self.formLayout.addRow(self.btn_config_trial)
        self.formLayout.addRow('Exchange type:', self.cb_exchange)
        self.formLayout.addRow('Order strategy:', self.cb_strategy)
        self.formLayout.addRow('Trading currency:', self.base_currencys)
        self.formLayout.addRow('Exchange API key:', self.api_key_entry)
        self.formLayout.addRow('Exchange API secret:', self.api_secret_entry)
        self.formLayout.addRow('Live trade:', self.temp01)
        self.formLayout.addRow('Telegram:', self.temp02)
        self.formLayout.addRow('', self.btn)
        self.formLayout.addRow('Price change for PUMP/DUMP (%):', self.price_pd)
        self.formLayout.addRow('Price change for DUMP/PUMP (%):', self.price_dp)
        self.formLayout.addRow(self.btn_bstop, self.btn_bstart)
        self.formLayout.addRow("stop trailing profit socket", self.btn_bstoptp)
        self.layout.addLayout(self.formLayout)
# BOT FUNCTIONS
def stop_tp_sockets(self):
try:
for symbol in self._sockets:
bm61 = self._sockets[symbol]["socketmanager"]
key61 = self._sockets[symbol]["key"]
bm61.stop_socket(key61)
bm61.close()
self._sockets[symbol]["socketmanager"] = ""
self._sockets[symbol]["key"] = ""
self.write_to_console("Socket closed for "+symbol, to_push=1)
except:
self.write_to_console("Socket is empty", to_push=1)
def write_to_console(self, line, to_push=0):
self.setDisplayText(str(line.encode('utf-8','ignore'),errors='ignore'))
if self.enabledT and to_push==1:
percent=str(line.encode('utf-8','ignore'),errors='ignore')
send_text='https://api.telegram.org/bot' + self.bot_token + '/sendMessage?chat_id=' + self.bot_chatID + '&parse_mode=Markdown&text=' + percent
requests.get(send_text)
def precision_and_scale(self, x):
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return scale
def on_connect_api(self):
try:
if self.api == None:
self.write_to_console("Missing API info. Load config first", to_push=1)
return
if self.is_exchange_market:
info = self.api.futures_exchange_info()
## QUANTITY precision for Trailing stop market orders
self.price_precision = {}
self.quantity_precision = {}
for s in info['symbols']:
symbol = s['symbol']
self.quantity_precision[symbol] = s["quantityPrecision"]
for jj in s["filters"]:
if jj["filterType"] == "PRICE_FILTER":
self.price_precision[symbol] = self.precision_and_scale(float(jj["tickSize"]))
else:
info = self.api.get_exchange_info()
## QUANTITY precision for Trailing stop market orders
self.price_precision = {}
self.quantity_precision = {}
for s in info['symbols']:
symbol = s['symbol']
for ij in s['filters']:
if ij['filterType'] == "PRICE_FILTER":
self.price_precision[symbol] = self.precision_and_scale(float(ij["minPrice"]))
if ij['filterType'] == "LOT_SIZE":
self.quantity_precision[symbol] = self.precision_and_scale(float(ij["minQty"]))
if self.mode_analysis == "Automatic":
self.price_pd.setEnabled(False)
self.price_dp.setEnabled(False)
else:
self.price_pd.setEnabled(True)
self.price_dp.setEnabled(True)
self.btn_bstart.setEnabled(True)
self.btn_bstop.setEnabled(True)
self.btn.setEnabled(False)
self.write_to_console("Connected to "+self.exchange+" API successfully.", to_push=1)
self.write_to_console("Plots are available now", to_push=1)
self.trialtoolbar1.setEnabled(True)
self.trialtoolbar2.setEnabled(True)
self.trialtoolbar4.setEnabled(True)
# self.trialtoolbar31.setEnabled(True)
if self.popup_cnt == 0:
self.show_new_windowpnl()
self.show_new_windowtrade(self.trades_completed)
self.popup_cnt = 1
except:
self.write_to_console("Missing API info.", to_push=1)
## print trade stats in table
def disable_pre_pump_options(self,):
self.price_pd.setEnabled(False)
self.price_dp.setEnabled(False)
self.btn_bstart.setEnabled(False)
self.btn_bstop.setEnabled(True)
self.btn.setEnabled(False)
def enable_pump_options(self,):
self.price_pd.setEnabled(True)
self.price_dp.setEnabled(True)
self.btn_bstart.setEnabled(True)
self.btn_bstop.setEnabled(False)
self.btn.setEnabled(True)
#### Button Behaviour ####
    def on_pump(self):
        """Start the price-analysis bot.

        Opens the trade log, reconciles any trades left over from a previous
        session (re-attaching price sockets to live positions and cancelling
        orphaned orders), validates the pump/dump thresholds and finally
        starts the Binance websocket feeding :meth:`process_message`.
        """
        ct = time.time()
        now = datetime.datetime.fromtimestamp(ct)
        c_time = now.strftime("%Y-%m-%d_%H-%M-%S")
        # c_time1 = now.strftime("%Y-%m-%d")
        self.filename_ = "trade_logs.txt"
        with open(self.filename_, "a") as myfile:
            myfile.write("# New trade logs @ "+ c_time +" \n")
        try:
            # Reconciliation runs only on the first start of this session.
            if self.popup_cnt1 == 0:
                self.popup_cnt1 = 1
                if self.is_exchange_market:
                    ### Checking to see if new threads for existing trades needed
                    if self.trades_completed != {}:
                        ## verify if position open
                        temp = self.api.futures_position_information()
                        coins_symbol = [temp[i1]['symbol'] for i1 in range(len(temp)) \
                            if float(temp[i1]['entryPrice']) != 0.0]
                        for alt_coin in self.trades_completed:
                            # Recorded as running and still open on Binance:
                            # re-attach a mark-price socket for trailing TP.
                            if (self.trades_completed[alt_coin]["trade_status"] == "running") \
                                and (alt_coin in coins_symbol):
                                self.write_to_console("Retrieving previous trade for "+alt_coin, to_push=1)
                                ## start a socket manager to keep eye on the price movement
                                bm1 = BinanceSocketManager(self.api)
                                conn = bm1.start_symbol_mark_price_socket(symbol=alt_coin, \
                                    callback=self.sell_trailing_profit, fast=True)
                                self._sockets[alt_coin] = {"symbol": alt_coin, "socketmanager": bm1, "key": conn}
                                bm1.start()
                                time.sleep(.01)
                                self.write_to_console("Price socket started for "+alt_coin)
                            # Recorded as running but no longer open: finish it.
                            if (self.trades_completed[alt_coin]["trade_status"] == "running") \
                                and (alt_coin not in coins_symbol):
                                _ = self.api.futures_cancel_all_open_orders(symbol=alt_coin)
                                self.trades_completed[alt_coin]["trade_status"] = "finished"
                        self.write_to_console("Checking if any independent open orders are present.", to_push=1)
                        tempp = self.api.futures_get_open_orders()
                        ss = []
                        for j in tempp:
                            ss.append(j['symbol'])
                        ss = np.unique(ss)
                        for i in ss:
                            # Orders with no matching open position are orphans.
                            if i not in coins_symbol:
                                _ = self.api.futures_cancel_all_open_orders(symbol=i)
                                line = "cancelling all open orders for "+i
                                self.write_to_console(line, to_push=1)
                    else:
                        self.write_to_console("No active trade found.", to_push=1)
            self.disable_pre_pump_options()
            # Validate the user-entered pump/dump thresholds.
            try:
                percent = float(self.price_pd.text())
                percent1 = float(self.price_dp.text())
            except:
                self.write_to_console("Please fill the price change in numbers", to_push=1)
                return
            if (percent <= 0.0) or (percent1 <= 0.0):
                self.write_to_console("Price change percentage cannot be less than 0.", to_push=1)
                self.enable_pump_options()
                return
            self.write_to_console("Price based analysis started.", to_push=1)
            ### connect binance websocket
            self.bm = BinanceSocketManager(self.api)
            if (self.price_analysis_mode == "market"):
                self.conn_key = self.bm.start_all_mark_price_socket(self.process_message) #start_miniticker_socket
                self.btn_bstoptp.setEnabled(True)
            elif (self.price_analysis_mode == "last_price"):
                self.conn_key = self.bm.start_ticker_socket(self.process_message) #start_miniticker_socket
                self.btn_bstoptp.setEnabled(True)
            # elif (self.price_analysis_mode == "liquidation"):
            #     self.conn_key = self.bm.start_ticker_socket_allliq(self.process_message_liq)
            else:
                self.write_to_console("Not yet implemented, select last_price or market for price analysis in config!", to_push=1)
            self.bm.start()
            time.sleep(.01)
            self.write_to_console("Initialised successfully!", to_push=1)
            self.status.showMessage("Bot is running now!")
        except AttributeError:
            # self.api missing/None surfaces here as AttributeError.
            self.write_to_console("You need to connect to Binance before starting.", to_push=1)
            return
def process_message_liq(self, msg): # TODO
# sample stream OUTPUT
# {'e': 'forceOrder', 'E': 1619351660699, 'o':
# {'s': 'XRPUSDT', 'S': 'BUY', 'o': 'LIMIT',
# 'f': 'IOC', 'q': '4386.3', 'p': '1.0775',
# 'ap': '1.0711', 'X': 'FILLED',
# 'l': '1536.7', 'z': '4386.3', 'T': 1619351660692}}
print(msg)
def on_manual_sell(self):
self.enable_pump_options()
self.write_to_console("Stopping the P and D detector for Price analysis", to_push=1)
# reactor.stop()
try:
self.bm.stop_socket(self.conn_key)
self.bm.close()
except:
self.write_to_console("No socket is open.", to_push=1)
self.status.showMessage("Bot is stopped now!")
    def limit_safety(self, alt_coin, units_old, statement, indicator_=None):
        """Place scaled limit "safety" orders around the live entry price.

        Polls Binance (up to 3 attempts, 10 s apart on failure) for the
        entry price of *alt_coin*, then posts
        ``lim_trades_per_coin[alt_coin]`` limit orders spaced
        ``safety_trade_percent`` below (long) or above (short) the entry,
        with fund sizes ramping linearly from ``usdt_invest`` to
        ``usdt_addfunds``.  Progress text is appended to *statement*,
        which is returned.
        """
        statement.append("Placing Limit Safety Orders for "+alt_coin+"\n")
        # time.sleep(2)
        ## sleep for some seconds for the trade to be created
        leverage = self.leverage
        coin_trade = True
        merge_runningv1 = True
        loop_count = 0 ##0 to avoid forever loop
        while merge_runningv1:
            loop_count = loop_count + 1
            if loop_count > 3:
                # Give up after three polling attempts.
                merge_runningv1 = False
            try:
                temp = self.api.futures_position_information()
                entry_price = [float(temp[i1]['entryPrice']) for i1 in range(len(temp)) \
                    if temp[i1]['symbol'] == alt_coin]
                entry_price = entry_price[0]
            except:
                statement.append("Error getting the entry price from Binance, trying again in 10seconds")
                entry_price = 0.0
                time.sleep(10)
                continue
            if (coin_trade) and (entry_price > 0.0):
                tab_cnt = 0
                ## scaled place safety order
                # Fund sizes ramp linearly from usdt_invest to usdt_addfunds.
                if self.lim_trades_per_coin[alt_coin] > 1:
                    linspace = [self.usdt_invest + float(x)/(self.lim_trades_per_coin[alt_coin]-1)*\
                        (self.usdt_addfunds-self.usdt_invest) \
                        for x in range(self.lim_trades_per_coin[alt_coin])]
                else:
                    linspace = [self.usdt_addfunds]
                nb_units = []
                price_enter = []
                units_price = []
                ## first entry
                nb_units.append(units_old)
                price_enter.append(entry_price)
                units_price.append(units_old*entry_price)
                for i in range(self.lim_trades_per_coin[alt_coin]):
                    # Price ladder: step away from entry by
                    # safety_trade_percent per rung (down for longs, up
                    # for shorts).
                    if indicator_ == 'long':
                        entry_price1 = entry_price * (1 - ((self.safety_trade_percent/100.)*(i+1)))
                        type_ = "BUY"
                    elif indicator_ == 'short':
                        entry_price1 = entry_price * (1 + ((self.safety_trade_percent/100.)*(i+1)))
                        type_ = "SELL"
                    # Round price/quantity to the cached exchange precision.
                    if self.price_precision[alt_coin] == 0:
                        entry_price1 = int(entry_price1)
                    else:
                        entry_price1 = round(entry_price1, self.price_precision[alt_coin])
                    ### scaled safety trades
                    units = float(linspace[i]) / (entry_price1 / leverage)
                    if self.quantity_precision[alt_coin] == 0:
                        units = int(units)
                    else:
                        units = round(units, self.quantity_precision[alt_coin])
                    nb_units.append(units)
                    price_enter.append(entry_price1)
                    units_price.append(units*entry_price1)
                    try:
                        _ = self.api.futures_create_order(symbol=alt_coin, side=type_, type="LIMIT", \
                            positionSide="BOTH", \
                            timeInForce="GTC", quantity=units, price=entry_price1)
                        tab_cnt = tab_cnt + 1
                    except:
                        statement.append("error during safety order placement \n")
                if int(tab_cnt) == self.lim_trades_per_coin[alt_coin]:
                    # All rungs placed: stop polling and fall through to
                    # the summary below.
                    self.trade_per_coin[alt_coin] = self.trade_per_coin[alt_coin] + 1
                    merge_runningv1 = False
                    coin_trade = False
            else:
                # Entry price reported as 0 (or retries exhausted): abort.
                coin_trade = False
                statement.append("Unkown error occured \n")
                return statement
        # Summary: funds per rung and the running average entry price after
        # each hypothetical fill.
        qsd = ''
        dsq = ''
        for num, st in enumerate(linspace):
            qsd = qsd+str(st)+'; '
            u = nb_units[:num+2]
            # p = price_enter[:num+2]
            up = units_price[:num+2]
            pr = sum(up)/sum(u) # sum(u*p)/sum(u)
            dsq = dsq+str(round(pr,6))+'; '
        statement.append("The entry price for "+alt_coin +" is "+str(entry_price)+"\n")
        statement.append("Safety funds are added (with leverage of "+str(leverage)+\
            ") in the following order: "+qsd+"\n")
        statement.append("The safety trades will bring the entry price for "+alt_coin +" to: "+dsq+"\n")
        statement.append("Funds added to existing trade for "+alt_coin+"\n")
        statement.append("Exiting the sell Thread for "+alt_coin+"\n")
        return statement
    def _binance_buy_sell(self, alt_coin='BTCUSDT', current_value=0.0, \
        statement=None, indicator_=None, ppercent=None):
        """Open a market position for *alt_coin* and register its take-profit.

        Sets CROSSED margin and leverage (futures only), posts a market
        order sized as ``usdt_invest / (price / leverage)``, optionally
        places the scaled safety orders, records the trade in
        ``self.trades_completed`` and starts a mark-price socket that
        drives :meth:`sell_trailing_profit`.  Progress text is appended to
        *statement*, which is returned.
        """
        leverage = self.leverage
        ## we should probably add 0.5% price to the current price to account for a dump
        try:
            if self.is_exchange_market:
                ## Making sure the trade being opened is CROSSED margin type
                temp_var = self.api.futures_change_margin_type(symbol=alt_coin, marginType="CROSSED")
                if temp_var['msg'] == 'success':
                    statement.append("Successfully updated the margin type to CROSSED for "+alt_coin+"\n")
        except:
            statement.append("Margin type is already set to CROSSED for "+alt_coin+"\n")
        ## change leverage of the coin
        try:
            if self.is_exchange_market:
                ## Making sure the trade being opened is CROSSED margin type
                temp_var = self.api.futures_change_leverage(symbol=alt_coin, leverage=int(leverage))
                statement.append("Successfully updated the leverage to "+str(temp_var["leverage"])+" for "+alt_coin+"\n")
        except:
            statement.append("Error during leverage setting for "+alt_coin+". PLEASE CHANGE MANUALLY \n")
        # Position size in coin units for the configured stake.
        units = self.usdt_invest / (current_value / leverage)
        if self.quantity_precision[alt_coin] == 0:
            units = int(units)
        else:
            units = round(units, self.quantity_precision[alt_coin])
        # NOTE(review): ``type_`` stays unbound if *indicator_* is neither
        # 'long' nor 'short'; callers appear to always pass one of the two.
        if indicator_ == 'long':
            type_ = "BUY"
        elif indicator_ == 'short':
            type_ = "SELL"
        try: ## POSTING ORDERS IN BINANCE DIRECTLY
            if self.is_exchange_market:
                # Post order in futures
                data = self.api.futures_create_order(symbol=alt_coin, type="MARKET", quantity=units, \
                    positionSide="BOTH", side=type_)
                time.sleep(2)
                ## posting also limit safety orders for Futures
                if self.lim_trades_per_coin[alt_coin] > 0:
                    statement = self.limit_safety(alt_coin, units, statement, indicator_)
            else:
                # Post order in SPOT
                data = self.api.create_order(symbol=alt_coin, type="MARKET", quantity=units, \
                    side=type_)
        except BinanceAPIException as e:
            statement.append("Error in the Binance module while posting trades for "+alt_coin+"\n")
            statement.append(f"(Code {e.status_code}) {e.message}")
            return statement
        # time.sleep(2)
        # get order ID status
        # NOTE(review): this futures call also runs after SPOT orders —
        # looks suspect for spot trading; confirm it is intended.
        temp = self.api.futures_position_information()
        entry_price_ = [[float(temp[i1]['entryPrice']),float(temp[i1]['positionAmt'])] \
            for i1 in range(len(temp)) if temp[i1]['symbol'] == alt_coin]
        data["entry_price"] = entry_price_[0][0]
        data["entry_amount"] = entry_price_[0][1]
        data["units_total"] = entry_price_[0][1]
        # Take-profit level: profit_percent unless a per-trade ppercent is set.
        if indicator_ == 'long':
            if ppercent == 0.0 or ppercent == None:
                sell_value = entry_price_[0][0] * (1 + (self.profit_percent/100.))
            else:
                sell_value = entry_price_[0][0] * (1 + (ppercent/100.))
            type_ = "SELL"
        elif indicator_ == 'short':
            if ppercent == 0.0 or ppercent == None:
                sell_value = entry_price_[0][0] * (1 - (self.profit_percent/100.))
            else:
                sell_value = entry_price_[0][0] * (1 - (ppercent/100.))
            type_ = "BUY"
        data["sell_value"] = sell_value
        data["type_"] = type_
        data["trade_time"] = time.time()
        data["count"] = 0
        data["ttp_activated"] = False
        data["old_price"] = 1e10
        data["trade_status"] = "running"
        data["safety_count"] = self.lim_trades_per_coin[alt_coin]
        self.trades_completed[alt_coin] = data
        self.update_()
        statement.append("New trade created in Binance for "+alt_coin+"\n")
        ## start a socket manager to keep eye on the price movement
        bm21 = BinanceSocketManager(self.api)
        conn21 = bm21.start_symbol_mark_price_socket(symbol=alt_coin, callback=self.sell_trailing_profit, fast=True)
        self._sockets[alt_coin] = {"symbol": alt_coin, "socketmanager": bm21, "key": conn21}
        bm21.start()
        time.sleep(.01)
        statement.append("Price socket started for "+alt_coin+"\n")
        return statement
def stop_ticker_symbol(self, symbol):
try:
bm51 = self._sockets[symbol]["socketmanager"]
key51 = self._sockets[symbol]["key"]
bm51.stop_socket(key51)
bm51.close()
self._sockets[symbol]["socketmanager"] = ""
self._sockets[symbol]["key"] = ""
self.write_to_console("Socket closed for "+symbol, to_push=1)
except:
self.write_to_console("Socket is empty for "+symbol, to_push=1)
    def sell_trailing_profit(self, msg):
        """Mark-price socket callback implementing the trailing take-profit.

        Once price crosses the stored ``sell_value`` the trail is armed
        (``ttp_activated``) and ``old_price`` starts ratcheting with the
        market; the position is closed at market when price retraces past
        the trail.  The long branch (``type_ == "SELL"`` close side) and
        the short branch (``type_ == "BUY"``) mirror each other.
        """
        symbol = msg["data"]['s']
        price = float(msg["data"]['p']) ## market price
        if self.trades_completed[symbol]["type_"] == "SELL" and self.trades_completed[symbol]["trade_status"]=="running":
            # Long position: arm the trail when the TP level is crossed upward.
            if price > self.trades_completed[symbol]["sell_value"]:
                if self.trades_completed[symbol]["count"] == 0:
                    temp = self.api.futures_position_information()
                    entry_price_ = [[float(temp[i1]['entryPrice']),float(temp[i1]['positionAmt'])] \
                        for i1 in range(len(temp)) if temp[i1]['symbol'] == symbol]
                    self.trades_completed[symbol]["units_total"] = entry_price_[0][1]
                    self.trades_completed[symbol]["count"] = 1
                    self.trades_completed[symbol]["ttp_activated"] = True
                    self.trades_completed[symbol]["old_price"] = np.copy(price)
                    if entry_price_[0][0] == 0:
                        # Position already gone on the exchange: clean up.
                        _ = self.api.futures_cancel_all_open_orders(symbol=symbol)
                        ## stop the ticker stream
                        self.trades_completed[symbol]["trade_status"] = "finished"
                        self.update_()
                        self.stop_ticker_symbol(symbol)
            if self.trades_completed[symbol]["ttp_activated"] and self.trades_completed[symbol]["trade_status"]=="running":
                if price > self.trades_completed[symbol]["old_price"]*(1 + (self.take_profit_trailing/100.)):
                    # Ratchet the trail upwards with the market.
                    self.trades_completed[symbol]["old_price"] = self.trades_completed[symbol]["old_price"]*(1 + (self.take_profit_trailing/100.))
                elif price < self.trades_completed[symbol]["old_price"] and self.trades_completed[symbol]["trade_status"]=="running":
                    # Retraced below the trail: close the long at market.
                    self.trades_completed[symbol]["trade_status"] = "finished"
                    _ = self.api.futures_create_order(symbol=symbol, type="MARKET",
                        quantity=self.trades_completed[symbol]["units_total"], \
                        positionSide="BOTH", side="SELL")
                    ## remove open orders from book
                    _ = self.api.futures_cancel_all_open_orders(symbol=symbol)
                    ## stop the ticker stream
                    self.update_()
                    self.stop_ticker_symbol(symbol)
        elif self.trades_completed[symbol]["type_"] == "BUY" and self.trades_completed[symbol]["trade_status"]=="running":
            # Short position: arm the trail when the TP level is crossed downward.
            if price < self.trades_completed[symbol]["sell_value"]:
                if self.trades_completed[symbol]["count"] == 0:
                    temp = self.api.futures_position_information()
                    entry_price_ = [[float(temp[i1]['entryPrice']),float(temp[i1]['positionAmt'])] \
                        for i1 in range(len(temp)) if temp[i1]['symbol'] == symbol]
                    self.trades_completed[symbol]["units_total"] = abs(entry_price_[0][1])
                    self.trades_completed[symbol]["count"] = 1
                    self.trades_completed[symbol]["ttp_activated"] = True
                    self.trades_completed[symbol]["old_price"] = np.copy(price)
                    if entry_price_[0][0] == 0:
                        # Position already gone on the exchange: clean up.
                        _ = self.api.futures_cancel_all_open_orders(symbol=symbol)
                        ## stop the ticker stream
                        self.trades_completed[symbol]["trade_status"] = "finished"
                        self.update_()
                        self.stop_ticker_symbol(symbol)
            if self.trades_completed[symbol]["ttp_activated"] and self.trades_completed[symbol]["trade_status"]=="running":
                if price < self.trades_completed[symbol]["old_price"]*(1 - (self.take_profit_trailing/100.)):
                    # Ratchet the trail downwards with the market.
                    self.trades_completed[symbol]["old_price"] = self.trades_completed[symbol]["old_price"]*(1 - (self.take_profit_trailing/100.))
                elif price > self.trades_completed[symbol]["old_price"] and self.trades_completed[symbol]["trade_status"]=="running":
                    # Retraced above the trail: close the short at market.
                    self.trades_completed[symbol]["trade_status"] = "finished"
                    _ = self.api.futures_create_order(symbol=symbol, type="MARKET",
                        quantity=self.trades_completed[symbol]["units_total"], \
                        positionSide="BOTH", side="BUY")
                    ## remove open orders from book
                    _ = self.api.futures_cancel_all_open_orders(symbol=symbol)
                    ## stop the ticker stream
                    self.update_()
                    self.stop_ticker_symbol(symbol)
    def print_statement(self, c_time, symbol, flag1, volDiff1, volDiff, current_price, old_price, \
        percent_chgsP, indicator_, ppercent):
        """Report a detected pump/dump and, when allowed, open the trade.

        Counts the currently open futures positions, then either skips
        (already open / limit reached), or calls :meth:`_binance_buy_sell`
        and logs the outcome to the trade-log file and the console.
        """
        statement = []
        ## check open position counts (from Binance)
        coin_temp = []
        count = 10000000
        try:
            if self.is_exchange_market:
                temp = self.api.futures_position_information()
                coin_temp = [temp[i1]['symbol'] for i1 in range(len(temp)) if float(temp[i1]['entryPrice']) != 0.0]
                count = len(coin_temp)
                statement.append("Current active smart trades in Binance is : "+str(count)+"\n")
            # TODO implement strategy for SPOT market
        except:
            # count stays at the sentinel 10000000, blocking new trades.
            statement.append("Problem collecting the open position history (Binance module); \
                setting trade counts to 10000000 (i.e. cannot trade until Binance comes back online)\n")
        trade_log = False
        if symbol in coin_temp:
            statement.append("Order already open for this coin in Binance, doing nothing for "+symbol+"\n")
        elif (self.live_trade) and (count < self.lim_trades):
            self.nb_trades = self.nb_trades + 1
            statement = self._binance_buy_sell(alt_coin=symbol, \
                current_value=current_price, \
                statement=statement, \
                indicator_=indicator_,\
                ppercent=ppercent)
            trade_log = True
        elif (count >= self.lim_trades):
            statement.append("Limit active trades in progress, will still continue with Safety for open trades")
            # NOTE(review): this early return skips the console/log output
            # below, so the line appended just above is never shown — confirm.
            return
        # Build the one-line console/log summary of the detection.
        sym = "SYM: " + symbol
        flag = "PRICE! ("+flag1+")"
        vDiff = "DIFF (%): " + str(round(volDiff, 2))
        pcci = "Old price: "+ str(old_price)
        pcci1 = "Current price: "+ str(current_price)
        curr_pd = "Current price change threshold: "+str(percent_chgsP)
        volval = ''
        if volDiff1 > 0.0:
            volval = "BUYING activity \n"
        elif volDiff1 < 0.0:
            volval = "SELLING activity \n"
        my_string = ' || '.join(map(str, [c_time, flag, sym, pcci, pcci1, curr_pd, vDiff, volval]))
        str_from_list = ''.join([data for ele in statement for data in ele])
        # Only trades that were actually posted go to the log file.
        if trade_log:
            with open(self.filename_, "a") as myfile:
                myfile.write(my_string)
                myfile.write(str_from_list+" \n")
        self.write_to_console(str_from_list, to_push=1)
        self.write_to_console(my_string, to_push=1)
def process_message(self, msg):
if self.price_analysis_mode == "market":
msg = msg["data"]
ct = time.time()
now = datetime.datetime.fromtimestamp(ct)
c_time = now.strftime("%Y-%m-%d %H:%M:%S")
for ijk in range(len(msg)):
x = currency_container(msg[ijk], candle_len=len(self.candlesP), mode=self.price_analysis_mode)
if (x.symbol not in self.coins) or x.symbol[-len(self.basecurrency):] != self.basecurrency:
continue
if x.symbol not in self.new_list:
if self.mode_analysis == "Automatic":
if self.is_exchange_market:
trades= self.api.futures_klines(symbol=x.symbol, interval="1m", limit=1000)
else:
trades= self.api.get_klines(symbol=x.symbol, interval="1m", limit=1000)
## candle stats
percentT = [100*(float(d[2]) - float(d[3]))/float(d[2]) for i, d in enumerate(trades)]
temp_ = [0.1 if np.mean(percentT) < 0.1 else np.mean(percentT)]
x.percent_chgsP = temp_[0]
x.profit_percentP = temp_[0]
else:
x.percent_chgsP = float(self.price_pd.text())
self.new_list[x.symbol] = x
self.write_to_console("Gathering (only "+self.basecurrency+" pairs) "+x.symbol, to_push=1)
else:
stored_currency = self.new_list[x.symbol]
indicator_ = np.copy(self.indicator) #Perm copy
if ((ct - stored_currency.time_stamp) > 1):
stored_currency.time_stamp = ct
for i in range(len(stored_currency.time_stamp_period)):
if ((ct - stored_currency.time_stamp_period[i]) >= self.candlesP[i]):
execute_trade = False
priceDiff1 = ((x.bid_price - stored_currency.price_time[i]) / stored_currency.price_time[i]) * 100
# temp var to launch
if self.mode_analysis == "Automatic":
if indicator_ == 'long':
pd_val = stored_currency.percent_chgsP
dp_val = pd_val * 50 # some high value
else:
dp_val = stored_currency.percent_chgsP
pd_val = pd_val * 50 # some high value
else:
pd_val = self.price_pd.text()
dp_val = self.price_dp.text()
if ((priceDiff1 < 0.0) and (abs(priceDiff1) > float(dp_val))) or \
((priceDiff1 > 0.0) and (float(dp_val) > abs(priceDiff1) > float(pd_val))):
## big DUMP or small PUMP (open a LONG)
if indicator_ == 'long':
execute_trade = True
elif indicator_ == 'short':
execute_trade = False
elif ((priceDiff1 < 0.0) and (float(dp_val) > abs(priceDiff1) > float(pd_val))) or \
((priceDiff1 > 0.0) and (abs(priceDiff1) > float(dp_val))):
## small DUMP or big PUMP (open a SHORT)
if indicator_ == 'short':
execute_trade = True
elif indicator_ == 'long':
execute_trade = False
if execute_trade and self.running==False:
self.running = True
# process_temp = threading.Thread(target=self.print_statement, args=(c_time, stored_currency.symbol, \
# str(self.candlesP[i])+" Sec", \
# priceDiff1, abs(priceDiff1), \
# x.bid_price,stored_currency.price_time[i],\
# stored_currency.percent_chgsP, indicator_,
# stored_currency.profit_percentP), daemon=True)
# process_temp.start()
self.print_statement(c_time, stored_currency.symbol, \
str(self.candlesP[i])+" Sec", \
priceDiff1, abs(priceDiff1), \
x.bid_price,stored_currency.price_time[i],\
stored_currency.percent_chgsP, indicator_,
stored_currency.profit_percentP)
stored_currency.time_stamp_period = [ct for _ in range(len(self.candlesP))]
stored_currency.price_time = [x.bid_price for _ in range(len(self.candlesP))]
self.running = False
stored_currency.price_time[i] = x.bid_price
stored_currency.time_stamp_period[i] = ct
stored_currency.volume24hr = x.volume24hr
if ((ct - stored_currency.time_stamp_reset) > 3600):
stored_currency.time_stamp_reset = ct
if self.mode_analysis == "Automatic":
if self.is_exchange_market:
trades= self.api.futures_klines(symbol=x.symbol, interval="1m", limit=1000)
else:
trades= self.api.get_klines(symbol=x.symbol, interval="1m", limit=1000)
## candle stats
percentT = [100*(float(d[2]) - float(d[3]))/float(d[2]) for i, d in enumerate(trades)]
temp_ = [0.1 if np.mean(percentT) < 0.1 else np.mean(percentT)]
stored_currency.percent_chgsP = temp_[0]
stored_currency.profit_percentP = temp_[0]
else:
stored_currency.percent_chgsP = float(self.price_pd.text())
if __name__ == '__main__':
    # Entry point: create the Qt application, show the main window and run
    # the event loop until the user quits.  (A stray trailing '|' artifact
    # after sys.exit(...) was removed — it was a syntax error.)
    app = QApplication(sys.argv)
    win = Window()
    win.show()
    sys.exit(app.exec_())
<filename>unittests/test_discord.py
import time
import logging
import os
import socket
import unittest
import yaml
from mock import mock
from octoprint_discordremote.discord import Discord
from octoprint_discordremote.embedbuilder import EmbedBuilder, upload_file, DISCORD_MAX_FILE_SIZE
from unittests.discordremotetestcase import DiscordRemoteTestCase
class TestLogger(logging.Logger):
    """Logger that mirrors every record straight to stdout.

    Used so the Discord client's log output is visible while unit tests
    run; level filtering is deliberately a no-op.
    """

    def __init__(self):
        super(TestLogger, self).__init__(name=None)

    def setLevel(self, level):
        pass  # all levels are always emitted

    def _echo(self, label, msg, args):
        # Same console format as the original: "<LEVEL>: <msg> <args-tuple>".
        print("%s: %s" % (label, msg), args)

    def debug(self, msg, *args):
        self._echo("DEBUG", msg, args)

    def info(self, msg, *args):
        self._echo("INFO", msg, args)

    def warning(self, msg, *args):
        self._echo("WARNING", msg, args)

    def error(self, msg, *args):
        self._echo("ERROR", msg, args)

    def exception(self, msg, *args):
        self._echo("EXCEPTION", msg, args)

    def critical(self, msg, *args):
        self._echo("CRITICAL", msg, args)
class TestSend(DiscordRemoteTestCase):
def setUp(self):
self.discord = Discord()
if "NET_TEST" in os.environ:
config_file = self._get_path("../config.yaml")
try:
with open(config_file, "r") as config:
config = yaml.load(config.read(), Loader=yaml.SafeLoader)
self.discord.configure_discord(bot_token=config['bottoken'],
channel_id=config['channelid'],
logger=TestLogger(),
command=None)
time.sleep(5)
except:
self.fail("To test discord bot posting, you need to create a file "
"called config.yaml in the root directory with your bot "
"details. NEVER COMMIT THIS FILE.")
def tearDown(self):
self.discord.shutdown_discord()
@unittest.skipIf("NET_TEST" not in os.environ,
"'NET_TEST' not in os.environ - Not running network test")
def test_dispatch(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 1))
except Exception as e:
self.fail("Can't be tested without an internet connection")
finally:
s.close()
# Should result in 3 messages, one embed, one embed with photo, and one photo.
builder = EmbedBuilder()
builder.set_title("Test title")
builder.set_description("No snapshot")
self.assertTrue(self.discord._dispatch_message(embed=builder.get_embeds()[0]))
with open(self._get_path("test_pattern.png"), "rb") as f:
builder.set_description("With snapshot")
builder.set_image(("snapshot.png", f))
self.assertTrue(self.discord._dispatch_message(embed=builder.get_embeds()[0]))
f.seek(0)
self.assertTrue(self.discord._dispatch_message(snapshot=("snapshot.png", f)))
def test_send(self):
self.discord._dispatch_message = mock.Mock()
mock_snapshot = mock.Mock()
mock_embed = mock.Mock()
self.assertTrue(self.discord.send(snapshots=[mock_snapshot], embeds=[mock_embed]))
self.assertEqual(2, self.discord._dispatch_message.call_count)
calls = [mock.call(snapshot=mock_snapshot),
mock.call(embed=mock_embed)]
self.discord._dispatch_message.assert_has_calls(calls=calls)
large_file_path = self._get_path("large_file_temp")
with open(large_file_path, 'w') as f:
for i in range(0, DISCORD_MAX_FILE_SIZE):
f.write(unicode(i))
embeds = upload_file(large_file_path)
self.discord.send(embeds=embeds)
@unittest.skipIf("LONG_TEST" not in os.environ,
                 "'LONG_TEST' not in os.environ - Not running long test")
def test_reconnect(self):
    """Repeatedly force websocket restarts and count successful session
    resumes vs. invalid-session fallbacks.

    NOTE(review): runs 1100 iterations with 1s polling sleeps — this is
    deliberately a very long soak test (hence the LONG_TEST gate).
    """
    # Wait til connected fully
    while self.discord.session_id is None:
        time.sleep(0.001)
    print("Connected and authenticated: %s" % self.discord.session_id)
    # Wrap send_resume / handle_invalid_session in call-through mocks so
    # call counts can be observed without changing behaviour.
    orig_send_resume = self.discord.send_resume
    self.discord.send_resume = mock.Mock()
    self.discord.send_resume.side_effect = orig_send_resume
    orig_handle_invalid = self.discord.handle_invalid_session
    self.discord.handle_invalid_session = mock.Mock()
    self.discord.handle_invalid_session.side_effect = orig_handle_invalid
    # Wait for any in-flight restart to finish before we trigger our own.
    while self.discord.restart_event.is_set():
        time.sleep(0.001)
    # Drop the socket and request a restart.
    self.discord.web_socket = None
    self.discord.restart_event.set()
    resume_succeeded = 0
    for i in range(0, 1100):
        self.discord.send_resume.reset_mock()
        # Wait for the previous restart request to be consumed.
        while self.discord.restart_event.is_set():
            time.sleep(1)
        self.discord.restart_event.set()
        # Wait til resume is called
        while not self.discord.send_resume.called:
            time.sleep(1)
        self.discord.send_resume.reset_mock()
        # Check if invalid session occurred. Might not receive it til the next iteration.
        if self.discord.handle_invalid_session.called:
            # An invalid session invalidates one previously-counted resume.
            resume_succeeded -= 1
            self.discord.handle_invalid_session.reset_mock()
        resume_succeeded += 1
        print("Resumed: %i, Succeeded: %i" % (i, resume_succeeded))
    print("Total Successful Resumes: %i" % resume_succeeded)
|
<gh_stars>1-10
import json
import torch
from utils.diffquantitative import DiffQuantitativeSemantic
K=10
class Car:
    """Physical model of a single vehicle moving along one dimension.

    State (position, velocity, acceleration) is kept as torch tensors so
    that gradients can flow through the dynamics.
    """

    def __init__(self):
        # Actuation and speed limits.
        self._max_acceleration = 20.0
        self._min_acceleration = -self._max_acceleration
        self._max_velocity = 20.0
        self._min_velocity = 0.0
        self.gravity = 9.81
        # Kinematic state.
        self.position = torch.tensor(0.0)
        self.velocity = torch.tensor(0.0)
        self.acceleration = torch.tensor(0.0)
        self.friction_coefficient = 0.01

    def update(self, in_acceleration, dt):
        """Advance the state by one Euler step of length ``dt``.

        The commanded acceleration is clamped to the actuator limits and,
        while the car is moving, reduced by rolling friction.
        """
        commanded = torch.clamp(in_acceleration,
                                self._min_acceleration,
                                self._max_acceleration)
        if self.velocity > 0:
            commanded = commanded - self.friction_coefficient * self.gravity
        self.acceleration = commanded
        next_velocity = self.velocity + self.acceleration * dt
        self.velocity = torch.clamp(next_velocity,
                                    self._min_velocity,
                                    self._max_velocity)
        self.position += self.velocity * dt
class Environment:
    """World model holding the leader car.

    The attached agent observes the leader through the ``l_position`` /
    ``l_velocity`` properties.
    """

    def __init__(self):
        self._leader_car = Car()

    def set_agent(self, agent):
        """Attach the opposing agent, then finish initialisation."""
        self._agent = agent
        self.initialized()

    def initialized(self):
        # One actuator (the leader's acceleration) and one sensor per
        # component of the observed status tuple.
        self.actuators = 1
        self.sensors = len(self.status)

    @property
    def l_position(self):
        """Leader position (cloned so callers cannot mutate internal state)."""
        return self._leader_car.position.clone()

    @l_position.setter
    def l_position(self, value):
        self._leader_car.position = value

    @property
    def l_velocity(self):
        """Leader velocity (cloned so callers cannot mutate internal state)."""
        return self._leader_car.velocity.clone()

    @l_velocity.setter
    def l_velocity(self, value):
        self._leader_car.velocity = value

    @property
    def status(self):
        """Observable state: (leader velocity, agent velocity, gap)."""
        return (self.l_velocity,
                self._agent.velocity,
                self._agent.distance)

    def update(self, parameters, dt):
        """Advance the leader car by ``dt`` using the NN-generated
        acceleration ``parameters``."""
        self._leader_car.update(parameters, dt)
class Agent:
    """The ego vehicle controlled by the NN policy.

    Observes the leader through the attached environment and exposes its
    own kinematic state via properties.
    """

    def __init__(self):
        self._car = Car()

    def set_environment(self, environment):
        """Attach the environment, then finish initialisation."""
        self._environment = environment
        self.initialized()

    def initialized(self):
        # One actuator (acceleration) and one sensor per status component.
        self.actuators = 1
        self.sensors = len(self.status)

    @property
    def position(self):
        """Own position (cloned so callers cannot mutate internal state)."""
        return self._car.position.clone()

    @position.setter
    def position(self, value):
        self._car.position = value

    @property
    def velocity(self):
        """Own velocity (cloned so callers cannot mutate internal state)."""
        return self._car.velocity.clone()

    @velocity.setter
    def velocity(self, value):
        self._car.velocity = value

    @property
    def distance(self):
        """Gap between the leader and this car."""
        return self._environment.l_position - self._car.position

    @distance.setter
    def distance(self, value):
        # NOTE(review): stored on the car, but the getter above always
        # recomputes the gap from positions and never reads it back.
        self._car.distance = value

    @property
    def status(self):
        """Observable state: (leader velocity, own velocity, gap)."""
        return (self._environment.l_velocity,
                self.velocity,
                self.distance)

    def update(self, parameters, dt):
        """Advance this car by ``dt`` using the NN-generated acceleration
        ``parameters``."""
        self._car.update(parameters, dt)
class Model:
    """Complete world model coupling the agent and the environment
    (i.e. both the attacker and the defender)."""

    def __init__(self, param_generator):
        self.agent = Agent()
        self.environment = Environment()
        # Wire up the mutual references between the two halves.
        self.agent.set_environment(self.environment)
        self.environment.set_agent(self.agent)
        self._param_generator = param_generator
        self.traces = None

    def step(self, env_input, agent_input, dt):
        """Advance the world by a single instant of time and record the
        leader/agent gap in the trace."""
        self.environment.update(env_input, dt)
        self.agent.update(agent_input, dt)
        self.traces['dist'].append(self.agent.distance)

    def initialize_random(self):
        """Sample a random initial state from the parameter generator."""
        values = tuple(next(self._param_generator))
        self._last_init = values
        self.reinitialize(*values)

    def initialize_rewind(self):
        """Restore the world's state to the last initialization."""
        self.reinitialize(*self._last_init)

    def reinitialize(self, agent_position, agent_velocity, leader_position, leader_velocity):
        """Set the world's state to the given scalars and reset the traces."""
        self.agent.position = torch.tensor(agent_position).reshape(1)
        self.agent.velocity = torch.tensor(agent_velocity).reshape(1)
        self.environment.l_position = torch.tensor(leader_position).reshape(1)
        self.environment.l_velocity = torch.tensor(leader_velocity).reshape(1)
        self.traces = {'dist': []}
class RobustnessComputer:
    """Evaluates the differentiable robustness value (rho) of a trace."""

    def __init__(self, formula):
        # Differentiable quantitative semantics of the STL formula.
        self.dqs = DiffQuantitativeSemantic(formula)

    def compute(self, model):
        """Return rho over the last K distance samples of the model's trace."""
        recent = model.traces['dist'][-K:]
        return self.dqs.compute(dist=torch.cat(recent))
|
<gh_stars>1-10
import math
import random
import string
import unittest
import itertools
import contextlib
import warnings
import pickle
from copy import deepcopy
from itertools import repeat, product
from functools import wraps, reduce
from operator import mul
from collections import OrderedDict
import hashlib
import os
import torch
from torch._six import inf, nan
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel as dp
import torch.nn.init as init
import torch.nn.utils.rnn as rnn_utils
import torch.legacy.nn as legacy
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from torch.autograd import Variable, gradcheck
from torch.autograd.gradcheck import gradgradcheck
from torch.nn import Parameter
from torch.nn.parallel._functions import Broadcast
from common import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, \
TEST_SCIPY, IS_WINDOWS, download_file, PY3, PY34, to_gpu, \
get_function_arglist, skipCUDAMemoryLeakCheckIf
from common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, \
TEST_CUDNN_VERSION
from common_nn import NNTestCase, ModuleTest, CriterionTest, TestBase, \
module_tests, criterion_tests, loss_reference_fns, get_reduction, \
get_weight, smoothl1loss_reference, kldivloss_reference, ctcloss_reference
if TEST_SCIPY:
from scipy import stats
# Dtype groups consumed by repeat_test_for_types below.
ALL_TENSORTYPES = [torch.float,
                   torch.double,
                   torch.half]
# For ops without half-precision support.
NO_HALF_TENSORTYPES = [torch.float,
                       torch.double]
DOUBLE_TENSORTYPES = [torch.double]
# Comparison tolerance per dtype (half needs a much looser bound).
dtype2prec = {torch.float: 1e-5,
              torch.double: 1e-5,
              torch.half: 1e-2}
# WARNING: If you add a new top-level test case to this file, you MUST
# update test/run_test.py to list it, otherwise it will NOT be run in
# CI.
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
    """Decorator factory: run the decorated test once per dtype in
    *dtypes*, passing each as the ``dtype`` keyword argument."""
    def repeat_helper(f):
        @wraps(f)
        def call_helper(self, *args):
            for dtype in dtypes:
                if not PY34:
                    f(self, *args, dtype=dtype)
                    continue
                # subTest reports each dtype separately (Python 3.4+ only).
                with TestCase.subTest(self, dtype=dtype):
                    f(self, *args, dtype=dtype)
        return call_helper
    return repeat_helper
class PackedSequenceTest(TestCase):
    """Tests for PackedSequence type casting, CUDA transfer and padding."""

    # Maps a type name string to (tensor constructor, cast-method name).
    _type_by_name = {
        'torch.DoubleTensor': (torch.DoubleTensor, 'double'),
        'torch.FloatTensor': (torch.FloatTensor, 'float'),
        # We leave out `'torch.HalfTensor': (torch.HalfTensor, 'half'),`
        # because of an error in `pad_packed_sequence`
        # > AttributeError: 'torch.HalfTensor' object has no attribute 'fill_'
        'torch.LongTensor': (torch.LongTensor, 'long'),
        'torch.IntTensor': (torch.IntTensor, 'int'),
        'torch.ShortTensor': (torch.ShortTensor, 'short'),
        'torch.CharTensor': (torch.CharTensor, 'char'),
        'torch.ByteTensor': (torch.ByteTensor, 'byte'),
    }

    def __init__(self, *args, **kwargs):
        super(PackedSequenceTest, self).__init__(*args, **kwargs)
        # Fixture dimensions shared by all tests below.
        self.batch_size = 5
        self.max_length = 6

    def _ordered_sequence(self, tensor_type):
        """Create ordered list of random sequences"""
        seqs = [tensor_type(random.randint(1, self.max_length))
                for _ in range(self.batch_size)]
        seqs = [s.random_(-128, 128) for s in seqs]
        # pack_padded_sequence requires sequences sorted by decreasing length.
        ordered = sorted(seqs, key=len, reverse=True)
        return ordered

    def _padded_sequence(self, tensor_type):
        """Create Tensor of random padded sequences"""
        ordered = self._ordered_sequence(tensor_type)
        lengths = list(map(len, ordered))
        padded_tensor = rnn_utils.pad_sequence(ordered)
        return padded_tensor, lengths

    def test_type_casts(self):
        """Test type casting of `PackedSequence` against type casting of tensor"""
        for _, (input_type, _) in self._type_by_name.items():
            for expected_type_str, (_, cast_str) in self._type_by_name.items():
                padded, lengths = self._padded_sequence(input_type)
                packed = rnn_utils.pack_padded_sequence(padded, lengths)
                # Apply cast to `PackedSequence` instance and unpack
                masked = getattr(packed, cast_str)()
                unpacked, lengths_out = rnn_utils.pad_packed_sequence(masked)
                self.assertEqual(unpacked.type(), expected_type_str)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_cuda_mask(self):
        """.cuda() on a PackedSequence moves its data onto the GPU."""
        tensor_type = torch.FloatTensor
        cuda_type_str = 'torch.cuda.FloatTensor'
        padded, lengths = self._padded_sequence(tensor_type)
        packed = rnn_utils.pack_padded_sequence(padded, lengths)
        self.assertFalse(packed.is_cuda)
        packed = packed.cuda()
        self.assertTrue(packed.is_cuda)
        unpacked, _ = rnn_utils.pad_packed_sequence(packed)
        self.assertEqual(unpacked.type(), cuda_type_str)

    def test_total_length(self):
        """pad_packed_sequence validates and honours total_length."""
        padded, lengths = self._padded_sequence(torch.FloatTensor)
        max_length = max(lengths)
        packed = rnn_utils.pack_padded_sequence(padded, lengths)
        # test ValueError if total_length < max_length
        for total_length in (-1, 0, max_length - 1):
            for batch_first in (True, False):
                def err_fn():
                    rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                  total_length=total_length)
                self.assertRaisesRegex(ValueError,
                                       r'Expected total_length to be at least the '
                                       r'length of the longest sequence in input',
                                       err_fn)
        # test that pad_packed_sequence returns results of correct length
        for batch_first in (True, False):
            no_extra_pad, _ = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
            for total_length_delta in (0, 1, 8):
                total_length = max_length + total_length_delta
                unpacked, lengths_out = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,
                                                                      total_length=total_length)
                self.assertEqual(lengths, lengths_out)
                self.assertEqual(unpacked.size(1 if batch_first else 0), total_length)
                # Reference: the default unpadding plus explicit zero padding
                # appended along the time dimension.
                if total_length_delta == 0:
                    ref_output = no_extra_pad
                elif batch_first:
                    extra_pad = no_extra_pad.new_zeros(self.batch_size, total_length_delta)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 1)
                else:
                    extra_pad = no_extra_pad.new_zeros(total_length_delta, self.batch_size)
                    ref_output = torch.cat([no_extra_pad, extra_pad], 0)
                self.assertEqual(unpacked, ref_output)

    def test_to(self):
        """.to() is a no-op for a matching device/dtype and converts otherwise."""
        padded, lengths = self._padded_sequence(torch.IntTensor)
        a = rnn_utils.pack_padded_sequence(padded, lengths).cpu()
        # Identity when already on the requested device/dtype.
        self.assertIs(a, a.to('cpu'))
        self.assertIs(a, a.to('cpu', dtype=torch.int32))
        self.assertEqual(a.long(), a.to(torch.int64))
        if torch.cuda.is_available():
            for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:
                b = a.cuda(device=cuda)
                self.assertIs(b, b.to(cuda))
                self.assertEqual(a, b.to('cpu'))
                self.assertEqual(b, a.to(cuda))
                self.assertEqual(a, b.to('cpu', dtype=torch.int32))
                self.assertIs(b, b.to(dtype=torch.int32))
                self.assertEqual(b.long(), b.to(dtype=torch.int64))
def default_tensor_type(type):
    """Decorator: run the wrapped function with *type* as the global default
    tensor type, restoring the previous default afterwards (even on error)."""
    type_str = torch.typename(type)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            previous = torch.Tensor().type()
            torch.set_default_tensor_type(type_str)
            try:
                return fn(*args, **kwargs)
            finally:
                # Always restore, even if fn raised.
                torch.set_default_tensor_type(previous)
        return wrapper
    return decorator
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs))
class InputVariableMixin(object):
    """Mixin that marks every floating-point tensor in the test input as
    requiring grad, recursing through nested containers."""

    def _get_input(self):
        raw = TestBase._get_input(self, False)

        def enable_grad(item):
            if not isinstance(item, torch.Tensor):
                # Rebuild the container with the same type, recursing.
                return type(item)(enable_grad(elem) for elem in item)
            if item.is_floating_point():
                item.requires_grad = True
            return item

        return enable_grad(raw)
class NewModuleTest(InputVariableMixin, ModuleTest):
    """ModuleTest variant that adds gradgrad, inplace, repr, device and
    dtype-casting checks for the module under test."""

    def __init__(self, *args, **kwargs):
        super(NewModuleTest, self).__init__(*args, **kwargs)
        # Optional kwargs toggling which extra checks run.
        self.cudnn = kwargs.get('cudnn', False)
        self.check_inplace = kwargs.get('check_inplace', False)
        self.check_gradgrad = kwargs.get('check_gradgrad', True)

    def _do_test(self, test_case, module, input):
        """Run the full battery of checks against `module` on `input`."""
        test_case.check_jacobian(module, input, self.jacobian_input)
        if self.check_gradgrad:
            # could probably unify check_jacobian above with this.
            params = tuple(x for x in module.parameters())
            _assertGradAndGradgradChecks(test_case,
                                         lambda x, *args, **kw: test_case._forward(module, x), (input,) + params)
        # check if module can be printed
        module.__repr__()
        if self.check_inplace:
            # check if the inplace variant of the module gives the same result
            # as the out-of-place
            module_ip = self.constructor(*self.constructor_args, inplace=True)
            input_version = input._version
            with freeze_rng_state():
                output = module(input)
            # Out-of-place forward must not have bumped the input's version.
            test_case.assertEqual(input._version, input_version)
            input_ip = deepcopy(input)
            input_ip_clone = input_ip.clone()
            with freeze_rng_state():
                output_ip = module_ip(input_ip_clone)
            # The inplace variant must actually have modified its input clone.
            test_case.assertNotEqual(input_ip_clone._version, input_version)
            test_case.assertEqual(output, output_ip)
            grad = output.data.clone().normal_()
            input.grad.data.zero_()
            output.backward(grad)
            output_ip.backward(grad)
            test_case.assertEqual(input.grad, input_ip.grad)
        if isinstance(input, torch.LongTensor) and TEST_CUDA:
            # check that cuda() moves module parameters to correct GPU device,
            # and that float() casts parameters correctly
            input = input.cuda()
            module.float().cuda()
            module(input)
            for p in module.parameters():
                test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                test_case.assertEqual(p.get_device(), 0)
            if torch.cuda.device_count() > 1:
                input = input.cuda(1)
                module.cuda(1)
                with torch.cuda.device(1):
                    module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                    test_case.assertEqual(p.get_device(), 1)
        else:
            # check that float()/double() casters work correctly
            # to float
            if not isinstance(input, torch.LongTensor):
                input = input.float()
            module.float()
            module(input)
            for p in module.parameters():
                test_case.assertIsInstance(p, torch.FloatTensor)
            # and back to double
            if not isinstance(input, torch.LongTensor):
                input = input.double()
            module.double()
            module(input)
            for p in module.parameters():
                test_case.assertIsInstance(p, torch.DoubleTensor)
            if TEST_CUDA and self.should_test_cuda:
                # check that cuda() moves module parameters to correct GPU device,
                # and that float() casts parameters correctly
                # to GPU0
                input = input.float().cuda()
                module.float().cuda()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                    test_case.assertEqual(p.get_device(), 0)
                # to CPU
                input = input.cpu()
                module.cpu()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.FloatTensor)
                # back to GPU0
                input = input.cuda()
                module.cuda()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                    test_case.assertEqual(p.get_device(), 0)
                # test that forwards of module runs correctly without cuDNN
                if self.cudnn:
                    with torch.backends.cudnn.flags(enabled=False):
                        module(input)
                        for p in module.parameters():
                            test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                            test_case.assertEqual(p.get_device(), 0)
                if torch.cuda.device_count() >= 2:
                    # test cross-GPU transfer works
                    # to GPU1
                    input = input.cuda(1)
                    module.cuda(1)
                    with torch.cuda.device(1):
                        module(input)
                    for p in module.parameters():
                        test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                        test_case.assertEqual(p.get_device(), 1)
                # test double()
                input = input.double().cuda()
                module.double().cuda()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.DoubleTensor)
                    test_case.assertEqual(p.get_device(), 0)
                # test half()
                input = input.half().cuda()
                module.half().cuda()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.HalfTensor)
                    test_case.assertEqual(p.get_device(), 0)

    def _get_target(self):
        # Target fixture from the declarative test description.
        return self._get_arg('target', False)

    @property
    def constructor_args(self):
        return self._get_arg('constructor_args', False)
class NewCriterionTest(InputVariableMixin, CriterionTest):
    """CriterionTest variant adding gradgrad checks and a CPU-vs-CUDA
    comparison with optional dtype conversion."""
    # TODO: check that criterions don't ignore grad_output

    def __init__(self, *args, **kwargs):
        super(NewCriterionTest, self).__init__(*args, **kwargs)
        # Optional kwargs toggling which extra checks run.
        self.check_gradgrad = kwargs.get('check_gradgrad', True)
        self.check_half = kwargs.get('check_half', True)
        self.convert_target = kwargs.get('convert_target', True)

    def _do_extra_tests(self, test_case, module, input, target):
        """Run first- and second-order gradient checks on the criterion."""
        if not self.check_gradgrad:
            return
        test_case.assertFalse(target.requires_grad)
        params = tuple(x for x in module.parameters())
        if not isinstance(input, tuple):
            inputs = (input,) + params

            def apply_fn(input, *params):
                return module(input, target)
        else:
            inputs = input + params

            def apply_fn(input1, input2, *params):
                return module(input1, input2, target)
        # TODO: we don't pass `target` as part of inputs because we don't
        # currently compute the gradient w.r.t. target for loss functions.
        gradcheck(apply_fn, inputs)
        gradgradcheck(apply_fn, inputs)

    def test_cuda(self, test_case, dtype=None, extra_args=None):
        """Compare forward/backward results of the criterion on CPU and GPU,
        optionally converting input/target/module to `dtype` first."""
        def convert_dtype(obj, dtype, requires_grad=False):
            # Recursively convert tensors (and tuples of tensors) to `dtype`.
            # BUGFIX: an `elif isinstance(obj, torch.Tensor): return obj.to(dtype)`
            # branch used to follow the first check; its condition was identical,
            # so it was unreachable dead code and has been removed.
            if isinstance(obj, torch.Tensor):
                return torch.tensor(obj.data, dtype=dtype, requires_grad=requires_grad)
            elif isinstance(obj, tuple):
                return tuple(convert_dtype(o, dtype, requires_grad) for o in obj)
            else:
                return obj

        if not TEST_CUDA or not self.should_test_cuda:
            raise unittest.SkipTest('Excluded from CUDA tests')
        try:
            cpu_input = self._get_input()
            cpu_target = self._get_target()
            cpu_module = self.constructor(*self.constructor_args)
            gpu_module = self.constructor(*self.constructor_args)
            # Convert input, target and module parameters to dtype
            if dtype is not None:
                cpu_input = convert_dtype(cpu_input, dtype, True)
                # NLLLoss requires target to be LongTensor
                if not isinstance(cpu_target, torch.LongTensor) and self.convert_target:
                    cpu_target = convert_dtype(cpu_target, dtype)
                cpu_module.type(dtype)
                gpu_module.type(dtype)
            # GPU setup
            gpu_input = to_gpu(cpu_input)
            gpu_target = to_gpu(cpu_target)
            gpu_module.cuda()
            # torch.HalfTensor doesn't support most operations, converting back to default
            if dtype == torch.half:
                cpu_input = self._get_input()
                cpu_target = self._get_target()
                # Loss modules with weights require consistent input/module weight types
                cpu_module = self.constructor(*self.constructor_args)
            cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target, extra_args=extra_args)
            gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target, extra_args=extra_args)
            # dtype can be None, so set precision in this way instead of a precision map
            test_case.assertEqual(cpu_output, gpu_output, 1e-1 if dtype == torch.half else 4e-4)
            cpu_gradInput = test_case._backward_criterion(cpu_module, cpu_input, cpu_target, extra_args=extra_args)
            gpu_gradInput = test_case._backward_criterion(gpu_module, gpu_input, gpu_target, extra_args=extra_args)
            test_case.assertEqual(cpu_gradInput, gpu_gradInput, 1e-1 if dtype == torch.half else 4e-4)
        except NotImplementedError:
            pass

    def _get_target(self):
        # Target fixture from the declarative test description.
        return self._get_arg('target', False)

    @property
    def constructor_args(self):
        return self._get_arg('constructor_args', False)

    @property
    def extra_args(self):
        return self._get_arg('extra_args', False)
class TestNN(NNTestCase):
_do_cuda_memory_leak_check = True
def _forward(self, module, input):
    """Run a forward pass under a frozen RNG state for reproducibility."""
    with freeze_rng_state():
        return module(input)

def _backward(self, module, input, output, grad_output, create_graph=False):
    """Backprop grad_output through output; return the input's grad data
    (or None if the input accumulated no gradient)."""
    output.backward(grad_output, retain_graph=True, create_graph=create_graph)
    if input.grad is None:
        return None
    return input.grad.data

def _forward_criterion(self, criterion, input, target, extra_args=None):
    """Apply `criterion` to (input, target) and return the scalar loss."""
    if extra_args is None:
        extra_args = tuple()
    if isinstance(input, tuple):
        # Multi-input criterions take the inputs unpacked before the target.
        args = input + (target,) + extra_args
        output = criterion(*args)
    else:
        output = criterion(input, target, *extra_args)
    return output.item()
def _backward_criterion(self, criterion, input, target, gradOutput=None, extra_args=None):
    """Backprop through `criterion` and return the input gradient(s),
    as a tuple when the input was a tuple."""
    if extra_args is None:
        extra_args = tuple()
    input_tuple = input if isinstance(input, tuple) else (input,)
    # Clear stale gradients so the returned grads come from this pass only.
    for i in input_tuple:
        if i.grad is not None:
            i.grad.data.zero_()
    args = input_tuple + (target,) + extra_args
    if gradOutput is None:
        gradOutput = torch.ones(())
    criterion(*args).backward(gradOutput.type_as(input_tuple[0]))
    if isinstance(input, tuple):
        return tuple(map(lambda i: i.grad.data, input))
    else:
        return input.grad.data
def _zero_grad_parameters(self, module):
    """Zero and detach the gradient of every parameter that has one."""
    for p in module.parameters():
        if p.grad is not None:
            p.grad.data.zero_()
            p.grad.detach_()

def _get_parameters(self, module):
    """Return parallel lists of (parameters, their gradients)."""
    params = []
    d_params = []
    for p in module.parameters():
        params.append(p)
        d_params.append(p.grad)
    return params, d_params
def test_module_backcompat(self):
    """A module serialized by an older PyTorch still loads and runs."""
    from torch.serialization import SourceChangeWarning
    path = download_file('https://download.pytorch.org/test_data/linear.pt')
    with warnings.catch_warnings():
        # The pickled module's source differs from the current one.
        warnings.simplefilter('ignore', SourceChangeWarning)
        m = torch.load(path)
    input = torch.randn(2, 3, dtype=torch.float)
    self.assertEqual(m(input).size(), (2, 5))
def test_hooks(self):
    """Forward/backward hooks fire with the right arguments and counts,
    including multiple hooks on one module and hook removal."""
    module = nn.Sigmoid()
    input = torch.ones(5, 5, requires_grad=True)
    counter = {
        'forwards': 0,
        'backwards': 0
    }

    def fw_hook(inc, h_module, input, output):
        self.assertIsInstance(input, tuple)
        self.assertTrue(isinstance(output, torch.Tensor))
        self.assertTrue(h_module is module)
        self.assertEqual(input[0].data, torch.ones(5, 5))
        # sigmoid(1) == 1 / (1 + e^-1)
        self.assertEqual(output.data, torch.Tensor(5, 5).fill_(1 / (1 + 1 / math.e)))
        counter['forwards'] += inc

    def bw_hook(inc, h_module, grad_input, grad_output):
        self.assertIsInstance(grad_input, tuple)
        self.assertIsInstance(grad_output, tuple)
        self.assertTrue(h_module is module)
        self.assertEqual(grad_output[0].data, torch.ones(5, 5) * 2)
        counter['backwards'] += inc

    test_fwd = module.register_forward_hook(lambda *args: fw_hook(1, *args))
    module(input)
    module(input)
    self.assertEqual(counter['forwards'], 2)
    self.assertEqual(counter['backwards'], 0)
    test_bwd = module.register_backward_hook(
        lambda *args: bw_hook(1, *args))
    output = module(input)
    self.assertEqual(counter['forwards'], 3)
    self.assertEqual(counter['backwards'], 0)
    output.backward(torch.ones(5, 5) * 2, retain_graph=True)
    self.assertEqual(counter['forwards'], 3)
    self.assertEqual(counter['backwards'], 1)
    # Backward again on the retained graph fires the hook again.
    output.backward(torch.ones(5, 5) * 2, retain_graph=True)
    self.assertEqual(counter['forwards'], 3)
    self.assertEqual(counter['backwards'], 2)
    # A second forward hook adds 2 per call (3 total per forward).
    test2_fwd = module.register_forward_hook(lambda *args: fw_hook(2, *args))
    output = module(input)
    self.assertEqual(counter['forwards'], 6)
    self.assertEqual(counter['backwards'], 2)
    test2_bwd = module.register_backward_hook(lambda *args: bw_hook(2, *args))
    module(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 9)
    self.assertEqual(counter['backwards'], 5)
    test2_bwd.remove()
    module(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 12)
    self.assertEqual(counter['backwards'], 6)
    test2_fwd.remove()
    module(input).backward(torch.ones(5, 5) * 2)
    self.assertEqual(counter['forwards'], 13)
    self.assertEqual(counter['backwards'], 7)
    test_fwd.remove()
    test_bwd.remove()
def test_hook_cpp(self):
    """Backward hooks also fire for C++-implemented modules (BatchNorm1d)."""
    counter = [0]
    bn = nn.BatchNorm1d(5)

    def hook(module, grad_inputs, grad_outputs):
        counter[0] += 1
        # Gradients w.r.t. input, weight and bias.
        self.assertEqual(len(grad_inputs), 3)
        self.assertEqual(len(grad_outputs), 1)
        self.assertEqual(module, bn)

    bn.register_backward_hook(hook)
    output = bn(torch.randn(5, 5, requires_grad=True))
    output.sum().backward()
def test_hook_fail(self):
    """Hooks returning the wrong thing raise errors naming the hook."""
    module = nn.Sigmoid()
    input = torch.randn(5, 5, requires_grad=True)

    # Forward hooks must return None; backward hooks must return a
    # grad_input tuple of the expected length.
    def fw_fail1(self, input, output):
        return output

    def fw_fail2(self, input, output):
        return input

    def bw_fail1(self, grad_input, grad_output):
        return grad_input[:-1]

    def bw_fail2(self, grad_input, grad_output):
        return grad_input + (torch.randn(2, 2),)

    with module.register_forward_hook(fw_fail1):
        with self.assertRaises(RuntimeError) as err:
            module(input)
        self.assertIn("fw_fail", err.exception.args[0])
        self.assertIn("didn't return None", err.exception.args[0])
    with module.register_forward_hook(fw_fail2):
        with self.assertRaises(RuntimeError) as err:
            module(input)
        self.assertIn("fw_fail2", err.exception.args[0])
        self.assertIn("didn't return None", err.exception.args[0])
    with module.register_backward_hook(bw_fail1):
        with self.assertRaises(RuntimeError) as err:
            module(input).sum().backward()
        self.assertIn("bw_fail", err.exception.args[0])
        self.assertIn("got 0, but expected 1", err.exception.args[0])
    with module.register_backward_hook(bw_fail2):
        with self.assertRaises(RuntimeError) as err:
            module(input).sum().backward()
        self.assertIn("bw_fail2", err.exception.args[0])
        self.assertIn("got 2, but expected 1", err.exception.args[0])
def test_hook_writeable(self):
    """A backward hook may rewrite grad_input; the rewritten value is what
    reaches input.grad."""
    module = nn.Linear(5, 5)
    input = torch.randn(5, 5, requires_grad=True)

    def bw_hook(module, grad_input, grad_output):
        for grad in grad_input:
            self.assertTrue(isinstance(grad, torch.Tensor))
        for grad in grad_output:
            self.assertTrue(isinstance(grad, torch.Tensor))
        # Doubling grad_input here must double what input.grad receives.
        return tuple(gi * 2 for gi in grad_input)

    module.register_backward_hook(bw_hook)
    module(input).backward(torch.ones(5, 5))
    expected_grad = torch.ones(5, 5).mm(module.weight.data) * 2
    self.assertEqual(input.grad.data, expected_grad)
def test_zero_grad(self):
    """zero_grad only touches parameters that actually accumulated grads."""
    i = torch.randn(2, 5, requires_grad=True)
    module = nn.Linear(5, 5)
    # With every parameter frozen, zero_grad is a no-op.
    for p in module.parameters():
        p.requires_grad = False
    module.zero_grad()
    module.weight.requires_grad = True
    module.zero_grad()
    self.assertIsNone(module.weight.grad)  # uninitialized grad
    module(i).sum().backward()
    self.assertIsNotNone(module.weight.grad)
    self.assertGreater(module.weight.grad.data.abs().sum(), 0)
    module.zero_grad()
    self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
    module.bias.requires_grad = True
    module.zero_grad()
    self.assertIsNotNone(module.weight.grad)
    # The bias never accumulated a gradient, so it is still None.
    self.assertIsNone(module.bias.grad)
    module(i).sum().backward()
    self.assertIsNotNone(module.weight.grad)
    self.assertIsNotNone(module.bias.grad)
    self.assertGreater(module.weight.grad.data.abs().sum(), 0)
    self.assertGreater(module.bias.grad.data.abs().sum(), 0)
    module.zero_grad()
    self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())
    self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())
def test_no_grad(self):
    """A forward under torch.no_grad() produces an output that neither
    requires grad nor can be backpropagated."""
    module = nn.Conv2d(2, 5, kernel_size=3, padding=1)
    input = torch.randn(1, 2, 10, 10)
    x = input
    y = input.clone()
    output = module(x)
    self.assertTrue(output.requires_grad)
    output.backward(torch.ones(1, 5, 10, 10))
    with torch.no_grad():
        output2 = module(y)
        self.assertFalse(output2.requires_grad)
        self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))

def test_invalid_conv2d(self):
    """Conv2d raises when the dilated kernel exceeds the padded input."""
    module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2)
    input = torch.empty(1, 1, 4, 4)
    self.assertRaises(RuntimeError, lambda: module(input))

def test_invalid_conv3d(self):
    """Conv3d raises when the dilated kernel exceeds the padded input."""
    module = torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2)
    input = torch.empty(1, 1, 4, 4, 4)
    self.assertRaises(RuntimeError, lambda: module(input))
def _test_dropout(self, cls, input):
    """Shared checks for dropout variants: with input filled with 1-p, the
    output and gradient means should stay near 1-p (unbiased scaling)."""
    p = 0.2
    input.fill_(1 - p)
    module = cls(p)
    input_var = torch.tensor(input, requires_grad=True)
    output = module(input_var)
    self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
    output.backward(input)
    self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)
    # Inplace variant: the `+ 0` creates a fresh graph node so the inplace
    # op does not clobber a leaf tensor.
    module = cls(p, True)
    input_var = torch.tensor(input.clone(), requires_grad=True)
    output = module(input_var + 0)
    self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)
    output.backward(input)
    self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)
    # Check that these don't raise errors
    module.__repr__()
    str(module)
def _test_alpha_dropout(self, cls, input):
    """Alpha dropout should approximately preserve the input's mean/std
    for a range of drop probabilities."""
    mean = input.mean()
    std = input.std()
    for p in [0.2, 0.5, 0.8]:
        module = cls(p)
        input_var = torch.tensor(input, requires_grad=True)
        output = module(input_var)
        # output mean should be close to input mean
        self.assertLess(abs(output.data.mean() - mean), 0.1)
        # output std should be close to input std
        self.assertLess(abs(output.data.std() - std), 0.1)
        output.backward(input)
def test_parameters(self):
    """parameters() deduplicates modules/parameters shared by reference."""
    def num_params(module):
        return len(list(module.parameters()))

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # l1 and l2 alias the same Linear, so its params count once.
            self.l1 = l
            self.l2 = l
            self.param = Parameter(torch.Tensor(3, 5))

    l = nn.Linear(10, 20)
    n = Net()
    # s repeats n four times; dedup keeps the count at n's 3 params.
    s = nn.Sequential(n, n, n, n)
    self.assertEqual(num_params(l), 2)
    self.assertEqual(num_params(n), 3)
    self.assertEqual(num_params(s), 3)

def test_named_parameters(self):
    """named_parameters() yields deduplicated, dot-separated names."""
    def num_params(module):
        return len(dict(module.named_parameters()))

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.l1 = l
            self.l2 = l
            self.param = Parameter(torch.Tensor(3, 5))

    l = nn.Linear(10, 20)
    n = Net()
    s = nn.Sequential(n, n, n, n)
    for name in dict(l.named_parameters()).keys():
        self.assertTrue(name in ['bias', 'weight'])
    for name in dict(n.named_parameters()).keys():
        self.assertTrue(name in ['l1.bias', 'l1.weight', 'param'])
    for name in dict(s.named_parameters()).keys():
        self.assertTrue(name in ['0.l1.bias', '0.l1.weight', '0.param'])
    self.assertEqual(num_params(l), 2)
    self.assertEqual(num_params(n), 3)
    self.assertEqual(num_params(s), 3)
def test_call_supports_python_dict_output(self):
    """Module.__call__ may return a dict; backward hooks still fire."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.l1 = nn.Linear(10, 20)
            self.register_backward_hook(self.hook)
            self.check_backward_hook_flag = False

        def hook(self, module, grad_out, grad_in):
            # Only needs to record that the hook ran.
            self.check_backward_hook_flag = True

        def forward(self, inputs):
            return {"output": self.l1(inputs).sum()}

    net = Net()
    model_output = net(torch.randn([5, 10]))
    model_output["output"].backward()
    self.assertTrue(net.check_backward_hook_flag)
def test_children(self):
    """children() yields each direct child once, in registration order."""
    l1 = nn.Linear(2, 2)
    l2 = nn.Linear(2, 2)
    l3 = nn.Linear(2, 2)
    l4 = nn.Linear(2, 2)
    subnet = nn.Sequential(l3, l4)
    # l1 and l2 appear twice but are deduplicated.
    s = nn.Sequential(l1, l2, l1, l2, subnet)
    self.assertEqual(list(s.children()), [l1, l2, subnet])

def test_dir(self):
    """dir() exposes registered submodules, parameters and buffers."""
    linear = nn.Linear(2, 2)
    linear._test_submodule = nn.Linear(2, 2)
    linear._test_parameter = Parameter(torch.Tensor(2, 2))
    linear.register_buffer('_test_buffer', torch.Tensor(2, 2))
    keys = dir(linear)
    self.assertIn('_test_submodule', keys)
    self.assertIn('_test_parameter', keys)
    self.assertIn('_test_buffer', keys)
    # Everything dir() lists must actually be accessible.
    for key in keys:
        self.assertTrue(hasattr(linear, key))
def test_repr(self):
    """repr() formats empty, one-line and nested modules correctly."""
    # no extra information or sub-modules
    empty_sequential = nn.Sequential()
    expected_repr_empty = 'Sequential()'
    self.assertEqual(repr(empty_sequential), expected_repr_empty)
    # one liner extra information
    linear = nn.Linear(1, 1)
    expected_repr_linear = 'Linear(in_features=1, out_features=1, bias=True)'
    self.assertEqual(repr(linear), expected_repr_linear)
    # sub-modules repr
    sequential = nn.Sequential(linear)
    expected_repr_sequential = 'Sequential(\n' \
        '  (0): Linear(in_features=1, out_features=1, bias=True)\n' \
        ')'
    self.assertEqual(repr(sequential), expected_repr_sequential)

def test_dir_digit(self):
    """Numeric child names (Sequential indices) are excluded from dir()."""
    model = nn.Sequential(nn.Linear(2, 2))
    keys = dir(model)
    self.assertNotIn('0', keys)
def test_named_children(self):
l1 = nn.Linear(2, 2)
l2 = nn.Linear(2, 2)
l3 = nn.Linear(2, 2)
l4 = nn.Linear(2, 2)
subnet = nn.Sequential(l3, l4)
s = nn.Sequential()
with self.assertRaises(KeyError):
s.add_module('', l1)
with self.assertRaises(KeyError):
s.add_module('name.with.dot', l1)
s.add_module('layer1', l1)
s.add_module('layer2', l2)
s.add_module('layer3', l1)
s.add_module('layer4', l2)
s.add_module('subnet', subnet)
self.assertEqual(list(s.named_children()), [('layer1', l1), ('layer2', l2), ('subnet', subnet)])
    def test_modules(self):
        """Module.modules() walks the tree depth-first and yields each module
        exactly once, deduplicated by identity."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                # `l` resolves from the enclosing test scope at Net()
                # instantiation time (late binding), not class-definition time.
                self.l1 = l
                self.l2 = l
                self.param = torch.empty(3, 5)
        l = nn.Linear(10, 20)
        n = Net()
        s = nn.Sequential(n, n, n, n)
        # n appears four times and l twice, but each is reported once.
        self.assertEqual(list(s.modules()), [s, n, l])
    def test_named_modules(self):
        """named_modules() yields dot-separated qualified names; duplicate
        module objects are reported only under their first name."""
        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                # `l` and `block` resolve from the test scope when Net() is
                # constructed below (late binding).
                self.l1 = l
                self.l2 = l
                self.param = torch.empty(3, 5)
                self.block = block
        l = nn.Linear(10, 20)
        l1 = nn.Linear(10, 20)
        l2 = nn.Linear(10, 20)
        block = nn.Sequential()
        block.add_module('linear1', l1)
        block.add_module('linear2', l2)
        n = Net()
        s = nn.Sequential(n, n, n, n)
        # '' is the root; repeated n / l entries collapse to the first path.
        self.assertEqual(list(s.named_modules()), [('', s), ('0', n), ('0.l1', l),
                                                   ('0.block', block), ('0.block.linear1', l1),
                                                   ('0.block.linear2', l2)])
def test_register_buffer_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'buffer name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_buffer(1, torch.rand(5))
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_buffer(None, torch.rand(5))
def test_register_buffer_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
del m.attribute_name
m.register_parameter('attribute_name', nn.Parameter())
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
del m.attribute_name
m.add_module('attribute_name', nn.Module())
with self.assertRaises(KeyError):
m.register_buffer('attribute_name', torch.rand(5))
def test_register_buffer_raises_error_if_not_tensor(self):
m = nn.Module()
with self.assertRaises(TypeError):
m.register_buffer('attribute_name', 5)
def test_register_buffer_allows_overwriting_with_same_name(self):
m = nn.Module()
buffer1 = torch.rand(5)
buffer2 = buffer1 + 5
buffer3 = None
m.register_buffer('buffer_name', buffer1)
self.assertEqual(m.buffer_name, buffer1)
m.register_buffer('buffer_name', buffer2)
self.assertEqual(m.buffer_name, buffer2)
m.register_buffer('buffer_name', buffer3)
self.assertEqual(m.buffer_name, buffer3)
def test_register_parameter_raises_error_if_name_is_not_string(self):
m = nn.Module()
expected_error = 'parameter name should be a string. Got '
with self.assertRaisesRegex(TypeError, expected_error + 'int'):
m.register_parameter(1, nn.Parameter())
with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
m.register_parameter(None, nn.Parameter())
def test_register_parameter_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
del m.attribute_name
m.register_buffer('attribute_name', torch.rand(5))
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
del m.attribute_name
m.add_module('attribute_name', nn.Module())
with self.assertRaises(KeyError):
m.register_parameter('attribute_name', nn.Parameter())
def test_register_parameter_allows_overwriting_with_same_name(self):
m = nn.Module()
param1 = nn.Parameter(torch.rand(5))
param2 = nn.Parameter(param1.data + 5)
param3 = None
m.register_parameter('param_name', param1)
self.assertEqual(m.param_name, param1)
m.register_parameter('param_name', param2)
self.assertEqual(m.param_name, param2)
m.register_parameter('param_name', param3)
self.assertEqual(m.param_name, param3)
def test_add_module_raises_error_if_attr_exists(self):
m = nn.Module()
m.attribute_name = 5
with self.assertRaises(KeyError):
m.add_module('attribute_name', nn.Module())
del m.attribute_name
m.register_buffer('attribute_name', torch.rand(5))
with self.assertRaises(KeyError):
m.add_module('attribute_name', nn.Module())
del m.attribute_name
m.register_parameter('attribute_name', nn.Parameter())
with self.assertRaises(KeyError):
m.add_module('attribute_name', nn.Module())
def test_Sequential_getitem(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3, l4)
self.assertIs(n[0], l1)
self.assertIs(n[1], l2)
self.assertIs(n[2], l3)
self.assertIs(n[3], l4)
self.assertIs(n[torch.tensor(3, dtype=torch.int64)], l4)
self.assertEqual(n[1:], nn.Sequential(l2, l3, l4))
self.assertEqual(n[3:], nn.Sequential(l4))
self.assertEqual(n[:-1], nn.Sequential(l1, l2, l3))
self.assertEqual(n[:-3], nn.Sequential(l1))
self.assertEqual(n[::-1], nn.Sequential(l4, l3, l2, l1))
def test_Sequential_setitem(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3)
n[0] = l4
n[-1] = l4
n[torch.tensor(1, dtype=torch.int16)] = l1
self.assertIs(n[0], l4)
self.assertIs(n[1], l1)
self.assertIs(n[2], l4)
def test_Sequential_setitem_named(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(OrderedDict([
('linear1', l1),
('linear2', l2),
('linear3', l3),
]))
n[0] = l4
n[-1] = l4
self.assertEqual(n.linear1, l4)
self.assertEqual(n.linear3, l4)
def test_Sequential_delitem(self):
l1 = nn.Linear(10, 20)
l2 = nn.Linear(20, 30)
l3 = nn.Linear(30, 40)
l4 = nn.Linear(40, 50)
n = nn.Sequential(l1, l2, l3, l4)
del n[-1]
self.assertEqual(n, nn.Sequential(l1, l2, l3))
del n[1::2]
self.assertEqual(n, nn.Sequential(l1, l3))
    def test_ModuleList(self):
        """ModuleList must mirror a plain Python list through +=, append,
        extend, int/tensor indexing, slicing and deletion, and reject
        non-iterable module arguments."""
        modules = [nn.ReLU(), nn.Linear(5, 5)]
        module_list = nn.ModuleList(modules)
        # Invariant: module_list and the reference list stay element-wise
        # identical (by object identity) after every mutation below.
        def check():
            self.assertEqual(len(module_list), len(modules))
            for m1, m2 in zip(modules, module_list):
                self.assertIs(m1, m2)
            for m1, m2 in zip(modules, module_list.children()):
                self.assertIs(m1, m2)
            for i in range(len(modules)):
                self.assertIs(module_list[i], modules[i])
        check()
        modules += [nn.Conv2d(3, 4, 3)]
        module_list += [modules[-1]]
        check()
        modules.append(nn.Tanh())
        module_list.append(modules[-1])
        check()
        next_modules = [nn.Linear(5, 5), nn.Sigmoid()]
        modules.extend(next_modules)
        module_list.extend(next_modules)
        check()
        modules[2] = nn.Conv2d(5, 3, 2)
        module_list[2] = modules[2]
        check()
        # 0-dim integer tensors are accepted as indices for assignment too.
        idx = torch.tensor(2, dtype=torch.int32)
        modules[2] = nn.Conv2d(5, 3, 2)
        module_list[idx] = modules[2]
        self.assertIs(module_list[idx], modules[2])
        check()
        self.assertEqual(module_list[1:], nn.ModuleList(modules[1:]))
        self.assertEqual(module_list[3:], nn.ModuleList(modules[3:]))
        self.assertEqual(module_list[:-1], nn.ModuleList(modules[:-1]))
        self.assertEqual(module_list[:-3], nn.ModuleList(modules[:-3]))
        self.assertEqual(module_list[::-1], nn.ModuleList(modules[::-1]))
        del module_list[-1]
        self.assertEqual(module_list, nn.ModuleList(modules[:-1]))
        del module_list[1::2]
        self.assertEqual(module_list, nn.ModuleList(modules[:-1][0::2]))
        # A bare module (not wrapped in an iterable) must be rejected.
        with self.assertRaises(TypeError):
            module_list += nn.ReLU()
        with self.assertRaises(TypeError):
            module_list.extend(nn.ReLU())
        l1 = nn.Linear(1, 2)
        l2 = nn.Linear(2, 3)
        l3 = nn.Linear(3, 2)
        l4 = nn.Linear(2, 3)
        subnet = nn.Sequential(l3, l4)
        s = nn.Sequential(
            OrderedDict([
                ("layer1", l1),
                ("layer2", l2),
                ("layer3", l3),
                ("layer4", l4),
                ("subnet_layer", subnet)
            ])
        )
        # extend() must also accept an arbitrary iterator of modules.
        modules = list(s.modules())
        module_list = nn.ModuleList()
        module_list.extend(s.modules())
        check()
    def test_ModuleDict(self):
        """ModuleDict must mirror an OrderedDict through assignment, update
        (from pairs, OrderedDict, and plain dict), deletion, pop and clear,
        and reject non-mapping / non-string-key inputs."""
        modules = OrderedDict([
            ('act', nn.ReLU()),
            ('conv', nn.Conv2d(10, 10, 5)),
            ('fc', nn.Linear(5, 5)),
        ])
        module_dict = nn.ModuleDict(modules)
        # Invariant: module_dict and the reference OrderedDict agree on
        # length, iteration order, keys, values and items after each mutation.
        def check():
            self.assertEqual(len(module_dict), len(modules))
            for k1, m2 in zip(modules, module_dict.children()):
                self.assertIs(modules[k1], m2)
            for k1, k2 in zip(modules, module_dict):
                self.assertIs(modules[k1], module_dict[k2])
            for k in module_dict:
                self.assertIs(module_dict[k], modules[k])
            for k in module_dict.keys():
                self.assertIs(module_dict[k], modules[k])
            for k, v in module_dict.items():
                self.assertIs(modules[k], v)
            for k1, m2 in zip(modules, module_dict.values()):
                self.assertIs(modules[k1], m2)
            for k in modules.keys():
                self.assertTrue(k in module_dict)
        check()
        modules['conv'] = nn.Conv2d(3, 4, 3)
        module_dict['conv'] = modules['conv']
        check()
        next_modules = [
            ('fc2', nn.Linear(5, 5)),
            ('act', nn.Sigmoid()),
        ]
        modules.update(next_modules)
        module_dict.update(next_modules)
        check()
        next_modules = OrderedDict([
            ('fc3', nn.Linear(5, 5)),
            ('act2', nn.Sigmoid()),
        ])
        modules.update(next_modules)
        module_dict.update(next_modules)
        check()
        next_modules = {
            'fc4': nn.Linear(5, 5),
            'act3': nn.Sigmoid()
        }
        # Plain-dict updates are applied in sorted key order, hence the
        # sorted() on the reference side to keep iteration orders aligned.
        modules.update(sorted(next_modules.items()))
        module_dict.update(next_modules)
        check()
        del module_dict['fc']
        del modules['fc']
        check()
        with self.assertRaises(TypeError):
            module_dict.update(nn.ReLU())
        with self.assertRaises(TypeError):
            module_dict.update([nn.ReLU()])
        with self.assertRaises(ValueError):
            module_dict.update([[nn.ReLU()]])
        with self.assertRaises(TypeError):
            module_dict[1] = nn.ReLU()
        # Construction from an iterator of (name, module) pairs.
        s = nn.Sequential(modules)
        module_dict = nn.ModuleDict(s.named_children())
        check()
        c = module_dict.pop('conv')
        self.assertIs(c, modules['conv'])
        modules.pop('conv')
        check()
        module_dict.clear()
        self.assertEqual(len(module_dict), 0)
        modules.clear()
        check()
    def test_ParameterList(self):
        """ParameterList must mirror a plain Python list through +=, append,
        extend, int/tensor indexing and slicing, and reject a bare Parameter
        where an iterable is required."""
        def make_param():
            return Parameter(torch.randn(10, 10))
        parameters = [make_param(), make_param()]
        param_list = nn.ParameterList(parameters)
        # Invariant: param_list and the reference list stay element-wise
        # identical (by object identity) after every mutation below.
        def check():
            self.assertEqual(len(parameters), len(param_list))
            for p1, p2 in zip(parameters, param_list):
                self.assertIs(p1, p2)
            for p1, p2 in zip(parameters, param_list.parameters()):
                self.assertIs(p1, p2)
            for i in range(len(parameters)):
                self.assertIs(parameters[i], param_list[i])
        check()
        parameters += [make_param()]
        param_list += [parameters[-1]]
        check()
        parameters.append(make_param())
        param_list.append(parameters[-1])
        check()
        next_params = [make_param(), make_param()]
        parameters.extend(next_params)
        param_list.extend(next_params)
        check()
        parameters[2] = make_param()
        param_list[2] = parameters[2]
        check()
        # 0-dim integer tensors are accepted as indices for assignment too.
        idx = torch.tensor(2, dtype=torch.int32)
        parameters[2] = make_param()
        param_list[idx] = parameters[2]
        self.assertIs(param_list[idx], parameters[2])
        check()
        self.assertEqual(param_list[1:], nn.ParameterList(parameters[1:]))
        self.assertEqual(param_list[3:], nn.ParameterList(parameters[3:]))
        self.assertEqual(param_list[:-1], nn.ParameterList(parameters[:-1]))
        self.assertEqual(param_list[:-3], nn.ParameterList(parameters[:-3]))
        self.assertEqual(param_list[::-1], nn.ParameterList(parameters[::-1]))
        with self.assertRaises(TypeError):
            param_list += make_param()
        with self.assertRaises(TypeError):
            param_list.extend(make_param())
        l1 = nn.Linear(1, 2)
        l2 = nn.Linear(2, 3)
        l3 = nn.Linear(3, 2)
        l4 = nn.Linear(2, 3)
        subnet = nn.Sequential(l3, l4)
        s = nn.Sequential(
            OrderedDict([
                ("layer1", l1),
                ("layer2", l2),
                ("layer3", l3),
                ("layer4", l4),
                ("subnet_layer", subnet)
            ])
        )
        # extend() must also accept an arbitrary iterator of parameters.
        parameters = list(s.parameters())
        param_list = nn.ParameterList()
        param_list.extend(s.parameters())
        check()
    def test_ParameterDict(self):
        """ParameterDict must mirror an OrderedDict through assignment,
        update (from pairs, OrderedDict, and plain dict), deletion, pop and
        clear, and reject non-mapping / non-string-key inputs."""
        parameters = OrderedDict([
            ('p1', Parameter(torch.randn(10, 10))),
            ('p2', Parameter(torch.randn(10, 10))),
            ('p3', Parameter(torch.randn(10, 10))),
        ])
        parameter_dict = nn.ParameterDict(parameters)
        # Invariant: parameter_dict and the reference OrderedDict agree on
        # length, iteration order, keys, values and items after each mutation.
        def check():
            self.assertEqual(len(parameter_dict), len(parameters))
            for k1, m2 in zip(parameters, parameter_dict.parameters()):
                self.assertIs(parameters[k1], m2)
            for k1, k2 in zip(parameters, parameter_dict):
                self.assertIs(parameters[k1], parameter_dict[k2])
            for k in parameter_dict:
                self.assertIs(parameter_dict[k], parameters[k])
            for k in parameter_dict.keys():
                self.assertIs(parameter_dict[k], parameters[k])
            for k, v in parameter_dict.items():
                self.assertIs(v, parameters[k])
            for k1, m2 in zip(parameters, parameter_dict.values()):
                self.assertIs(parameters[k1], m2)
            for k in parameters.keys():
                self.assertTrue(k in parameter_dict)
        check()
        parameters['p4'] = Parameter(torch.randn(10, 10))
        parameter_dict['p4'] = parameters['p4']
        check()
        next_parameters = [
            ('p5', Parameter(torch.randn(10, 10))),
            ('p2', Parameter(torch.randn(10, 10))),
        ]
        parameters.update(next_parameters)
        parameter_dict.update(next_parameters)
        check()
        next_parameters = OrderedDict([
            ('p6', Parameter(torch.randn(10, 10))),
            ('p5', Parameter(torch.randn(10, 10))),
        ])
        parameters.update(next_parameters)
        parameter_dict.update(next_parameters)
        check()
        next_parameters = {
            'p8': Parameter(torch.randn(10, 10)),
            'p7': Parameter(torch.randn(10, 10))
        }
        # Plain-dict updates are applied in sorted key order, hence the
        # sorted() on the reference side to keep iteration orders aligned.
        parameters.update(sorted(next_parameters.items()))
        parameter_dict.update(next_parameters)
        check()
        del parameter_dict['p3']
        del parameters['p3']
        check()
        with self.assertRaises(TypeError):
            parameter_dict.update(1)
        with self.assertRaises(TypeError):
            parameter_dict.update([1])
        with self.assertRaises(ValueError):
            parameter_dict.update(Parameter(torch.randn(10, 10)))
        with self.assertRaises(TypeError):
            parameter_dict[1] = Parameter(torch.randn(10, 10))
        p_pop = parameter_dict.pop('p4')
        self.assertIs(p_pop, parameters['p4'])
        parameters.pop('p4')
        check()
        parameter_dict.clear()
        self.assertEqual(len(parameter_dict), 0)
        parameters.clear()
        check()
def test_add_module(self):
l = nn.Linear(10, 20)
net = nn.Module()
net.l = l
net.l2 = l
net.add_module('empty', None)
self.assertEqual(net.l, l)
self.assertEqual(net.l2, l)
self.assertEqual(net.empty, None)
net.add_module('l3', l)
self.assertEqual(net.l3, l)
l3 = nn.Linear(20, 10)
net.add_module('l', l3)
self.assertEqual(net.l, l3)
self.assertRaises(TypeError, lambda: net.add_module('x', 'non-module'))
self.assertRaisesRegex(TypeError, 'module name should be a string. Got int',
lambda: net.add_module(1, l))
self.assertRaisesRegex(TypeError, 'module name should be a string. Got NoneType',
lambda: net.add_module(None, l))
def test_module_to_argparse(self):
net = nn.Sequential(nn.Linear(3, 3))
cpu = torch.device('cpu')
with self.assertRaises(TypeError):
net.to(cpu, True)
with self.assertRaises(TypeError):
net.to(torch.long)
with self.assertRaises(TypeError):
net.to(None, True)
with self.assertRaises(TypeError):
net.to(cpu, torch.long, True)
with self.assertRaises(TypeError):
net.to(cpu, dtype=torch.long, non_blocking=True)
with self.assertRaises(TypeError):
net.to([])
with self.assertRaises(TypeError):
net.to({}, non_blocking=True)
with self.assertRaises(TypeError):
net.to(torch.tensor(3, dtype=torch.long), non_blocking=True)
with self.assertRaises(TypeError):
net.to(cpu, torch.tensor(3, dtype=torch.long), non_blocking=True)
    def test_type(self):
        """float()/double()/to(dtype) convert floating-point parameters while
        integer buffers keep their dtype; type()/to(tensor) convert device
        and dtype together."""
        l = nn.Linear(10, 20)
        net = nn.Module()
        net.l = l
        net.l2 = l
        net.add_module('empty', None)
        net.register_buffer('indices', torch.LongTensor(1))
        net.float()
        self.assertIsInstance(l.weight.data, torch.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.FloatTensor)
        # integer buffer is untouched by floating-point conversions
        self.assertIsInstance(net.indices, torch.LongTensor)
        net.double()
        self.assertIsInstance(l.weight.data, torch.DoubleTensor)
        self.assertIsInstance(l.bias.data, torch.DoubleTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        net.to(torch.half)
        self.assertIsInstance(l.weight.data, torch.HalfTensor)
        self.assertIsInstance(l.bias.data, torch.HalfTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        if TEST_CUDA:
            net.float().cuda()
            self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
            self.assertIsInstance(net.indices, torch.cuda.LongTensor)
            net.cpu()
            self.assertIsInstance(l.weight.data, torch.FloatTensor)
            self.assertIsInstance(l.bias.data, torch.FloatTensor)
            self.assertIsInstance(net.indices, torch.LongTensor)
            # to(device, dtype, non_blocking) positional form
            net.to("cuda", torch.double, True)
            self.assertIsInstance(l.weight.data, torch.cuda.DoubleTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.DoubleTensor)
            self.assertIsInstance(net.indices, torch.cuda.LongTensor)
            # to(tensor) adopts the tensor's device and dtype
            net.to(torch.empty(1, device="cuda:0", dtype=torch.half))
            self.assertIsInstance(l.weight.data, torch.cuda.HalfTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.HalfTensor)
            self.assertIsInstance(net.indices, torch.cuda.LongTensor)
        net.to(torch.device("cpu"), non_blocking=True)
        self.assertIsInstance(l.weight.data, torch.HalfTensor)
        self.assertIsInstance(l.bias.data, torch.HalfTensor)
        self.assertIsInstance(net.indices, torch.LongTensor)
        # type() converts every tensor, buffers included
        net.type(torch.FloatTensor)
        self.assertIsInstance(l.weight.data, torch.FloatTensor)
        self.assertIsInstance(l.bias.data, torch.FloatTensor)
        net.to(torch.DoubleTensor(1))
        self.assertIsInstance(l.weight.data, torch.DoubleTensor)
        self.assertIsInstance(l.bias.data, torch.DoubleTensor)
        if TEST_CUDA:
            net.type(torch.cuda.FloatTensor)
            self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
            self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
def test_non_leaf_parameters(self):
l1 = nn.Linear(10, 10)
l2 = nn.Linear(10, 10)
def assign_weight():
l2.weight = l1.weight + 2
self.assertRaises(TypeError, assign_weight)
# This should work though
l2.weight = Parameter(torch.randn(10, 10))
    def test_clip_grad_norm(self):
        """clip_grad_norm_ scales gradients uniformly down to max_norm for
        several norm types, leaves small gradients untouched, and accepts a
        single tensor as well as an iterable."""
        l = nn.Linear(10, 10)
        max_norm = 2
        # Reference total-norm computation over all parameter gradients.
        def compute_norm(norm_type):
            norm_type = float(norm_type)
            if norm_type != inf:
                total_norm = 0
                for p in l.parameters():
                    total_norm += p.grad.data.abs().pow(norm_type).sum()
                return pow(total_norm, 1. / norm_type)
            else:
                return max(p.grad.data.abs().max() for p in l.parameters())
        # All gradients must be scaled by the same factor; returns it.
        def compare_scaling(grads):
            p_scale = [p.grad.data.div(g).view(-1) for p, g in zip(l.parameters(), grads)]
            scale = torch.cat(p_scale)
            self.assertEqual(scale.std(), 0)
            return scale[0]
        grads = torch.arange(1., 101).view(10, 10), torch.ones(10).div(1000)
        for norm_type in [0.5, 1.5, 2, 4, 'inf']:
            for p, g in zip(l.parameters(), grads):
                p._grad = Variable(g.clone().view_as(p.data))
            norm_before = compute_norm(norm_type)
            # returned value is the norm *before* clipping
            norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
            norm_after = compute_norm(norm_type)
            self.assertEqual(norm, norm_before)
            self.assertEqual(norm_after, max_norm)
            self.assertLessEqual(norm_after, norm_before)
            compare_scaling(grads)
        # Small gradients should be left unchanged
        grads = torch.rand(10, 10).div(10000), torch.ones(10).div(500)
        for norm_type in [0.5, 1.5, 2, 4, 'inf']:
            for p, g in zip(l.parameters(), grads):
                p.grad.data.copy_(g)
            norm_before = compute_norm(norm_type)
            norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)
            norm_after = compute_norm(norm_type)
            self.assertEqual(norm, norm_before)
            self.assertEqual(norm_before, norm_after)
            self.assertLessEqual(norm_after, max_norm)
            scale = compare_scaling(grads)
            self.assertEqual(scale, 1)
        # Should accept a single Tensor as input
        p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
        g = torch.arange(1., 101).view(10, 10)
        p1._grad = g.clone()
        p2._grad = g.clone()
        for norm_type in [0.5, 1.5, 2, 4, 'inf']:
            clip_grad_norm_(p1, max_norm, norm_type=norm_type)
            clip_grad_norm_([p2], max_norm, norm_type=norm_type)
            self.assertEqual(p1.grad, p2.grad)
    def test_clip_grad_value(self):
        """clip_grad_value_ clamps every gradient element into
        [-clip_value, clip_value], skips parameters without gradients, and
        accepts a single tensor as well as an iterable."""
        l = nn.Linear(10, 10)
        clip_value = 2.5
        grad_w, grad_b = torch.arange(-50., 50).view(10, 10).div_(5), torch.ones(10).mul_(2)
        # second grad_list leaves the bias gradient as None on purpose
        for grad_list in [[grad_w, grad_b], [grad_w, None]]:
            for p, g in zip(l.parameters(), grad_list):
                p._grad = g.clone().view_as(p.data) if g is not None else g
            clip_grad_value_(l.parameters(), clip_value)
            for p in filter(lambda p: p.grad is not None, l.parameters()):
                self.assertLessEqual(p.grad.data.max(), clip_value)
                self.assertGreaterEqual(p.grad.data.min(), -clip_value)
        # Should accept a single Tensor as input
        p1, p2 = torch.randn(10, 10), torch.randn(10, 10)
        g = torch.arange(-50., 50).view(10, 10).div_(5)
        p1._grad = g.clone()
        p2._grad = g.clone()
        clip_grad_value_(p1, clip_value)
        clip_grad_value_([p2], clip_value)
        self.assertEqual(p1.grad, p2.grad)
def test_parameters_to_vector(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = parameters_to_vector(model.parameters())
self.assertEqual(vec.size(0), 980)
def test_vector_to_parameters(self):
conv1 = nn.Conv2d(3, 10, 5)
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)
vec = Variable(torch.arange(0., 980))
vector_to_parameters(vec, model.parameters())
sample = next(model.parameters())[0, 0, 0]
self.assertTrue(torch.equal(sample.data, vec.data[:5]))
# We don't want to make propagating NaN a hard requirement on ops, but for
# these easy ones, we should make them do so.
def _test_nonlinearity_propagate_nan(self, device):
def test(nonlinearity, *args, **kwargs):
x = torch.tensor([nan], device=device)
fn = getattr(F, nonlinearity)
try:
self.assertTrue(math.isnan(fn(x, *args, **kwargs).item()))
except Exception as e:
if 'not implemented' not in str(e):
raise
test('relu')
test('relu', inplace=True)
test('relu6')
test('elu')
test('selu')
test('celu')
test('rrelu')
test('rrelu', inplace=True)
test('hardtanh')
test('tanh')
test('sigmoid')
test('logsigmoid')
test('hardshrink')
test('tanhshrink')
test('softsign')
test('softmin', 0)
test('softmax', 0)
test('log_softmax', 0)
test('leaky_relu', 0.2)
def test_nonlinearity_propagate_nan(self):
self._test_nonlinearity_propagate_nan('cpu')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_nonlinearity_propagate_nan_cuda(self):
self._test_nonlinearity_propagate_nan('cuda')
def test_weight_norm(self):
input = torch.randn(3, 5)
m = nn.Linear(5, 7)
expected_output = m(input)
# add weight normalization
m = torch.nn.utils.weight_norm(m)
self.assertEqual(m.weight_v.size(), m.weight.size())
self.assertEqual(m.weight_g.size(), (7, 1))
self.assertEqual(m(input), expected_output)
# remove weight norm
m = torch.nn.utils.remove_weight_norm(m)
self.assertFalse(hasattr(m, 'weight_g'))
self.assertFalse(hasattr(m, 'weight_v'))
self.assertEqual(m(input), expected_output)
# test with dim=1
m = torch.nn.utils.weight_norm(m, dim=1)
self.assertEqual(m.weight_v.size(), m.weight.size())
self.assertEqual(m.weight_g.size(), (1, 5))
self.assertEqual(m(input), expected_output)
def test_weight_norm_pickle(self):
m = torch.nn.utils.weight_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
    def test_spectral_norm(self):
        """spectral_norm reparametrizes weight as weight_orig (parameter)
        plus weight_u (buffer); remove_spectral_norm restores a plain
        weight parameter."""
        input = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        m = torch.nn.utils.spectral_norm(m)
        # u has one entry per output row of the weight matrix
        self.assertEqual(m.weight_u.size(), torch.Size([m.weight.size(0)]))
        # weight_orig should be trainable
        self.assertTrue(hasattr(m, 'weight_orig'))
        self.assertTrue('weight_orig' in m._parameters)
        # weight_u should be just a reused buffer
        self.assertTrue(hasattr(m, 'weight_u'))
        self.assertTrue('weight_u' in m._buffers)
        self.assertTrue('weight' in m._buffers)
        # weight should be a plain attribute, not counted as a buffer or a param
        self.assertFalse('weight' in m._parameters)
        # it should also be sharing storage as `weight_orig`
        self.assertEqual(m.weight_orig.storage(), m.weight.storage())
        self.assertEqual(m.weight_orig.size(), m.weight.size())
        self.assertEqual(m.weight_orig.stride(), m.weight.stride())
        m = torch.nn.utils.remove_spectral_norm(m)
        self.assertFalse(hasattr(m, 'weight_orig'))
        self.assertFalse(hasattr(m, 'weight_u'))
        # weight should be converted back as a parameter
        self.assertTrue(hasattr(m, 'weight'))
        self.assertTrue('weight' in m._parameters)
    def test_spectral_norm_eval_remove(self):
        """In eval mode the spectral-norm power iteration is frozen, so
        repeated forwards, backwards, and add/remove cycles must not change
        the output; removing after train() must also preserve the output."""
        inp = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        m = torch.nn.utils.spectral_norm(m)
        x0 = m(inp)
        m.eval()
        # test that eval mode and removing / adding+removing doesn't change weight and output
        x1 = m(inp)
        x2 = m(inp)
        self.assertEqual(x0, x1)
        self.assertEqual(x0, x2)
        # test that we can backward several times without running into problems
        x1 = m(inp)
        x1.sum().backward()
        x1 = m(inp)
        x1.sum().backward()
        # test removing
        m = torch.nn.utils.remove_spectral_norm(m)
        x3 = m(inp)
        self.assertEqual(x0, x3)
        m = torch.nn.utils.spectral_norm(m)
        m = torch.nn.utils.remove_spectral_norm(m)
        x4 = m(inp)
        self.assertEqual(x0, x4)
        # check that removing after train doesn't change output
        m.train()
        m = torch.nn.utils.spectral_norm(m)
        for i in range(5):
            x0 = m(inp)
        m = torch.nn.utils.remove_spectral_norm(m)
        x1 = m(inp)
        self.assertEqual(x0, x1)
def test_spectral_norm_dim(self):
inp = torch.randn(2, 3, 10, 12)
m = nn.ConvTranspose2d(3, 4, (5, 6))
m = torch.nn.utils.spectral_norm(m)
# this should not run into incompatible shapes
x = m(inp)
# check that u refers to the same dimension
self.assertEqual(m.weight_u.shape, m.weight_orig[0, :, 0, 0].shape)
    def test_spectral_norm_forward(self):
        """Replicate one power-iteration step by hand and check the module's
        output matches the naive computation."""
        input = torch.randn(3, 5)
        m = nn.Linear(5, 7)
        m = torch.nn.utils.spectral_norm(m)
        # naive forward
        _weight, _bias, _u = m.weight_orig, m.bias, m.weight_u
        _weight_mat = _weight.view(_weight.size(0), -1)
        # one power-iteration update of (u, v)
        _v = torch.mv(_weight_mat.t(), _u)
        _v = F.normalize(_v, dim=0, eps=1e-12)
        _u = torch.mv(_weight_mat, _v)
        _u = F.normalize(_u, dim=0, eps=1e-12)
        # divide by the spectral-norm estimate sigma = u^T W v
        _weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))
        out_hat = torch.nn.functional.linear(input, _weight, _bias)
        expect_out = m(input)
        self.assertAlmostEqual(expect_out, out_hat)
def test_spectral_norm_pickle(self):
m = torch.nn.utils.spectral_norm(nn.Linear(5, 7))
m = pickle.loads(pickle.dumps(m))
self.assertIsInstance(m, nn.Linear)
def test_embedding_sparse_basic(self):
embedding = nn.Embedding(10, 20, sparse=True)
input = Variable(torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]]))
embedding(input).sum().backward()
self.assertTrue(embedding.weight.grad.is_sparse)
self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)
    def test_embedding_padding_idx(self):
        """padding_idx rows come back as zeros (dense and sparse, positive
        and negative indices), out-of-range values are rejected, and the
        padding row receives a zero gradient."""
        embedding = nn.Embedding(10, 20, padding_idx=0)
        input = Variable(torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]]))
        output = embedding(input)
        self.assertEqual(output[0][0].sum(), 0)
        self.assertEqual(output[1][2].sum(), 0)
        embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True)
        input = Variable(torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]]))
        output = embedding(input)
        self.assertEqual(output[0][0].sum(), 0)
        self.assertEqual(output[1][2].sum(), 0)
        # negative indexing check for padding_idx
        # padding_idx=-2, num_embeddings=10 ==> index 8 padded
        embedding = nn.Embedding(10, 20, padding_idx=-2)
        input = Variable(torch.LongTensor([[0, 2, 8, 5], [4, 8, 0, 9]]))
        output = embedding(input)
        self.assertEqual(output[0][2].sum(), 0)
        self.assertEqual(output[1][1].sum(), 0)
        embedding = nn.Embedding(10, 20, padding_idx=-2, sparse=True)
        input = Variable(torch.LongTensor([[0, 2, 8, 5], [4, 8, 0, 9]]))
        output = embedding(input)
        self.assertEqual(output[0][2].sum(), 0)
        self.assertEqual(output[1][1].sum(), 0)
        # out of bounds check for padding_idx
        self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=25)
        self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=-25)
        # test backward when input contains padding_idx
        padding_idx = 0
        embedding = nn.Embedding(5, 2, padding_idx=padding_idx)
        for n in (1, 2):
            for other_indices in ([], [1, 3], [2]):
                indices = torch.LongTensor(other_indices + [padding_idx] * n)
                pre = embedding.weight[padding_idx].clone()
                embedding(indices).sum().backward()
                # a zero gradient leaves the padding row unchanged
                after = (embedding.weight + embedding.weight.grad)[padding_idx]
                embedding.zero_grad()
                self.assertEqual(after, pre)
def test_embedding_max_norm(self):
embedding = nn.Embedding(22, 5, max_norm=1.0)
input = Variable(torch.LongTensor([2, 8, 8, 6]))
output = embedding(input)
self.assertEqual(output[1], output[2])
self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @repeat_test_for_types(ALL_TENSORTYPES)
    def test_embedding_max_norm_cuda(self, dtype=torch.float):
        """CUDA counterpart of test_embedding_max_norm, run for each tensor
        dtype by the repeat decorator."""
        embedding = nn.Embedding(22, 5, max_norm=1.0).to("cuda", dtype=dtype)
        # nn.Embedding only takes LongTensor as input
        input = torch.tensor([2, 8, 8, 6], device="cuda", dtype=torch.long)
        output = embedding(input)
        self.assertEqual(output[1], output[2])
        self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())
def test_embedding_from_pretrained(self):
a = torch.Tensor([[1, 2, 3], [4, 5, 6]])
embedding = nn.Embedding.from_pretrained(a)
self.assertEqual(a, embedding.weight.data)
input = Variable(torch.LongTensor([0, 1]))
output = embedding(input)
self.assertEqual(a, output)
def test_embedding_functional(self):
a = torch.tensor([
[1, 3, 2],
[0, 2, 1]
], dtype=torch.long)
embeddings = torch.rand(4, 3, requires_grad=True)
embed_old = torch.nn.Embedding(4, 3)
embed_old.weight.data = embeddings.data
res_old = embed_old(a)
res_F = F.embedding(a, embeddings)
self.assertEqual(res_old, res_F)
def _test_gumbel_softmax_st(self, cuda, dtype=torch.float):
th = torch.cuda if cuda else torch
"""
Things we might want to check:
- if we make various draws, do we get different one-hot values?
- is the proportion approximately in line with the softmax values?
- with hard, is it one-hot?
- with hard, is there still a gradient?
"""
num_draws = 100
K = 3
logits = torch.tensor([[0.2, 0.8, 0.1]])
if dtype != torch.half:
logits = logits.to(dtype)
logits_softmax = torch.nn.functional.softmax(logits, 1)
y_draws = torch.zeros(num_draws, K)
preds = torch.zeros(num_draws)
if cuda:
logits = logits.cuda()
y_draws = y_draws.cuda()
preds = preds.cuda()
exceed_limits = 0
for draw in range(num_draws):
logits_var = torch.tensor(logits, requires_grad=True)
y_draw = torch.nn.functional.gumbel_softmax(
logits_var,
hard=True)
assert y_draw.size() == logits.size()
# check we have a gradient
assert y_draw.requires_grad
err = y_draw - logits.new_tensor([[0, 0.5, 0.3]])
loss = (err * err).sum()
loss.backward()
if logits_var.grad.std() < 0.01 or logits_var.grad.std() > 1.0:
exceed_limits += 1
y_draws[draw] = y_draw.data
_, pred = y_draw.max(1)
preds[draw] = pred.data[0]
assert exceed_limits / num_draws < 0.05
# check it's approximately one-hot
num_ones = (y_draws == 1).int().sum()
num_zeros = (y_draws == 0).int().sum()
assert num_ones + num_zeros == num_draws * K
assert num_ones == num_draws
# check output classes approx in line with logits
num_class_one = (preds == 1).int().sum()
assert num_class_one < num_draws
assert num_class_one > num_draws / 3
def test_gumbel_softmax_st(self):
self._test_gumbel_softmax_st(False)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_gumbel_softmax_st_cuda(self, dtype=torch.float):
self._test_gumbel_softmax_st(True, dtype=dtype)
    def _test_EmbeddingBag(self, cuda, mode, sparse, dtype=torch.double):
        """Exercise nn.EmbeddingBag for one (device, mode, sparse) combo:
        known 1D/2D fixtures with exact expected outputs and weight
        gradients, randomized comparison against Embedding + sum/mean/max,
        and rejection of malformed input/offset pairs."""
        # check a known test example
        device = torch.device("cuda") if cuda else torch.device("cpu")
        es = nn.EmbeddingBag(5, 2, mode=mode, sparse=sparse).to(device, dtype)
        es.weight.data.copy_(torch.arange(1, 11, device=device, dtype=dtype).view_as(es.weight))
        input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=torch.long)
        # Empty list is only handled in CPU for now
        offsets = torch.tensor([0, 3], device=device, dtype=torch.long) if cuda \
            else torch.tensor([0, 0, 3, 3, 6], device=device, dtype=torch.long)
        grad_output = torch.tensor(
            [1, 2,
             3, 4], device=device, dtype=dtype).view(2, 2)
        # 99s mark rows for the empty bags; their grads must be ignored
        grad_output_with_empty = torch.tensor(
            [99, 99,
             1, 2,
             99, 99,
             3, 4,
             99, 99], device=device, dtype=dtype).view(5, 2)
        if mode == "sum" or mode == "mean":
            denominator = 1 if mode == "sum" else 3
            expected_output = torch.tensor(
                [[13, 16],
                 [13, 16]], device=device, dtype=dtype) / denominator
            expected_output_with_empty = torch.tensor(
                [[0, 0],
                 [13, 16],
                 [0, 0],
                 [13, 16],
                 [0, 0]], device=device, dtype=dtype) / denominator
            expected_grad_weight = torch.tensor(
                [[3, 4],
                 [5, 8],
                 [0, 0],
                 [1, 2],
                 [3, 4]], device=device, dtype=dtype) / denominator
        elif mode == "max":
            expected_output = torch.tensor(
                [[7, 8],
                 [9, 10]], device=device, dtype=dtype)
            expected_output_with_empty = torch.tensor(
                [[0, 0],
                 [7, 8],
                 [0, 0],
                 [9, 10],
                 [0, 0]], device=device, dtype=dtype)
            # max routes gradient only to the argmax rows
            expected_grad_weight = torch.tensor(
                [[0, 0],
                 [0, 0],
                 [0, 0],
                 [1, 2],
                 [3, 4]], device=device, dtype=dtype)
        output = es(input, offsets)
        output.backward(grad_output if cuda else grad_output_with_empty)
        es_weight_grad = es.weight.grad.data
        if sparse:
            es_weight_grad = es.weight.grad.data.to_dense()
        self.assertEqual(
            output.data,
            expected_output if cuda else expected_output_with_empty)
        self.assertEqual(es_weight_grad, expected_grad_weight, dtype2prec[dtype])
        # check same example except as 2D (2 x 3)
        input = input.data.view(2, -1)
        es.zero_grad()
        output = es(input)
        output.backward(grad_output)
        es_weight_grad = es.weight.grad.data
        if sparse:
            es_weight_grad = es.weight.grad.data.to_dense()
        self.assertEqual(output.data, expected_output)
        self.assertEqual(es_weight_grad, expected_grad_weight, dtype2prec[dtype])
        # now compare EmbeddingBag vs Embedding + Sum/Mean, for constant bag length
        def _test_vs_Embedding(N, D, B, L, max_norm=None):
            es = nn.EmbeddingBag(N, D, mode=mode, sparse=sparse, max_norm=max_norm).to(device, dtype)
            e = nn.Embedding(N, D, max_norm=max_norm).to(device, dtype)
            e.weight.data.copy_(es.weight.data)
            input = torch.randint(N, (B, L), device=device, dtype=torch.long)
            offsets = torch.arange(0, B, device=device, dtype=torch.long).mul_(L)
            grad_output = torch.rand(B, D, device=device, dtype=dtype)
            output = es(input.view(-1), offsets)
            if mode == 'sum':
                ref_output = e(input).sum(1)
            elif mode == 'mean':
                ref_output = e(input).mean(1)
            elif mode == 'max':
                ref_output = e(input).max(1)[0]
            self.assertEqual(output, ref_output, dtype2prec[dtype])
            output.backward(grad_output)
            ref_output.backward(grad_output)
            es_weight_grad = es.weight.grad.data
            if sparse:
                es_weight_grad = es.weight.grad.data.to_dense()
            # We have more floating point error here because we are dealing with larger numbers
            needed_prec = dtype2prec[dtype] * 2
            self.assertEqual(es_weight_grad, e.weight.grad, needed_prec)
        N, D, B, L = random.randint(1, 100), random.randint(1, 100), random.randint(1, 50), random.randint(1, 50)
        _test_vs_Embedding(N, D, B, L)
        for max_norm in (None, 3):
            for p in itertools.product([1, 2], repeat=4):
                _test_vs_Embedding(*p, max_norm=max_norm)
        # check that giving illegal input combos raises error
        es = nn.EmbeddingBag(10, 20, mode=mode, sparse=sparse)
        input = torch.ones(3, 4)
        offset = torch.arange(0, 3)
        self.assertRaises(ValueError, lambda: es(input, offset))
        self.assertRaises(ValueError, lambda: es(input.view(-1)))
        offset[0] = 1
        self.assertRaises(ValueError, lambda: es(input.view(-1), offset))
        offset[0] = 0
        offset[-1] = 100
        self.assertRaises(ValueError, lambda: es(input.view(-1), offset))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pool3d_size_one_feature_dim(self):
    """3D pooling must tolerate arbitrary strides on a size-1 feature dim.

    CUDA and CPU results must agree and neither may crash.
    """
    # Tests crazy strides for feature dim of size 1
    x = Variable(torch.randn(7, 1, 5, 3, 2, device="cuda"))
    # stride for dim 1 is arbitrary (1234) — legal because that dim has size 1
    strange_strides = [30, 1234, 6, 2, 1]
    y = x.as_strided(x.size(), strange_strides)
    x = x.cpu().as_strided(x.size(), strange_strides)  # CPU reference view

    to_test = {
        'max_pool3d': lambda t: F.max_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
        'avg_pool3d': lambda t: F.avg_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
    }
    for test, fn in to_test.items():
        # Should not crash
        out_y = fn(y)   # CUDA path
        out_x = fn(x)   # CPU path
        self.assertEqual(out_y, out_x.cuda(), test)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_AvgPool3d_backward_after_cat_dim1_cuda(self):
    """avg_pool3d backward must accept a grad tensor whose dim-0 stride was
    doubled — still contiguous only because the batch size is 1."""
    # x has to have batch_size 1 to test contiguous checks
    x = torch.randn(1, 3, 4, 4, 4, device="cuda", requires_grad=True)
    y = F.avg_pool3d(x, kernel_size=3, padding=1, stride=2)

    grad = torch.randn(y.size(), device="cuda")
    # increase the stride in dimension 0. the tensor is still contiguous because size[0] is 1
    stride = list(grad.stride())
    stride[0] = stride[0] * 2
    grad.set_(grad.storage(), 0, grad.size(), stride)
    assert grad.is_contiguous()

    # should not raise
    y.backward(grad)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_contig_wrong_stride_cudnn(self):
    """cuDNN convolutions must accept a contiguous tensor whose recorded
    strides are unusual (possible only when size[0] == 1)."""
    # x has to have batch_size 1 to test contiguous checks
    x = torch.randn(1, 16, 5, 5, device="cuda")
    stride = list(x.stride())
    stride[0] = 20
    # change the stride in dimension 0. the tensor is still contiguous because size[0] is 1
    x.set_(x.storage(), 0, x.size(), stride)
    self.assertTrue(x.is_contiguous())
    # neither call should raise
    F.conv_transpose2d(x, torch.randn(16, 1, 1, 1, device="cuda"))
    F.conv2d(x, torch.randn(1, 16, 1, 1, device="cuda"))
def test_embedding_bag(self):
    """Exercise EmbeddingBag on CPU: dense gradients for every mode, then
    sparse gradients for the modes that support them (sum/mean)."""
    for mode in ('sum', 'mean', 'max'):
        self._test_EmbeddingBag(False, mode, False)
    # sparse gradients are not supported for 'max'
    for mode in ('sum', 'mean'):
        self._test_EmbeddingBag(False, mode, True)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_embedding_bag_cuda(self, dtype=torch.float):
    """Exercise EmbeddingBag on CUDA for each dtype; the sparse-gradient
    variants are skipped for half precision."""
    for mode in ('sum', 'mean', 'max'):
        self._test_EmbeddingBag(True, mode, False, dtype)
    if dtype != torch.half:
        # torch.cuda.sparse.HalfTensor is not enabled.
        for mode in ('sum', 'mean'):
            self._test_EmbeddingBag(True, mode, True, dtype)
def test_fractional_max_pool2d(self):
    """fractional_max_pool2d: output shape plus grad/gradgrad checks for
    batched (4D) and unbatched (3D) inputs."""
    x = torch.randn(1, 2, 7, 7, requires_grad=True)
    samples = x.new(1, 2, 2).uniform_()

    def func(x):
        # NOTE: closes over `samples`, which is rebound below before the
        # unbatched checks — the closure picks up the new value.
        return F.fractional_max_pool2d(
            x, (2, 2), output_size=(3, 3), _random_samples=samples)

    self.assertEqual(func(x).shape, (1, 2, 3, 3))
    gradcheck(func, [x])
    gradgradcheck(func, [x])

    # same checks without a batch dimension
    x = torch.randn(2, 7, 7, requires_grad=True)
    samples = x.new(2, 2).uniform_()
    self.assertEqual(func(x).shape, (2, 3, 3))
    gradcheck(func, [x])
    gradgradcheck(func, [x])
def test_Dropout(self):
    """Run the shared dropout checks on a flat 1000-element tensor."""
    self._test_dropout(nn.Dropout, torch.Tensor(1000))
def test_Dropout2d(self):
    """Run the shared dropout checks on a 4D tensor with random trailing dims."""
    # same random.randint call order as before: b, w, h
    b, w, h = (random.randint(1, 5) for _ in range(3))
    num_features = 1000
    self._test_dropout(nn.Dropout2d, torch.Tensor(num_features, b, w, h))
def test_Dropout3d(self):
    """Run the shared dropout checks on a 5D tensor with random trailing dims."""
    # same random.randint call order as before: b, w, h, d
    b, w, h = (random.randint(1, 5) for _ in range(3))
    d = random.randint(1, 2)
    num_features = 1000
    self._test_dropout(nn.Dropout3d, torch.Tensor(num_features, b, d, w, h))
def test_AlphaDropout(self):
    """AlphaDropout should preserve zero mean / unit std of its input."""
    # generate random tensor with zero mean and unit std
    self._test_alpha_dropout(nn.AlphaDropout, torch.randn(5000))
def test_FeatureAlphaDropout(self):
    """FeatureAlphaDropout checks on a 5D input with random trailing dims."""
    # same random.randint call order as before: b, w, h, d
    b, w, h = (random.randint(1, 5) for _ in range(3))
    d = random.randint(1, 2)
    num_features = 1000
    self._test_alpha_dropout(
        nn.FeatureAlphaDropout, torch.randn(num_features, b, d, w, h))
def _test_InstanceNorm_general(self, cls, input, device="cpu", dtype=torch.float):
    """Shared InstanceNorm*d checks.

    Verifies: per-(instance, channel) normalization to mean 0 / var 1,
    eval() parity when track_running_stats=False, running-stat tracking
    with momentum=1, and eval-mode response to per-channel shifts.

    `cls` is an nn.InstanceNorm*d class; `input` is a (B, C, *spatial) tensor.
    """
    # default case track_running_stats=False
    b, c = input.size(0), input.size(1)
    # FIX: torch.tensor(tensor, ...) copy-construction is deprecated;
    # clone/detach is the recommended equivalent.
    input_var = input.detach().clone().to(device=device, dtype=dtype).requires_grad_(True)

    IN = cls(c, eps=0).to(device, dtype)

    output = IN(input_var)
    out_reshaped = output.view(b * c, -1)

    mean = out_reshaped.mean(1)
    var = out_reshaped.var(1, unbiased=False)

    self.assertAlmostEqual(torch.abs(mean.data).mean(), 0, delta=1e-5)
    self.assertAlmostEqual(torch.abs(var.data).mean(), 1, delta=1e-5)

    # check that eval mode doesn't change behavior
    grad_out = torch.randn_like(output)
    res1 = output.data.clone()
    output.backward(grad_out)
    grad1 = input_var.grad.data.clone()

    IN.eval()
    output = IN(input_var)
    input_var.grad = None
    output.backward(grad_out)
    res2 = output.data
    grad2 = input_var.grad.data
    self.assertEqual(res1, res2)
    self.assertEqual(grad1, grad2)

    # If track_running_stats=True and momentum=1, running_mean/var should be
    # equal to mean/var of the input (with unbias correction)
    IN = cls(c, momentum=1, eps=0, track_running_stats=True).to(device, dtype)

    output = IN(input_var)

    input_reshaped = input_var.transpose(1, 0).reshape(c, -1)
    mean = input_reshaped.mean(1)

    input_reshaped = input_var.transpose(1, 0).reshape(c, b, -1)
    var = input_reshaped.var(2, unbiased=True)[:, :]

    self.assertAlmostEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, delta=1e-5)
    self.assertAlmostEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, delta=1e-5)

    # in eval mode, adding X * std to a channel in input should make the
    # corresponding channel in output have mean X
    IN.eval()
    delta = IN.running_var.sqrt() * torch.arange(c, device=device, dtype=dtype)
    delta = delta.view(-1, *[1 for _ in range(2, input.dim())])
    output = IN(input_var + delta)
    self.assertEqual(output.transpose(0, 1).reshape(c, -1).mean(1), torch.arange(c))
def _test_InstanceNorm_cuda_half(self, cls, input):
    """Compare the THNN half-precision and cuDNN float InstanceNorm paths on
    CUDA: outputs must agree to 1e-4 and input grads to 1e-3."""
    # THNN
    input = Variable(input.cuda().half().random_(1, 10), requires_grad=True)
    m = cls(input.size(1), affine=True, track_running_stats=True).to("cuda", torch.half)
    thnn_output = m(input)
    thnn_output.sum().backward()
    thnn_input_grad = input.grad.data.clone()
    self.assertEqual(thnn_output.type(), input.type())
    # cuDNN
    if TEST_CUDNN:
        input.grad = None
        m = m.float()  # cuDNN path runs in float32
        cudnn_output = m(input)
        cudnn_output.sum().backward()
        cudnn_input_grad = input.grad.data.clone()
        self.assertEqual(cudnn_output.type(), input.type())
        self.assertAlmostEqual(cudnn_output, thnn_output, delta=1e-4)
        self.assertAlmostEqual(cudnn_input_grad, thnn_input_grad, delta=1e-3)
def test_InstanceNorm1d_general(self):
    """InstanceNorm1d on a random (B, C, L) input (CPU)."""
    # same random.randint call order as before: b, c, d
    b, c = random.randint(3, 5), random.randint(3, 5)
    d = random.randint(8, 10)
    self._test_InstanceNorm_general(
        nn.InstanceNorm1d, torch.rand(b, c, d), dtype=torch.float)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_InstanceNorm1d_general_cuda(self):
    """InstanceNorm1d on CUDA, plus the half-precision THNN/cuDNN comparison."""
    b = random.randint(3, 5)
    c = random.randint(3, 5)
    d = random.randint(8, 10)

    input = torch.rand(b, c, d)
    self._test_InstanceNorm_general(nn.InstanceNorm1d, input, "cuda", torch.float)
    self._test_InstanceNorm_cuda_half(nn.InstanceNorm1d, input)
def test_InstanceNorm2d_general(self):
    """InstanceNorm2d on a random (B, C, H, W) input (CPU)."""
    # same random.randint call order as before: b, c, w, h
    b, c = random.randint(3, 5), random.randint(3, 5)
    w, h = random.randint(3, 6), random.randint(6, 8)
    self._test_InstanceNorm_general(
        nn.InstanceNorm2d, torch.rand(b, c, h, w), dtype=torch.float)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_InstanceNorm2d_general_cuda(self):
    """InstanceNorm2d on CUDA, plus the half-precision THNN/cuDNN comparison."""
    b = random.randint(3, 5)
    c = random.randint(3, 5)
    w = random.randint(3, 6)
    h = random.randint(6, 8)

    input = torch.rand(b, c, h, w)
    self._test_InstanceNorm_general(nn.InstanceNorm2d, input, "cuda", torch.float)
    self._test_InstanceNorm_cuda_half(nn.InstanceNorm2d, input)
def test_InstanceNorm3d_general(self):
    """InstanceNorm3d on a random (B, C, H, W, D) input (CPU)."""
    # same random.randint call order as before: b, c, w, h, d
    b, c = random.randint(3, 5), random.randint(3, 5)
    w, h, d = (random.randint(2, 5) for _ in range(3))
    self._test_InstanceNorm_general(
        nn.InstanceNorm3d, torch.rand(b, c, h, w, d), dtype=torch.float)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_InstanceNorm3d_general_cuda(self):
    """InstanceNorm3d on CUDA, plus the half-precision THNN/cuDNN comparison."""
    b = random.randint(3, 5)
    c = random.randint(2, 5)
    w = random.randint(2, 5)
    h = random.randint(2, 5)
    d = random.randint(2, 5)

    input = torch.rand(b, c, h, w, d)
    self._test_InstanceNorm_general(nn.InstanceNorm3d, input, "cuda", torch.float)
    self._test_InstanceNorm_cuda_half(nn.InstanceNorm3d, input)
def _test_LayerNorm_general(self, device="cpu", dtype=torch.float):
    """Shared LayerNorm checks over random shapes of rank 2–5.

    Verifies normalization to mean 0 / var 1, correct application of a
    scalar affine weight/bias, and RuntimeError on mismatched
    normalized_shape vs. input shape.
    """
    for i in range(2, 6):
        shape = torch.randint(3, 6, (i,), dtype=torch.long).tolist()
        x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        normalized_ndim = random.randint(1, i - 1)  # inclusive
        normalized_shape = shape[-normalized_ndim:]
        unnormalized_shape = shape[:-normalized_ndim]

        # test that LN normalizes to mean 0 and stddev 1
        ln = nn.LayerNorm(normalized_shape, eps=0).to(device, dtype)
        ln.weight.data.fill_(1)
        ln.bias.data.fill_(0)
        output = ln(x)
        out_reshaped = output.view(*(unnormalized_shape + [-1]))
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)
        self.assertAlmostEqual(torch.abs(mean.data).mean(), 0, delta=1e-5)
        self.assertAlmostEqual(torch.abs(var.data).mean(), 1, delta=1e-5)

        # test that LN applies weight and bias correctly
        scale, bias = torch.empty(2).uniform_(0.2, 2).tolist()
        ln.weight.data.fill_(scale)
        ln.bias.data.fill_(bias)
        output = ln(x)
        out_reshaped = output.view(*(unnormalized_shape + [-1]))
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)
        self.assertAlmostEqual(torch.abs(mean.data).mean(), bias, delta=1e-5)
        self.assertAlmostEqual(torch.abs(var.data).mean(), scale ** 2, delta=1e-5)

    # maps a bad normalized_shape to an incompatible input shape
    bad_norm_shape_input_shape = {
        (): (),
        (2, 3): (3,),
        (2,): (1, 2, 3),
        (10,): (2, 3),
        10: (2, 3),
    }
    for norm_shape, input_shape in bad_norm_shape_input_shape.items():
        ln = nn.LayerNorm(norm_shape)
        input = torch.empty(input_shape, device=device, dtype=dtype).uniform_(0, 10)
        self.assertRaises(RuntimeError, lambda: ln(input))
def _test_LayerNorm_cuda_half(self):
    """Smoke-test LayerNorm forward/backward in half precision on CUDA."""
    input = Variable(torch.empty(2, 3, 3, 2).to("cuda", torch.half).random_(1, 10), requires_grad=True)
    m = nn.LayerNorm([3, 2]).to("cuda", torch.half)
    output = m(input)
    output.sum().backward()
    self.assertEqual(output.type(), input.type())
def test_LayerNorm_general(self):
    """Run the shared LayerNorm checks on CPU."""
    self._test_LayerNorm_general()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_LayerNorm_general_cuda(self):
    """Run the shared LayerNorm checks on CUDA plus the half-precision smoke test."""
    self._test_LayerNorm_general("cuda")
    self._test_LayerNorm_cuda_half()
def _test_GroupNorm_general(self, device="cpu", dtype=torch.float):
    """Shared GroupNorm checks.

    For valid (shape, num_groups) pairs: per-(batch, group) normalization to
    mean 0 / var 1, and correct per-channel affine weight/bias application.
    For invalid pairs (channels not divisible by groups): RuntimeError.
    """
    # maps input shape -> a num_groups value that divides its channel dim
    good_shape_g = {
        (1, 2, 3, 4): 2,
        (2, 3, 10): 3,
        (3, 1, 1, 1, 2): 1,
        (2, 6, 4, 2, 2): 3,
    }
    for shape, g in good_shape_g.items():
        x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        b = shape[0]
        c = shape[1]

        # test that GN normalizes to mean 0 and stddev 1
        gn = nn.GroupNorm(g, c, eps=0).to(device, dtype)
        gn.weight.data.fill_(1)
        gn.bias.data.fill_(0)
        output = gn(x)
        out_reshaped = output.view(b, g, -1)
        mean = out_reshaped.mean(-1)
        var = out_reshaped.var(-1, unbiased=False)
        self.assertAlmostEqual(torch.abs(mean).mean(), 0, delta=1e-5)
        self.assertAlmostEqual(torch.abs(var).mean(), 1, delta=1e-5)

        # test that GN applies weight and bias correctly
        scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
        bias = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
        gn.weight.data.copy_(scale)
        gn.bias.data.copy_(bias)
        output = gn(x)
        # undo the affine transform, then re-check normalization
        out_reshaped = output.view(b, c, -1)
        out_normed = (out_reshaped - bias.view(c, 1)) / scale.view(c, 1)
        out_normed_reshaped = out_normed.view(b, g, -1)
        mean = out_normed_reshaped.mean(-1)
        var = out_normed_reshaped.var(-1, unbiased=False)
        self.assertAlmostEqual(torch.abs(mean).mean(), 0, delta=1e-5)
        self.assertAlmostEqual(torch.abs(var).mean(), 1, delta=1e-5)

    # maps input shape -> a num_groups value incompatible with its channel dim
    bad_shape_g = {
        (1, 2, 3, 4): 3,
        (2, 3, 10): 2,
        (3, 1, 1, 1, 2): 10,
        (2, 6, 4, 2, 2): 4,
    }
    for shape, g in bad_shape_g.items():
        gn = nn.GroupNorm(g, shape[1])
        input = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
        self.assertRaises(RuntimeError, lambda: gn(input))
def _test_GroupNorm_cuda_half(self):
    """Smoke-test GroupNorm forward/backward in half precision on CUDA."""
    # FIX: removed a dead first assignment that built a (2, 3, 3, 2) input
    # and was immediately overwritten by the line below.
    input = torch.zeros(2, 4, 3, 2, requires_grad=True).cuda().half().random_(1, 10)
    m = nn.GroupNorm(2, 4).to("cuda", torch.half)
    output = m(input)
    output.sum().backward()
    self.assertEqual(output.type(), input.type())
def test_GroupNorm_general(self):
    """Run the shared GroupNorm checks on CPU."""
    self._test_GroupNorm_general(dtype=torch.float)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_GroupNorm_general_cuda(self):
    """Run the shared GroupNorm checks on CUDA plus the half-precision smoke test."""
    self._test_GroupNorm_general("cuda", torch.float)
    self._test_GroupNorm_cuda_half()
def test_pad(self):
    """F.pad grad/gradgrad checks for constant, replicate, and reflect modes
    (including negative padding), and error checks for oversized reflect pads."""
    inputs = torch.randn(1, 3, 4, 4, requires_grad=True)
    _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (1, 1, 1, 1)), (inputs,))
    _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1)), (inputs,))
    _assertGradAndGradgradChecks(self, lambda x: F.pad(x, (-1, 1, -2, 1), value=2), (inputs,))
    self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='replicate'), (inputs,)))
    self.assertTrue(gradcheck(lambda x: F.pad(x, (-1, 1, -2, 1), mode='reflect'), (inputs,)))

    # 5D input exercises the 3D (volumetric) replicate padding path
    inputs = torch.randn(1, 2, 3, 4, 4, requires_grad=True)
    self.assertTrue(gradcheck(lambda x: F.pad(x, (1, 1, 1, 1, 1, 1), mode='replicate'), (inputs,)))

    # assert that reflection padding errors when pad >= input size
    expected_err_msg = r"Padding size should be less than the corresponding input dimension"
    self.assertRaisesRegex(RuntimeError, expected_err_msg,
                           lambda: F.pad(torch.randn(1, 1, 2, 3), (1, 1, 3, 0), mode='reflect'))
    self.assertRaisesRegex(RuntimeError, expected_err_msg,
                           lambda: F.pad(torch.randn(1, 1, 2), (2, 1), mode='reflect'))
def test_pad_scalar_error(self):
    """F.pad must reject 0-dimensional (scalar) inputs."""
    scalar = torch.tensor(0., requires_grad=True)
    for pad in ((1, 1), (1,)):
        self.assertRaises(AssertionError, lambda: F.pad(scalar, pad))
def test_normalize(self):
    """gradcheck F.normalize under L1 and L2 norms, including a 0-dim input."""
    t = torch.randn(1, 3, 4, 4, requires_grad=True)
    for p, dim in ((1, -1), (2, -2)):
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=p, dim=dim), (t,)))
    t = torch.randn((), requires_grad=True)
    self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (t,)))
def _test_maxpool_indices(self, num_dim, adaptive=False, device="cpu", dtype=torch.float):
    """Shared MaxPool/AdaptiveMaxPool checks for 1–3 spatial dims.

    Verifies forward values and indices (precomputed for 1D/2D), the
    backward gradient, grad-requirement flags, and that mutating the
    returned indices makes a subsequent backward raise.
    """
    def expected_indices(dim):
        # hand-computed flat argmax positions for the arange input below
        if dim == 1:
            return torch.tensor([1, 3], dtype=torch.double).repeat(2, 2, 1)
        if dim == 2:
            return torch.tensor([[5, 7], [13, 15]], dtype=torch.double).repeat(2, 2, 1, 1)

    def expected_grad(dim):
        if dim == 1:
            return torch.tensor([0, 1, 0, 1], dtype=torch.double).repeat(2, 2, 1)
        grad = expected_grad(dim - 1)
        zero = torch.zeros(grad.size())
        return torch.stack((zero, grad, zero, grad), 2)

    def expected_output(dim):
        if dim == 1:
            return torch.arange(2, 17, 2).view(2, 2, 2)
        if dim == 2:
            col = torch.arange(6, 63, 8)
            return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)

    if adaptive:
        cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)
    else:
        cls_name = 'MaxPool{}d'.format(num_dim)
    module_cls = getattr(nn, cls_name)
    module = module_cls(2, return_indices=True).to(device, dtype=dtype)
    numel = 4 ** (num_dim + 1)
    input = torch.arange(1, numel + 1).view(2, 2, *repeat(4, num_dim)).to(device, dtype=dtype)
    # FIX: torch.tensor(tensor, ...) copy-construction is deprecated;
    # clone/detach is the recommended equivalent.
    input_var = input.detach().clone().requires_grad_(True)

    # Check forward
    output, indices = module(input_var)
    if num_dim != 3:
        # NOTE: these rebindings intentionally shadow the helper functions
        expected_indices = expected_indices(num_dim)
        expected_output = expected_output(num_dim)
        self.assertEqual(indices.dim(), input.dim())
        self.assertEqual(indices.data.squeeze(), expected_indices)
        self.assertEqual(output.data.squeeze(), expected_output)
    self.assertTrue(output.requires_grad)
    self.assertFalse(indices.requires_grad)

    # Make sure backward works
    grad_output = torch.ones(output.size(), device=device, dtype=dtype)
    output.backward(grad_output, retain_graph=True)
    expected_grad = expected_grad(num_dim)
    self.assertEqual(input_var.grad.data, expected_grad.view_as(input))

    # Make sure backward after changing indices will result in an error
    indices.add_(1)
    self.assertRaises(RuntimeError, lambda: output.backward(grad_output))
def test_adaptive_pooling_input_size(self):
    """Adaptive pools must reject inputs with too few dimensions."""
    for numel, pool_type in itertools.product((2, 3), ('Max', 'Avg')):
        cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
        module = getattr(nn, cls_name)((2,) * numel)
        # input has only `numel` dims, one fewer than the pool requires
        bad_input = torch.randn((2,) * numel)
        self.assertRaises(ValueError, lambda: module(bad_input))
def test_adaptive_pooling_size_none(self):
    """A None entry in output_size keeps that dimension's input size."""
    for numel, pool_type in itertools.product((2, 3), ('Max', 'Avg')):
        cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
        module = getattr(nn, cls_name)((2,) * (numel - 1) + (None,))
        output = module(torch.randn((4,) * (numel + 1)))
        # last spatial dim keeps its input extent (4); the rest become 2
        self.assertEqual(output.size(), (4,) + (2,) * (numel - 1) + (4,))
def test_Conv2d_naive_groups(self):
    """Run the shared grouped-Conv2d checks on CPU."""
    self._test_Conv2d_naive_groups()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_Conv2d_naive_groups_cuda(self, dtype=torch.float):
    """Run the shared grouped-Conv2d checks on CUDA for each dtype."""
    self._test_Conv2d_naive_groups("cuda", dtype)
def test_batchnorm_eval(self):
    """Run the shared eval-mode BatchNorm checks on CPU."""
    self._test_batchnorm_eval()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_eval_cuda(self, dtype=torch.float):
    """Run the shared eval-mode BatchNorm checks on CUDA."""
    self._test_batchnorm_eval("cuda", dtype)
def test_batchnorm_simple_average(self):
    """Run the shared momentum=None (simple average) BatchNorm checks on CPU."""
    self._test_batchnorm_simple_average()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_simple_average_cuda(self):
    """Run the shared momentum=None (simple average) BatchNorm checks on CUDA."""
    self._test_batchnorm_simple_average(torch.cuda.FloatTensor)
def test_MaxPool1d_indices(self):
    """MaxPool1d forward values/indices and backward via the shared helper (CPU)."""
    self._test_maxpool_indices(1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_MaxPool1d_indices_cuda(self, dtype=torch.float):
    """Same as the CPU variant, on CUDA for each dtype."""
    self._test_maxpool_indices(1, device="cuda", dtype=dtype)
def test_MaxPool2d_indices(self):
    """MaxPool2d forward values/indices and backward via the shared helper (CPU)."""
    self._test_maxpool_indices(2)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_MaxPool2d_indices_cuda(self, dtype=torch.float):
    """Same as the CPU variant, on CUDA for each dtype."""
    self._test_maxpool_indices(2, device="cuda", dtype=dtype)
def test_MaxPool3d_indices(self):
    """MaxPool3d backward via the shared helper (CPU; no precomputed 3D values)."""
    self._test_maxpool_indices(3)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_MaxPool3d_indices_cuda(self, dtype=torch.float):
    """Same as the CPU variant, on CUDA for each dtype."""
    self._test_maxpool_indices(3, device="cuda", dtype=dtype)
def test_AdaptiveMaxPool1d_indices(self):
    """AdaptiveMaxPool1d values/indices via the shared helper (CPU)."""
    self._test_maxpool_indices(1, adaptive=True)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_AdaptiveMaxPool1d_indices_cuda(self, dtype=torch.float):
    """Same as the CPU variant, on CUDA for each dtype."""
    self._test_maxpool_indices(1, adaptive=True, device="cuda", dtype=dtype)
def test_AdaptiveMaxPool2d_indices(self):
    """AdaptiveMaxPool2d values/indices via the shared helper (CPU)."""
    self._test_maxpool_indices(2, adaptive=True)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_AdaptiveMaxPool2d_indices_cuda(self, dtype=torch.float):
    """Same as the CPU variant, on CUDA for each dtype."""
    self._test_maxpool_indices(2, adaptive=True, device="cuda", dtype=dtype)
def test_AdaptiveMaxPool3d_indices(self):
    """AdaptiveMaxPool3d backward via the shared helper (CPU)."""
    self._test_maxpool_indices(3, adaptive=True)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_AdaptiveMaxPool3d_indices_cuda(self, dtype=torch.float):
    """Same as the CPU variant, on CUDA for each dtype."""
    self._test_maxpool_indices(3, adaptive=True, device="cuda", dtype=dtype)
@staticmethod
def _test_max_pool_nan(self, device, dtype=torch.float):
    """All (adaptive_)max_pool{1,2,3}d variants must propagate NaN.

    Written as a staticmethod taking the TestCase as an explicit first
    argument so the CPU and CUDA tests can share it.
    """
    for adaptive in ['', 'adaptive_']:
        for num_dim in [1, 2, 3]:
            fn_name = '{}max_pool{}d'.format(adaptive, num_dim)
            fn = getattr(F, fn_name)
            # FIX: the input was always created on the CPU, so the `device`
            # argument was ignored and the CUDA variant never exercised
            # CUDA kernels.  (`dtype` is still unused here — presumably it
            # should also be forwarded; TODO confirm against callers.)
            x = torch.full([1, 1] + num_dim * [3], nan, device=device)
            res = fn(x, 1 if adaptive else 3)
            self.assertTrue(math.isnan(res.item()))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_max_pool_nan_cuda(self, dtype=torch.float):
    """NaN propagation through max pooling on CUDA."""
    self._test_max_pool_nan(self, device="cuda", dtype=dtype)
def test_max_pool_nan(self, dtype=torch.float):
    """NaN propagation through max pooling on CPU."""
    self._test_max_pool_nan(self, device="cpu")
def _test_scatter(self, tensor):
    """Scatter a 4-row tensor across GPUs 0/1 along dim 0 and verify values,
    device placement, and that gradients route back to the right slices."""
    # FIX: torch.tensor(tensor, ...) copy-construction is deprecated;
    # clone/detach is the recommended equivalent.
    x = tensor.detach().clone().requires_grad_(True)
    result = dp.scatter(x, (0, 1))
    self.assertEqual(len(result), 2)
    self.assertEqual(result[0], x[:2])
    self.assertEqual(result[0].get_device(), 0)
    self.assertEqual(result[1], x[2:])
    self.assertEqual(result[1].get_device(), 1)
    grad = result[0].data.clone().fill_(2)
    result[0].backward(grad)
    # only the first chunk received a gradient
    self.assertEqual(x.grad.data[:2], grad)
    self.assertEqual(x.grad.data[2:], grad.clone().zero_())
    _assertGradAndGradgradChecks(self, lambda y: dp.scatter(y, (0, 1)), (x,))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_scatter_cpu(self):
    """Scatter a CPU tensor to GPUs via the shared helper."""
    self._test_scatter(torch.randn(4, 4))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_scatter_gpu(self):
    """Scatter a tensor already on GPU via the shared helper."""
    self._test_scatter(torch.randn(4, 4).cuda())
def _test_gather(self, output_device):
    """Gather tensors from GPUs 0/1 onto `output_device` (-1 means CPU).

    Checks concatenation along dim 0, device placement, gradient routing,
    and grad/gradgrad; then repeats for 0-dim inputs, which must stack
    into a vector.
    """
    inputs = (
        torch.randn(2, 4, device='cuda:0', requires_grad=True),
        torch.randn(2, 4, device='cuda:1', requires_grad=True),
    )
    result = dp.gather(inputs, output_device)
    self.assertEqual(result.size(), torch.Size([4, 4]))
    self.assertEqual(result[:2], inputs[0])
    self.assertEqual(result[2:], inputs[1])
    if output_device != -1:
        self.assertEqual(result.get_device(), output_device)
    else:
        self.assertFalse(result.is_cuda)
    grad = torch.randn(4, 4)
    if output_device != -1:
        grad = grad.cuda(output_device)
    result.backward(grad)
    # each input gets the matching slice of the gradient
    self.assertEqual(inputs[0].grad.data, grad[:2])
    self.assertEqual(inputs[1].grad.data, grad[2:])
    _assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)

    # test scalar inputs, should stack into a vector in this case
    inputs = (
        torch.randn((), device='cuda:0', requires_grad=True),
        torch.randn((), device='cuda:1', requires_grad=True),
    )
    result = dp.gather(inputs, output_device)
    self.assertEqual(result.size(), torch.Size([2]))
    self.assertEqual(result[0], inputs[0])
    self.assertEqual(result[1], inputs[1])
    if output_device != -1:
        self.assertEqual(result.get_device(), output_device)
    else:
        self.assertFalse(result.is_cuda)
    grad = torch.randn(2)
    if output_device != -1:
        grad = grad.cuda(output_device)
    result.backward(grad)
    self.assertEqual(inputs[0].grad, grad[0])
    self.assertEqual(inputs[1].grad, grad[1])
    _assertGradAndGradgradChecks(self, lambda x, y: dp.gather((x, y), output_device), inputs)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_cpu(self):
    """Gather GPU tensors onto the CPU (-1) via the shared helper."""
    self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_gpu(self):
    """Gather GPU tensors onto GPU 0 via the shared helper."""
    self._test_gather(0)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_gather_different_len_dicts(self):
    """Gathering dicts with mismatched key sets must raise ValueError."""
    inputs = (
        {'a': Variable(torch.randn(1, 2).cuda(0), requires_grad=True)},
        {
            'b': Variable(torch.randn(1, 2).cuda(1), requires_grad=True),
            'a': Variable(torch.randn(1, 2).cuda(1), requires_grad=True)
        }
    )
    with self.assertRaises(ValueError):
        _ = dp.gather(inputs, target_device=0)
def _test_broadcast_double_backwards(self, *tensors):
    """Broadcast.apply over GPUs 0/1 must pass grad and gradgrad checks."""
    # FIX: torch.tensor(tensor, ...) copy-construction is deprecated;
    # clone/detach is the recommended equivalent.
    variables = tuple(t.detach().clone().requires_grad_(True) for t in tensors)
    _assertGradAndGradgradChecks(self, lambda *i: Broadcast.apply((0, 1), *i), variables)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_double_backwards_gpu(self):
    """Double-backward through Broadcast for three GPU tensors."""
    self._test_broadcast_double_backwards(torch.randn(4, 4).cuda(),
                                          torch.randn(4, 4).cuda(),
                                          torch.randn(4, 4).cuda())
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_not_requiring_grad(self):
    """Broadcast must preserve each input's requires_grad flag on every replica."""
    variables = [
        Variable(torch.randn(1, 2).cuda(), requires_grad=True),
        Variable(torch.randn(1, 2).cuda(), requires_grad=False),
        Variable(torch.randn(1, 2).cuda(), requires_grad=False),
        Variable(torch.randn(1, 2).cuda(), requires_grad=True),
        Variable(torch.randn(1, 2).cuda(), requires_grad=True),
    ]
    broadcasted_variables = Broadcast.apply((0, 1), *variables)
    for output_idx, broadcasted_var in enumerate(broadcasted_variables):
        # outputs cycle through the inputs once per target device
        input_var = variables[output_idx % len(variables)]
        self.assertEqual(input_var.requires_grad, broadcasted_var.requires_grad)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_broadcast_no_grad(self):
    """Inside torch.no_grad(), broadcast outputs must not require grad even
    when the input does."""
    x = torch.randn(1, 2, dtype=torch.float32, requires_grad=True, device='cuda')
    with torch.no_grad():
        broadcasted = Broadcast.apply((0, 1), x)
    self.assertTrue(x.requires_grad)
    for output in broadcasted:
        self.assertFalse(output.requires_grad)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_replicate(self):
    """Each replica must live on its own device and compute the same output."""
    module = nn.Linear(10, 5).float().cuda()
    input = Variable(torch.randn(2, 10).float().cuda())
    expected_output = module(input).data
    replicas = dp.replicate(module, (0, 1))
    for i, replica in enumerate(replicas):
        for p in replica.parameters():
            self.assertEqual(p.get_device(), i)
        replica_input = input.cuda(i)
        self.assertEqual(replica(replica_input).data, expected_output)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_replicate_buffers(self):
    """Replication must move BatchNorm buffers onto each replica's device."""
    net = nn.Module()
    net.bn = nn.BatchNorm2d(10)
    net.cuda()
    for device_idx, replica in enumerate(dp.replicate(net, (0, 1))):
        bn = replica.bn
        for buf in (bn.running_mean, bn.running_var, bn.num_batches_tracked):
            self.assertEqual(buf.get_device(), device_idx, 'buffer on wrong device')
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_parallel_apply(self):
    """parallel_apply over two modules/devices must match serial execution."""
    l1 = nn.Linear(10, 5).to("cuda:0", torch.float)
    l2 = nn.Linear(10, 5).to("cuda:1", torch.float)
    i1 = torch.randn(2, 10, device="cuda:0", dtype=torch.float)
    i2 = torch.randn(2, 10, device="cuda:1", dtype=torch.float)
    expected1 = l1(i1).data
    expected2 = l2(i2).data
    modules = (l1, l2)
    expected_outputs = (expected1, expected2)

    # each input can be either a collection of positional arguments
    #                       or an object representing the single argument
    for inputs in [((i1,), (i2,)), (i1, i2)]:
        outputs = dp.parallel_apply(modules, inputs, None)
        for out, expected in zip(outputs, expected_outputs):
            self.assertEqual(out.data, expected)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_multiple_input(self):
    """data_parallel with several tensor args, a plain float arg, and an
    optional kwarg: outputs and input gradients must match the
    single-device reference for various device_id orderings."""
    class TestModule(nn.Module):

        def forward(self, var1, var2, float1, var3=None):
            if var3 is None:
                return float1 * (var1 * var2)
            else:
                return float1 * (var1 * var2 + var3)

    m = TestModule()
    var1 = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
    var2 = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
    var3 = torch.randn(5, 5, dtype=torch.float, requires_grad=False)

    float1 = torch.randn(1).item()

    # single-device reference output and gradients
    expected = m(var1, var2, float1)
    loss = expected.sum()
    loss.backward()
    gvar1_exp = var1.grad.clone()
    gvar2_exp = var2.grad.clone()

    def local_test(out):
        # re-run backward through the parallel output and compare with the
        # reference gradients stashed above
        var1.grad.data.fill_(0.0)
        var2.grad.data.fill_(0.0)
        loss = out.sum()
        loss.backward()
        self.assertEqual(out, expected)
        self.assertEqual(gvar1_exp, var1.grad)
        self.assertEqual(gvar2_exp, var2.grad)

    out = dp.data_parallel(m, (var1, var2, float1), (0, 1))
    local_test(out)

    out = dp.data_parallel(m, (var1, var2, float1), (1, 0))
    local_test(out)

    out = dp.data_parallel(m, (var1, var2, float1), (0,))
    local_test(out)

    # refresh the reference with var3 supplied
    var1.grad.data.fill_(0.0)
    var2.grad.data.fill_(0.0)
    expected = m(var1, var2, float1, var3=var3)
    loss = expected.sum()
    loss.backward()
    gvar1_exp = var1.grad.clone()
    gvar2_exp = var2.grad.clone()

    dpm = nn.DataParallel(TestModule())
    out = dpm(var1, var2, float1, var3=var3)
    local_test(out)

    dpm = nn.DataParallel(TestModule(), device_ids=[0])
    out = dpm(var1, var2, float1, var3=var3)
    local_test(out)

    kwarg_wrap = {'var3': var3}
    out = dp.data_parallel(
        m, (var1, var2, float1), (0, 1), module_kwargs=kwarg_wrap)
    local_test(out)

    out = dp.data_parallel(
        m, (var1, var2, float1), (0,), module_kwargs=kwarg_wrap)
    local_test(out)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_small_back(self):
    """data_parallel forward must match a plain single-device forward."""
    l = nn.Linear(10, 5).float().cuda()
    i = Variable(torch.randn(20, 10).float().cuda())
    out = dp.data_parallel(l, i, (0, 1))
    self.assertEqual(out, l(i))
@unittest.skipIf(not TEST_MULTIGPU or not PY3, "multi-GPU not supported")
def test_data_parallel_model_no_refcycles(self):
    """A DataParallel forward must not create garbage-collectable reference
    cycles (checked via gc.collect() returning 0)."""
    # Python 2.7 will create reference cycles with the following
    # Module on multiple GPUs, but Python 3 shouldn't unless
    # there are refcycles on the PyTorch side (or the defined module)
    import gc

    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.linear = nn.Linear(1, 1)

        def forward(self, x):
            return self.linear(x)

    gc.collect()  # clear any pre-existing garbage so the count below is ours
    model = nn.DataParallel(Model().cuda())
    data = Variable(torch.randn(1).cuda())
    model(data)

    refcycles = gc.collect()
    self.assertEqual(refcycles, 0)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_no_grad(self):
    """torch.no_grad() must propagate into the replicas' forward calls."""
    test = self

    class Layer(nn.Module):
        def forward(self, x):
            # runs inside the replica; grad mode must be off here
            test.assertFalse(torch.is_grad_enabled())
            return x

    l = Layer()
    i = Variable(torch.randn(20, 10).float().cuda())
    with torch.no_grad():
        dp.data_parallel(l, i, (0, 1))
    # outside no_grad the same forward must fail the assertion above
    self.assertRaises(AssertionError, lambda: dp.data_parallel(l, i, (0, 1)))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel(self):
    """data_parallel output, output device, and parameter gradients must
    match a single-device reference for both device orderings, and must
    also work with device_ids=None."""
    l = nn.Linear(10, 5).float().cuda()
    i = Variable(torch.randn(20, 10).float().cuda(1))
    l.cuda(1)
    # single-device reference on GPU 1
    expected_out = l(i)
    loss = expected_out.sum()
    loss.backward()
    expected_grads = []
    for param in l.parameters():
        expected_grads.append(param.grad.clone())
    dev_ids_list = [(0, 1), (1, 0)]
    for dev_id in dev_ids_list:
        with torch.cuda.device(dev_id[0]):
            l.cuda()
            l.zero_grad()
            out = dp.data_parallel(l, i, dev_id)
            loss = out.sum()
            loss.backward()
            self.assertEqual(out.get_device(), dev_id[0])
            self.assertEqual(out.data, expected_out.data)
            for expected, param in zip(expected_grads, l.parameters()):
                self.assertEqual(param.grad.data, expected.data)

    # Check for None device_ids
    l = l.cuda()
    out = dp.data_parallel(l, i)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_sparse(self):
    """Same as test_data_parallel but through a sparse-gradient Embedding."""
    l = nn.Embedding(10, 5, sparse=True).to("cuda:1")
    i = torch.randint(10, (20, 5), device="cuda:1", dtype=torch.long)
    # single-device reference on GPU 1
    expected_out = l(i)
    loss = expected_out.sum()
    loss.backward()
    expected_grads = []
    for param in l.parameters():
        expected_grads.append(param.grad.clone())
    dev_ids_list = [(0, 1), (1, 0)]
    for dev_id in dev_ids_list:
        with torch.cuda.device(dev_id[0]):
            l.cuda()
            l.zero_grad()
            out = dp.data_parallel(l, i, dev_id)
            loss = out.sum()
            loss.backward()
            self.assertEqual(out.get_device(), dev_id[0])
            self.assertEqual(out.data, expected_out.data)
            for expected, param in zip(expected_grads, l.parameters()):
                self.assertEqual(param.grad.data, expected.data)

    # Check for None device_ids
    l = l.cuda()
    out = dp.data_parallel(l, i)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_nested_output(self):
    """Nested list/tuple/dict outputs must be gathered with structure preserved."""
    def fn(input):
        return [
            input, (input.sin(), input.cos(), [input.add(1)]), input,
            {'a': input, 'b': [input.sin()]}
        ]

    class Net(nn.Module):
        def forward(self, input):
            return fn(input)

    i = Variable(torch.randn(2, 2).float().cuda(1))
    gpus = range(torch.cuda.device_count())
    output = dp.data_parallel(Net(), i, gpus)
    self.assertEqual(output, fn(i))
    # The gathered output must keep the exact container types at every level.
    self.assertIsInstance(output[0], torch.Tensor)
    self.assertIsInstance(output[1], tuple)
    self.assertIsInstance(output[1][0], torch.Tensor)
    self.assertIsInstance(output[1][1], torch.Tensor)
    self.assertIsInstance(output[1][2], list)
    self.assertIsInstance(output[1][2][0], torch.Tensor)
    self.assertIsInstance(output[2], torch.Tensor)
    self.assertIsInstance(output[3], dict)
    self.assertEqual(len(output[3]), 2)
    self.assertIn('a', output[3])
    self.assertIn('b', output[3])
    self.assertIsInstance(output[3]['a'], torch.Tensor)
    self.assertIsInstance(output[3]['b'], list)
    self.assertIsInstance(output[3]['b'][0], torch.Tensor)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_data_parallel_nested_input(self):
    """Nested tuple inputs must be scattered/gathered transparently."""
    def pick(args):
        # Select the tensor buried one level down in the nested args.
        return args[1][0]

    class PickNet(nn.Module):
        def forward(self, *args):
            return pick(args)

    base = Variable(torch.randn(20, 3).float().cuda(1))
    nested = (base.cos(), (base.sin(), base), base.sin())
    device_ids = range(torch.cuda.device_count())
    result = dp.data_parallel(PickNet(), nested, device_ids)
    self.assertEqual(result, pick(nested))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_data_parallel_module(self, dtype=torch.float):
    """nn.DataParallel wrapping must reproduce the bare module's output."""
    linear = nn.Linear(10, 5).to("cuda", dtype)
    inp = torch.randn(20, 10, device="cuda", dtype=dtype)
    reference = linear(inp).data
    wrapped = nn.DataParallel(linear)
    result = wrapped(inp)
    self.assertEqual(result.get_device(), 0)
    self.assertEqual(result.data, reference)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_data_parallel_module_kwargs_only(self, dtype=torch.float):
    """DataParallel must scatter keyword-only tensor arguments."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # `l` is the module created below; the name resolves when __init__ runs.
            self.l = l

        def forward(self, input):
            return self.l(input)

    l = nn.Linear(10, 5).to("cuda", dtype)
    i = torch.randn(20, 10, device="cuda", dtype=dtype)
    expected_out = l(i).data
    n = nn.DataParallel(Net())
    out = n(input=i)
    self.assertEqual(out.get_device(), 0)
    self.assertEqual(out.data, expected_out)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_data_parallel_module_kwargs_only_empty_list(self, dtype=torch.float):
    """DataParallel must scatter dict kwargs that contain an empty list."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # `l` is the module created below; the name resolves when __init__ runs.
            self.l = l

        def forward(self, input):
            return self.l(input['data'])

    l = nn.Linear(10, 5).to("cuda", dtype)
    i = torch.randn(20, 10, device="cuda", dtype=dtype)
    expected_out = l(i).data
    n = nn.DataParallel(Net())
    out = n(input={'data': i, 'unused': []})
    self.assertEqual(out.get_device(), 0)
    self.assertEqual(out.data, expected_out)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_data_parallel_module_kwargs_only_empty_dict(self, dtype=torch.float):
    """DataParallel must scatter dict kwargs that contain an empty dict."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # `l` is the module created below; the name resolves when __init__ runs.
            self.l = l

        def forward(self, input):
            return self.l(input['data'])

    l = nn.Linear(10, 5).to("cuda", dtype)
    i = torch.randn(20, 10, device="cuda", dtype=dtype)
    expected_out = l(i).data
    n = nn.DataParallel(Net())
    out = n(input={'data': i, 'unused': {}})
    self.assertEqual(out.get_device(), 0)
    self.assertEqual(out.data, expected_out)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@repeat_test_for_types(ALL_TENSORTYPES)
def test_data_parallel_module_kwargs_only_empty_tuple(self, dtype=torch.float):
    """DataParallel must scatter dict kwargs that contain an empty tuple."""
    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # `l` is the module created below; the name resolves when __init__ runs.
            self.l = l

        def forward(self, input):
            return self.l(input['data'])

    l = nn.Linear(10, 5).to("cuda", dtype)
    i = torch.randn(20, 10, device="cuda", dtype=dtype)
    expected_out = l(i).data
    n = nn.DataParallel(Net())
    out = n(input={'data': i, 'unused': ()})
    self.assertEqual(out.get_device(), 0)
    self.assertEqual(out.data, expected_out)
def test_state_dict(self):
    """state_dict must expose every parameter/buffer under hierarchical keys."""
    l = nn.Linear(5, 5)
    block = nn.Module()
    block.conv = nn.Conv2d(3, 3, 3, bias=False)
    net = nn.Module()
    net.linear1 = l
    net.linear2 = l  # shared module: both prefixes still appear in the dict
    net.bn = nn.BatchNorm2d(2)
    net.block = block
    net.add_module('empty', None)
    state_dict = net.state_dict()
    self.assertEqual(len(state_dict), 10)
    self.assertEqual(len(state_dict._metadata), 6)
    self.assertIn('', state_dict._metadata)
    self.assertIn('linear1', state_dict._metadata)
    self.assertIn('linear1.weight', state_dict)
    self.assertIn('linear1.bias', state_dict)
    self.assertIn('linear2', state_dict._metadata)
    self.assertIn('linear2.weight', state_dict)
    self.assertIn('linear2.bias', state_dict)
    self.assertIn('block', state_dict._metadata)
    self.assertIn('block.conv', state_dict._metadata)
    self.assertIn('block.conv.weight', state_dict)
    self.assertIn('block.conv.weight', state_dict)
    self.assertNotIn('block.conv.bias', state_dict)  # bias=False above
    self.assertIn('bn', state_dict._metadata)
    self.assertIn('bn.weight', state_dict)
    self.assertIn('bn.bias', state_dict)
    self.assertIn('bn.running_var', state_dict)
    self.assertIn('bn.running_mean', state_dict)
    self.assertIn('bn.num_batches_tracked', state_dict)
    # The None 'empty' submodule must not contribute any keys.
    self.assertFalse(any(map(lambda k: k.startswith('empty'), state_dict.keys())))
    # Every entry must alias the live parameter/buffer storage (no copies).
    for k, v in state_dict.items():
        param = net
        for component in k.split('.'):
            param = getattr(param, component)
            if isinstance(param, Parameter):
                param = param.data
        self.assertEqual(v.data_ptr(), param.data_ptr())
    l = nn.Linear(5, 5)
    state_dict = l.state_dict()
    self.assertEqual(len(state_dict), 2)
    self.assertEqual(len(state_dict._metadata), 1)
    self.assertIn('', state_dict._metadata)
    self.assertTrue(state_dict._metadata['']['version'] >= 0)
    self.assertEqual(state_dict['weight'].data_ptr(), l.weight.data_ptr())
    self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())
def test_load_state_dict(self):
    """load_state_dict must copy values, enforce strict key/shape checks,
    and tolerate unknown keys when strict=False."""
    l = nn.Linear(5, 5)
    block = nn.Module()
    block.conv1 = nn.Conv2d(3, 3, 3, bias=True)
    block.conv2 = nn.Conv2d(3, 3, 3, bias=False)
    net = nn.Module()
    net.linear1 = l
    net.linear2 = l
    net.bn = nn.BatchNorm2d(2)
    net.block = block
    net.add_module('empty', None)
    state_dict = net.state_dict()
    state_dict.update({
        'linear1.weight': torch.ones(5, 5),
        'block.conv1.bias': torch.arange(1, 4),
        'bn.running_mean': torch.randn(2),
    })
    net.load_state_dict(state_dict)
    self.assertEqual(net.linear1.weight.data, state_dict['linear1.weight'])
    self.assertEqual(net.block.conv1.bias.data, state_dict['block.conv1.bias'])
    self.assertEqual(net.bn.running_mean, state_dict['bn.running_mean'])
    # Strict mode: unexpected keys must fail.
    state_dict = net.state_dict()
    state_dict.update({'extra': torch.ones(5)})
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    state_dict = net.state_dict()
    state_dict.update({'extra.param': torch.ones(5)})
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    # Strict mode: missing keys must fail.
    state_dict = net.state_dict()
    del state_dict['linear1.weight']
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    state_dict = net.state_dict()
    state_dict.update({'bn.running_mean': torch.rand(14, 4)})  # wrong size
    self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))
    state_dict = net.state_dict()
    old_state_dict = deepcopy(state_dict)
    state_dict = {
        'linear1.weight': torch.ones(5, 5),
        'block.conv1.bias': torch.arange(1, 4),
        'bn.running_mean': torch.randn(2),
        'nonexistent_key': torch.rand(3)
    }
    # strict=False: unknown keys ignored, known keys loaded.
    net.load_state_dict(state_dict, strict=False)
    self.assertEqual(net.linear1.weight.data, state_dict['linear1.weight'])
    self.assertEqual(net.block.conv1.bias.data, state_dict['block.conv1.bias'])
    self.assertEqual(net.bn.running_mean, state_dict['bn.running_mean'])
    new_state_dict = net.state_dict()
    del old_state_dict['linear1.weight']
    del old_state_dict['block.conv1.bias']
    del old_state_dict['bn.running_mean']
    # Everything not overwritten above must be unchanged.
    for k, v, in old_state_dict.items():
        self.assertTrue(v.equal(new_state_dict[k]))
def test_load_state_dict_BC(self):
    """Loading pre-v2 BatchNorm state dicts must default num_batches_tracked to 0.

    The num_batches_tracked buffer was added at schema version 2; checkpoints
    written earlier (or without any version) must still load.
    """
    bn = nn.BatchNorm2d(3)
    legacy = bn.state_dict()
    del legacy['num_batches_tracked']
    # Simulate a checkpoint written at schema version 1.
    legacy._metadata['']['version'] = 1
    bn.load_state_dict(legacy)
    self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
    self.assertEqual(bn.num_batches_tracked.item(), 0)
    # Simulate a checkpoint carrying no version information at all.
    del legacy._metadata['']['version']
    bn.load_state_dict(legacy)
    self.assertEqual(bn.num_batches_tracked.dtype, torch.long)
    self.assertEqual(bn.num_batches_tracked.item(), 0)
def test_parameter_assignment(self):
    """Assigning Parameters registers them; plain tensors must not be registered."""
    l = nn.Linear(5, 5)

    def num_params():
        return len(list(l.parameters()))

    self.assertEqual(num_params(), 2)  # weight + bias
    new_param = Parameter(torch.randn(5, 5))
    l.param_name = new_param
    self.assertEqual(num_params(), 3)
    self.assertObjectIn(new_param, l.parameters())
    var = torch.randn(5, 5)
    l.var_name = var
    self.assertEqual(num_params(), 3)
    self.assertNotIn(id(var), map(id, l.parameters()))
    # Make sure Variables are not saved as parameters
    l.variable_attr = torch.empty(5, 5)
    self.assertEqual(num_params(), 3)
    l.param_attr = Parameter(torch.empty(5, 5))
    self.assertEqual(num_params(), 4)
    # It shouldn't be possible to replace a parameter with a Variable
    def assign_var():
        l.param_attr = torch.empty(5, 5)

    self.assertRaises(TypeError, assign_var)
    # But replacing it with None should be fine
    l.param_attr = None
    self.assertEqual(num_params(), 3)
def test_assignment(self):
    """Attribute assignment must register/unregister params and submodules,
    preserving registration order across replacements."""
    l = nn.Module()
    a = nn.Parameter(torch.randn(2))
    b = nn.Parameter(torch.randn(3))
    c = nn.Parameter(torch.randn(4))
    q = nn.Linear(4, 4)
    r = nn.Linear(5, 5)
    w = nn.Linear(6, 6)

    def test_assignments(get_list, a, b, c):
        # Check that None can be shadowed
        l.a = None
        self.assertIsNone(l.a)
        self.assertIn('a', l.__dict__)
        l.a = a
        self.assertIs(l.a, a)
        self.assertEqual(get_list(), [a])
        self.assertNotIn('a', l.__dict__)
        # Assign second object
        l.b = None
        self.assertIsNone(l.b)
        self.assertIn('b', l.__dict__)
        l.b = b
        self.assertIs(l.b, b)
        self.assertEqual(get_list(), [a, b])
        self.assertNotIn('b', l.__dict__)
        # Remove and add the object back. Order should be unchanged.
        l.a = None
        self.assertIsNone(l.a)
        self.assertEqual(get_list(), [b])
        l.a = a
        self.assertIs(l.a, a)
        self.assertEqual(get_list(), [a, b])
        # Replace object with another one. Order should be unchanged.
        l.a = c
        self.assertIs(l.a, c)
        self.assertEqual(get_list(), [c, b])
        # Remove and reassign an attribute. It should appear at the end of the list now.
        del l.a
        self.assertFalse(hasattr(l, 'a'))
        l.a = a
        self.assertIs(l.a, a)
        self.assertEqual(get_list(), [b, a])

    # Same protocol exercised for parameters and for child modules.
    test_assignments(lambda: list(l.parameters()), a, b, c)
    del l.a, l.b
    self.assertEqual(list(l.parameters()), [])
    test_assignments(lambda: list(l.children()), q, r, w)
    del l.a, l.b
    self.assertEqual(list(l.children()), [])
    buf = torch.randn(10)
    l.register_buffer('buf', buf)
    self.assertIs(l.buf, buf)
    l.buf = None
    self.assertIs(l.buf, None)
    self.assertNotIn('buf', l.__dict__)  # should be stored in l._buffers
    l.buf = buf
    self.assertIn('buf', l.state_dict())
    self.assertEqual(l.state_dict()['buf'], buf)
def test_Conv2d_inconsistent_types(self):
    """conv2d must reject mixed float/double input-weight combinations."""
    imgs = Variable(torch.randn(4, 1, 7, 7).float())
    kernels = Variable(torch.randn(1, 1, 3, 3).double())
    # Mixed dtypes are an error...
    self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(imgs, kernels))
    # ...but matching dtypes work.
    nn.functional.conv2d(imgs.float(), kernels.float())
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):
    """Non-cuDNN CUDA conv must reject mixed float/double dtypes."""
    inputs = Variable(torch.randn(4, 1, 7, 7).float().cuda())
    weights = Variable(torch.randn(1, 1, 3, 3).double().cuda())
    bias = Variable(torch.randn(1).double().cuda())
    with torch.backends.cudnn.flags(enabled=False):
        # inconsistent types should raise an exception
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
        # but it should work with the same type
        nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):
    """cuDNN conv must reject mixed float/double dtypes as well."""
    inputs = Variable(torch.randn(4, 1, 7, 7).float().cuda())
    weights = Variable(torch.randn(1, 1, 3, 3).double().cuda())
    bias = Variable(torch.randn(1).double().cuda())
    with torch.backends.cudnn.flags(enabled=True):
        # inconsistent types should raise an exception
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
        self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))
        # but it should work with the same type
        nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@repeat_test_for_types(ALL_TENSORTYPES)
def test_Conv2d_deterministic_cudnn(self, dtype=torch.float):
    """With cudnn.deterministic, two identical convs must match bit-for-bit."""
    inputs = torch.randn(2, 3, 5, 5, device="cuda", dtype=dtype, requires_grad=True)
    with cudnn.flags(enabled=True, benchmark=True, deterministic=True):
        conv1 = torch.nn.Conv2d(3, 3, 3).to("cuda", dtype)
        conv2 = torch.nn.Conv2d(3, 3, 3).to("cuda", dtype)
        # Make conv2 an exact copy of conv1.
        conv2.bias.data.copy_(conv1.bias.data)
        conv2.weight.data.copy_(conv1.weight.data)
        out1 = conv1(inputs)
        out2 = conv2(inputs)
        self.assertEqual(out1, out2, prec=0.0)  # prec=0.0 -> exact equality
        y = torch.randn(out1.size(), device="cuda", dtype=dtype)
        out1.backward(y)
        out2.backward(y)
        self.assertEqual(conv1.bias.grad.data, conv2.bias.grad.data, prec=0.0)
        self.assertEqual(conv1.weight.grad.data, conv2.weight.grad.data, prec=0.0)
def test_Conv2d_missing_argument(self):
    """Calling a Conv2d module with None instead of a tensor must raise TypeError."""
    conv = nn.Conv2d(3, 3, 3)
    with self.assertRaises(TypeError):
        conv(None)
def test_Conv2d_backward_twice(self):
    """A second backward through a freed conv graph must suggest retain_graph=True."""
    x = torch.randn(2, 3, 5, 5)
    conv = nn.Conv2d(3, 3, 3)
    result = conv(x)
    result.sum().backward()
    # The graph was freed by the first backward; the error must say how to keep it.
    with self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True'):
        result.sum().backward()
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
def test_Conv2d_large_workspace(self, dtype=torch.float):
    """cuDNN conv on workspace-heavy shapes must pick an algorithm that fits.

    These sizes require huge cuDNN workspaces; the backend must fall back to a
    reasonable algorithm instead of running out of memory, with and without
    benchmark mode.
    """
    sizes = [
        (1, 256, 109, 175),
        (1, 256, 80, 128),
        (1, 256, 120, 192),
    ]

    def run_test(benchmark):
        with torch.backends.cudnn.flags(benchmark=benchmark):
            conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to("cuda", dtype)
            for size in sizes:
                x = torch.randn(size, device="cuda", dtype=dtype)
                # clone().detach().requires_grad_() is the supported way to make a
                # leaf copy; torch.tensor(tensor, ...) is deprecated and warns.
                out = conv(x.clone().detach().requires_grad_(True))
                out.backward(torch.ones_like(out))

    run_test(benchmark=False)
    run_test(benchmark=True)
def test_conv_modules_raise_error_on_incorrect_input_size(self):
    """Conv/ConvTranspose modules must reject inputs of the wrong rank."""
    # Each module is paired with two input ranks that are invalid for it.
    cases = [
        (nn.Conv1d(3, 8, 3), (2, 4)),
        (nn.ConvTranspose1d(3, 8, 3), (2, 4)),
        (nn.Conv2d(3, 8, 3), (3, 5)),
        (nn.ConvTranspose2d(3, 8, 3), (3, 5)),
        (nn.Conv3d(3, 8, 3), (4, 6)),
        (nn.ConvTranspose3d(3, 8, 3), (4, 6)),
    ]
    for module, bad_ranks in cases:
        for rank in bad_ranks:
            bad_input = torch.empty(torch.Size((3, ) * rank))
            self.assertRaises(RuntimeError, lambda: module(bad_input))
def test_conv_shapecheck(self):
    """Spot-check minimum spatial sizes in Conv1d/2d/3d shape validation."""
    def check(expect_error, module, input_size):
        x = torch.empty(3, *input_size)
        if expect_error:
            self.assertRaises(RuntimeError, lambda: module(x))
        else:
            check_module = module
            check_module(x)  # must simply run without raising

    # Conv1d
    check(True, nn.Conv1d(1, 1, 3), (1, 2))
    check(True, nn.Conv1d(1, 1, 3, stride=2), (1, 2))
    check(False, nn.Conv1d(1, 1, 2), (1, 2))
    check(False, nn.Conv1d(1, 1, 2, stride=2), (1, 2))
    check(False, nn.Conv1d(1, 1, 3, stride=2, padding=1), (1, 2))
    # Conv2d
    check(True, nn.Conv2d(1, 1, (3, 3)), (1, 2, 2))
    check(False, nn.Conv2d(1, 1, (3, 3)), (1, 3, 3))
    check(False, nn.Conv2d(1, 1, (3, 3), padding=1), (1, 2, 2))
    # Conv3d
    check(True, nn.Conv3d(1, 1, (3, 3, 3)), (1, 2, 2, 2))
    check(False, nn.Conv3d(1, 1, (3, 3, 3)), (1, 3, 3, 3))
    check(False, nn.Conv3d(1, 1, (3, 3, 3), padding=1), (1, 2, 2, 2))
def test_ConvTranspose2d_output_size(self):
    """output_size hints inside the valid range succeed; all others raise ValueError."""
    deconv = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
    x = torch.randn(2, 3, 6, 6)
    for h in range(15, 22):
        for w in range(15, 22):
            # For this configuration only 18..20 is achievable per dimension.
            if 18 <= h <= 20 and 18 <= w <= 20:
                y = deconv(x, output_size=(h, w))
                self.assertEqual(y.size()[2:], (h, w))
            else:
                self.assertRaises(ValueError, lambda: deconv(x, (h, w)))
def _test_Conv2d_naive_groups(self, device="cpu", dtype=torch.float):
    """Grouped conv (groups=2) must equal two independent half-channel convs."""
    # Check that grouped convolutions matches two half convolutions
    m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)
    i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
    output = m(i)
    grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
    output.backward(grad_output)
    # First group: input/output channels [0, 2).
    m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
    m1.weight.data.copy_(m.weight.data[:2])
    m1.bias.data.copy_(m.bias.data[:2])
    i1 = Variable(i.data[:, :2].contiguous(), requires_grad=True)
    output1 = m1(i1)
    output1.backward(grad_output[:, :2].contiguous())
    # Second group: input/output channels [2, 4).
    m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
    m2.weight.data.copy_(m.weight.data[2:])
    m2.bias.data.copy_(m.bias.data[2:])
    i2 = Variable(i.data[:, 2:].contiguous(), requires_grad=True)
    output2 = m2(i2)
    output2.backward(grad_output[:, 2:].contiguous())
    self.assertEqual(output, torch.cat([output1, output2], 1))
    self.assertEqual(i.grad.data,
                     torch.cat([i1.grad.data, i2.grad.data], 1),
                     prec=dtype2prec[dtype])
    self.assertEqual(m.bias.grad.data,
                     torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                     prec=dtype2prec[dtype])
    self.assertEqual(m.weight.grad.data,
                     torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                     prec=dtype2prec[dtype])
# For https://github.com/pytorch/pytorch/pull/1273
# Almost identical to the above `test_Conv2d_naive_groups`
def test_Conv2d_groups_nobias(self):
    """Grouped conv without bias must equal two half convs (cpu; cuda float/half)."""
    dev_dtypes = [("cpu", torch.float)]
    if TEST_CUDA:
        dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
    for device, dtype in dev_dtypes:
        m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False).to(device, dtype)
        i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
        output.backward(grad_output)
        # First group: channels [0, 2).
        m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
        m1.weight.data.copy_(m.weight.data[:2])
        i1 = Variable(i.data[:, :2].contiguous(), requires_grad=True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :2].contiguous())
        # Second group: channels [2, 4).
        m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
        m2.weight.data.copy_(m.weight.data[2:])
        i2 = Variable(i.data[:, 2:].contiguous(), requires_grad=True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 2:].contiguous())
        self.assertEqual(output, torch.cat([output1, output2], 1))
        # Third positional argument of assertEqual is the comparison precision.
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         dtype2prec[dtype])
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         dtype2prec[dtype])
# Very similar to test_Conv2d_naive_groups but with special care to handle
# the number of groups == number of input channels
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
def test_Conv2d_depthwise_naive_groups_cuda(self, dtype=torch.float):
    """Depthwise conv (groups == in_channels) must equal per-channel convs.

    Compares a groups=2, in_channels=2 conv against two single-input-channel
    convs for depth multipliers 1 and 2, checking output and all gradients.
    """
    for depth_multiplier in [1, 2]:
        m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to("cuda", dtype)
        # clone().detach().requires_grad_() replaces the deprecated
        # torch.tensor(tensor, requires_grad=True) copy-construct idiom.
        i = (torch.randn(2, 2, 6, 6, device="cuda", dtype=dtype) / 2).clone().detach().requires_grad_(True)
        output = m(i)
        grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, device="cuda", dtype=dtype) / 2
        output.backward(grad_output)
        offset = 1 * depth_multiplier
        # First input channel vs. the first `offset` output channels.
        m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
        m1.weight.data = m.weight.data[:offset].clone()
        m1.bias.data = m.bias.data[:offset].clone()
        i1 = i.data[:, :1].contiguous().clone().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :offset].contiguous())
        # Second input channel vs. the remaining output channels.
        m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to("cuda", dtype)
        m2.weight.data.copy_(m.weight.data[offset:])
        m2.bias.data.copy_(m.bias.data[offset:])
        i2 = i.data[:, 1:].contiguous().clone().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, offset:].contiguous())
        self.assertEqual(output, torch.cat([output1, output2], 1),
                         prec=dtype2prec[dtype])
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         prec=dtype2prec[dtype])
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data,
                                    m2.bias.grad.data], 0),
                         prec=dtype2prec[dtype])
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data,
                                    m2.weight.grad.data], 0),
                         prec=dtype2prec[dtype])
def test_MaxUnpool2d_output_size(self):
    """MaxUnpool2d must accept valid output_size hints and reject invalid ones."""
    m = nn.MaxPool2d(3, stride=2, return_indices=True)
    mu = nn.MaxUnpool2d(3, stride=2)
    big_t = torch.rand(1, 1, 6, 6)
    big_t[0][0][4][4] = 100
    output_big, indices_big = m(big_t)
    # Indices from the 6x6 input do not fit the default unpool target -> error.
    self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))
    small_t = torch.rand(1, 1, 5, 5)
    for i in range(0, 4, 2):
        for j in range(0, 4, 2):
            small_t[:, :, i, j] = 100
    output_small, indices_small = m(Variable(small_t))
    for h in range(3, 10):
        for w in range(3, 10):
            if 4 <= h <= 6 and 4 <= w <= 6:
                size = (h, w)
                # Also exercise the alternative size encodings (LongStorage,
                # with and without leading batch/channel dims).
                if h == 5:
                    size = torch.LongStorage(size)
                elif h == 6:
                    size = torch.LongStorage((1, 1) + size)
                mu(output_small, indices_small, output_size=size)
            else:
                self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w)))
def test_container_copy(self):
    """deepcopy of a module must clone its parameters, not share them."""
    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.linear = nn.Linear(4, 5)

        def forward(self, input):
            return self.linear(input)

    x = torch.randn(2, 4)
    original = Model()
    clone = deepcopy(original)
    self.assertEqual(original(x).data, clone(x).data)
    # Mutating the copy must leave the original untouched.
    clone.linear.weight.data[:] = 2
    self.assertNotEqual(original(x).data, clone(x).data)
def test_RNN_cell(self):
    """Smoke test: run RNNCell/GRUCell repeatedly, then backprop.

    These cells are implemented via autograd, so no Jacobian check is needed.
    """
    for cell_type in (nn.RNNCell, nn.GRUCell):
        for bias in (True, False):
            x = torch.randn(3, 10)
            hidden = torch.randn(3, 20)
            cell = cell_type(10, 20, bias=bias)
            for _ in range(6):
                hidden = cell(x, hidden)
            hidden.sum().backward()
def _test_loss_equal_input_target_shape(self, cast):
    """Losses that require matching input/target shapes must reject mismatches."""
    shape_sensitive_losses = {
        'mse_loss': lambda x, y: F.mse_loss(x, y),
        'l1_loss': lambda x, y: F.l1_loss(x, y),
        'smooth_l1_loss': lambda x, y: F.smooth_l1_loss(x, y),
        'kl_div': lambda x, y: F.kl_div(x, y),
        'poisson_nll_loss': lambda x, y: F.poisson_nll_loss(x, y),
    }
    # Deliberately transposed shapes: (3, 5) vs (5, 3).
    input = Variable(cast(torch.randn(3, 5)))
    target = Variable(cast(torch.randn(5, 3)))
    for loss_fn in shape_sensitive_losses.values():
        self.assertRaises(Exception, lambda: loss_fn(input, target))
def test_loss_equal_input_target_shape(self):
    """CPU variant: run the shared shape-mismatch checks with an identity cast."""
    identity = lambda tensor: tensor
    self._test_loss_equal_input_target_shape(identity)
def test_NLLLoss_mismatched_batch(self):
    """nll_loss must report mismatched input/target batch sizes."""
    logits = torch.randn((10, 3), requires_grad=True)
    # Target batch (3) deliberately disagrees with the input batch (10).
    target = torch.zeros((3,), dtype=torch.int64)
    with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):
        F.nll_loss(logits, target)
@unittest.skipIf(not (TEST_CUDNN and TEST_CUDNN_VERSION >= 7000), "needs cudnn >= 7.0")
def test_CTCLoss_cudnn(self):
    """cuDNN CTC loss must agree with the reference and the native implementation."""
    target_lengths = [30, 25, 20]
    input_lengths = [50, 50, 50]
    # int32 targets route the call to the cuDNN kernel.
    targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
    log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2)
    res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
    expected = ctcloss_reference(log_probs, targets.cuda(), input_lengths, target_lengths).float()
    with torch.backends.cudnn.flags(enabled=False):
        # int64 targets with cuDNN disabled force the native implementation.
        res2 = torch.nn.functional.ctc_loss(log_probs, targets.cuda().long(), input_lengths, target_lengths)
    self.assertEqual(res, expected)
    self.assertEqual(res2, res)
def test_RNN_cell_no_broadcasting(self):
    """RNN/GRU/LSTM cells must reject mismatched shapes instead of broadcasting."""
    def test(cell_module, input, hx, input_size, hidden_size):
        cell = cell_module(input_size, hidden_size)
        self.assertRaises(RuntimeError, lambda: cell(input, hx))

    def test_all(hidden_size, bad_hx, good_hx, input_size, input):
        test(nn.RNNCell, input, bad_hx, input_size, hidden_size)
        test(nn.GRUCell, input, bad_hx, input_size, hidden_size)
        # For LSTM, a bad tensor in either slot of (h, c) must fail.
        test(nn.LSTMCell, input, (bad_hx, good_hx), input_size, hidden_size)
        test(nn.LSTMCell, input, (good_hx, bad_hx), input_size, hidden_size)

    hidden_size = 20
    input_size = 10
    input = torch.randn(3, input_size)
    bad_hx = torch.randn(1, hidden_size)
    good_hx = torch.randn(3, hidden_size)
    # Test hidden/input batch size broadcasting
    test_all(hidden_size, bad_hx, good_hx, input_size, input)
    # Test hx's hidden_size vs module's hidden_size broadcasting
    bad_hx = torch.randn(3, 1)
    test_all(hidden_size, bad_hx, good_hx, input_size, input)
    # Test input's input_size vs module's input_size broadcasting
    bad_input = torch.randn(3, 1)
    test_all(hidden_size, good_hx, good_hx, input_size, bad_input)
def test_invalid_dropout_p(self):
    """Dropout probabilities outside [0, 1] must raise ValueError everywhere."""
    v = torch.ones(1)
    for bad_p in (-0.1, 1.1):
        self.assertRaises(ValueError, lambda: nn.Dropout(bad_p))
        self.assertRaises(ValueError, lambda: nn.Dropout2d(bad_p))
        self.assertRaises(ValueError, lambda: nn.Dropout3d(bad_p))
        self.assertRaises(ValueError, lambda: F.dropout(v, bad_p))
def test_pad_sequence(self):
    """pad_sequence must pad to the longest sequence (zero or custom value)."""
    def pad(tensor, length):
        # Reference implementation: append zero rows up to `length`.
        return torch.cat(
            [tensor.data, tensor.data.new(
                length - tensor.size(0), *tensor.size()[1:]).zero_()])

    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    # batch_first = true
    expected = torch.tensor([[4, 5, 0], [1, 2, 3], [6, 0, 0]])
    padded = rnn_utils.pad_sequence([b, a, c], True)
    self.assertEqual(padded, expected)
    # batch_first = false
    padded = rnn_utils.pad_sequence([b, a, c])
    self.assertEqual(padded, expected.transpose(0, 1))
    # pad with non-zero value
    expected = torch.tensor([[4, 5, 1], [1, 2, 3], [6, 1, 1]])
    padded = rnn_utils.pad_sequence([b, a, c], True, 1)
    self.assertEqual(padded, expected)
    # Test pad sorted sequence
    expected = torch.tensor([[1, 2, 3], [4, 5, 0], [6, 0, 0]])
    padded = rnn_utils.pad_sequence([a, b, c], True)
    self.assertEqual(padded, expected)
    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        trailing_dims = [4] * num_dim
        for i in range(1, maxlen + 1):
            seq_len = i * i
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        # Shuffle to exercise unsorted input.
        random.shuffle(sequences)
        expected = []
        for seq in sequences:
            expected.append(pad(seq, maxlen * maxlen))
        # batch first = true
        expected = torch.stack(expected)
        padded = rnn_utils.pad_sequence(sequences, True)
        self.assertEqual(padded, expected)
        # batch first = false
        padded = rnn_utils.pad_sequence(sequences)
        self.assertEqual(padded, expected.transpose(0, 1))
def test_pack_sequence(self):
    """pack_sequence must interleave time steps and agree with pack_padded_sequence."""
    def _compatibility_test(sequences, lengths, batch_first):
        # pad -> pack -> unpad must round-trip, and pack_sequence must equal
        # packing the padded batch explicitly.
        padded = rnn_utils.pad_sequence(sequences, batch_first)
        packed = rnn_utils.pack_sequence(sequences)
        unpacked = rnn_utils.pad_packed_sequence(packed, batch_first)
        self.assertEqual(padded, unpacked[0])
        pack_padded = rnn_utils.pack_padded_sequence(padded, lengths, batch_first)
        self.assertEqual(packed, pack_padded)

    # single dimensional
    a = torch.tensor([1, 2, 3])
    b = torch.tensor([4, 5])
    c = torch.tensor([6])
    packed = rnn_utils.pack_sequence([a, b, c])
    # Time-major interleaving: step0 of each seq, then step1, ...
    expected = torch.tensor([1, 4, 6, 2, 5, 3])
    self.assertEqual(packed.batch_sizes, [3, 2, 1])
    self.assertEqual(packed.data.data, expected)
    # more dimensions
    maxlen = 9
    for num_dim in (0, 1, 2, 3):
        sequences = []
        lengths = []
        trailing_dims = [4] * num_dim
        # Descending lengths, as pack_padded_sequence requires.
        for i in range(maxlen, 0, -1):
            seq_len = i * i
            lengths.append(seq_len)
            sequences.append(torch.rand(seq_len, 5, *trailing_dims))
        # compatibility with other utilities
        for batch_first in (True, False):
            _compatibility_test(sequences, lengths, batch_first)
def test_pack_padded_sequence(self):
    """pack_padded_sequence must pack time-major, round-trip, and backprop only
    through the valid (non-padded) region."""
    def pad(tensor, length):
        return torch.cat([tensor, tensor.new(length - tensor.size(0), *tensor.size()[1:]).zero_()])

    lengths = [10, 8, 4, 2, 2, 2, 1]
    max_length = lengths[0]
    # batch_sizes[t] = number of sequences still alive at timestep t.
    batch_sizes = [sum(map(bool, filter(lambda x: x >= i, lengths))) for i in range(1, max_length + 1)]
    # NOTE(review): `offset` is never used below.
    offset = 0
    padded = torch.cat([pad(i * 100 + torch.arange(1., 5 * l + 1).view(l, 1, 5), max_length)
                        for i, l in enumerate(lengths, 1)], 1)
    # NOTE(review): torch.tensor(tensor, ...) is the deprecated copy-construct
    # idiom; kept byte-identical here (it makes `padded` a grad-requiring leaf).
    padded = torch.tensor(padded, requires_grad=True)
    expected_data = [[torch.arange(1., 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]
                     for n, batch_size in enumerate(batch_sizes)]
    expected_data = list(itertools.chain.from_iterable(expected_data))
    expected_data = torch.stack(expected_data, dim=0)
    for batch_first in (True, False):
        src = padded
        if batch_first:
            src = src.transpose(0, 1)
        # check output
        packed = rnn_utils.pack_padded_sequence(src, lengths, batch_first=batch_first)
        self.assertEqual(packed.data.data, expected_data)
        self.assertEqual(packed.batch_sizes, batch_sizes)
        # test inverse
        unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)
        self.assertEqual(unpacked, src)
        self.assertEqual(unpacked_len, lengths)
        # check grad
        if padded.grad is not None:
            padded.grad.data.zero_()
        grad_output = unpacked.data.clone().normal_()
        unpacked.backward(grad_output)
        if batch_first:
            grad_output.transpose_(0, 1)
        for i, l in enumerate(lengths):
            self.assertEqual(padded.grad.data[:l, i], grad_output[:l, i])
            if l < 10:
                # Padded positions must receive zero gradient.
                self.assertEqual(padded.grad.data[l:, i].abs().sum(), 0)
def _test_variable_sequence(self, device="cpu", dtype=torch.float):
    """Packed-sequence LSTM must match per-sequence runs in forward and backward."""
    def pad(var, length):
        if var.size(0) == length:
            return var
        return torch.cat([var, var.new_zeros(length - var.size(0), *var.size()[1:])])

    lengths = [10, 10, 6, 2, 2, 1, 1]
    max_length = lengths[0]
    x_leaf = torch.randn(max_length, len(lengths), 3, device=device, dtype=dtype, requires_grad=True)
    lstm = nn.LSTM(3, 4, bidirectional=True, num_layers=2).to(device, dtype)
    lstm2 = deepcopy(lstm).to(device, dtype)  # identical weights for the reference runs
    x = x_leaf
    # Compute sequences separately
    seq_outs = []
    seq_hiddens = []
    for i, l in enumerate(lengths):
        out, hid = lstm2(x[:l, i:i + 1])
        out_pad = pad(out, max_length)
        seq_outs.append(out_pad)
        seq_hiddens.append(hid)
    seq_out = torch.cat(seq_outs, 1)
    seq_hidden = tuple(torch.cat(hids, 1) for hids in zip(*seq_hiddens))
    # Use packed format
    packed = rnn_utils.pack_padded_sequence(x, lengths)
    packed_out, packed_hidden = lstm(packed)
    unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed_out)
    # Check forward
    self.assertEqual(packed_hidden, seq_hidden)
    self.assertEqual(unpacked, seq_out)
    self.assertEqual(unpacked_len, lengths)
    # Check backward
    seq_out.sum().backward()
    grad_x = x_leaf.grad.data.clone()
    x_leaf.grad.data.zero_()
    unpacked.sum().backward()
    self.assertEqual(x_leaf.grad, grad_x)
    for p1, p2 in zip(lstm.parameters(), lstm2.parameters()):
        # Third positional argument of assertEqual is the comparison precision.
        self.assertEqual(p1.grad, p2.grad, dtype2prec[dtype])
def test_variable_sequence(self):
    """CPU variant of the packed-sequence LSTM equivalence check."""
    self._test_variable_sequence(device="cpu", dtype=torch.float)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
def test_variable_sequence_cuda(self, dtype=torch.float):
    """CUDA variant of the packed-sequence LSTM equivalence check."""
    self._test_variable_sequence("cuda", dtype)
def test_LSTM_cell(self):
    """Smoke test: run LSTMCell repeatedly, then backprop.

    The cell is implemented via autograd, so no Jacobian check is needed.
    """
    for bias in (True, False):
        x = torch.randn(3, 10)
        h = torch.randn(3, 20)
        c = torch.randn(3, 20)
        cell = nn.LSTMCell(10, 20, bias=bias)
        for _ in range(6):
            h, c = cell(x, (h, c))
        (h + c).sum().backward()
@unittest.skipIf(not (TEST_CUDNN and TEST_MULTIGPU), 'CUDNN or multi-gpu not available')
def test_cudnn_rnn_dropout_states_device(self):
    """Smoke test: cuDNN RNN with dropout must run on a non-default GPU."""
    rnn = nn.RNN(10, 20, num_layers=2, dropout=.5)
    device = 1
    input = torch.randn(5, 4, 10).cuda(device)
    rnn.cuda(device)
    hx = torch.randn(2, 4, 20).cuda(device)
    output = rnn(input, hx)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_cudnn_weight_format(self):
    """Non-contiguous RNN weights must warn exactly once and still match results."""
    rnns = [
        nn.LSTM(10, 20, batch_first=True),
        nn.GRU(10, 20, batch_first=True),
        nn.RNN(10, 20, batch_first=True)
    ]
    first_warn = True
    for rnn in rnns:
        rnn.cuda()
        input = Variable(torch.randn(5, 4, 10).cuda(), requires_grad=True)
        hx = Variable(torch.randn(1, 5, 20).cuda(), requires_grad=True)
        all_vars = [input, hx] + list(rnn.parameters())
        if isinstance(rnn, nn.LSTM):
            cx = Variable(torch.randn(1, 5, 20).cuda(), requires_grad=True)
            all_vars[2:2] = [cx]  # insert the cell state right after hx
            hx = (hx, cx)
        # Reference run with contiguous weights.
        output = rnn(input, hx)
        output[0].sum().backward()
        grads = [v.grad.data.clone() for v in all_vars]
        for v in all_vars:
            v.grad.data.zero_()
        # Weights will no longer view onto the same chunk of memory
        weight = all_vars[4]
        weight_data = weight.data.clone()
        weight.data.set_(weight_data)
        for i in range(2):
            with warnings.catch_warnings(record=True) as w:
                output_noncontig = rnn(input, hx)
            # Only the first non-contiguous run overall should emit the warning.
            if first_warn:
                self.assertEqual(len(w), 1)
                self.assertIn('weights are not part of single contiguous chunk of memory', w[0].message.args[0])
                first_warn = False
            output_noncontig[0].sum().backward()
            grads_noncontig = [v.grad.data.clone() for v in all_vars]
            for v in all_vars:
                v.grad.data.zero_()
            self.assertEqual(output, output_noncontig)
            self.assertEqual(grads_noncontig, grads)
        # Make sure these still share storage
        weight_data[:] = 4
        self.assertEqual(weight_data, all_vars[4].data)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_cudnn_weight_tying(self):
    # Tie the forward and reverse l0 input biases to one tensor and run a
    # cuDNN training step; afterwards the CUDA output must match the CPU
    # output of the same (moved) module, i.e. tying survives the
    # flatten/step/transfer round trip.
    rnns = [
        nn.LSTM(10, 20, batch_first=True, bidirectional=True),
        nn.GRU(10, 20, batch_first=True, bidirectional=True),
        nn.RNN(10, 20, batch_first=True, bidirectional=True)
    ]
    for rnn in rnns:
        # Both directions now share a single bias parameter.
        rnn.bias_ih_l0_reverse = rnn.bias_ih_l0
        rnn.cuda()
        input = Variable(torch.randn(5, 4, 10).cuda(), requires_grad=True)
        hx = Variable(torch.randn(2, 5, 20).cuda(), requires_grad=True)
        all_vars = [input, hx] + list(rnn.parameters())
        opt = torch.optim.SGD(rnn.parameters(), lr=0.1)
        opt.zero_grad()
        if isinstance(rnn, nn.LSTM):
            cx = Variable(torch.randn(2, 5, 20).cuda(), requires_grad=True)
            all_vars[2:2] = [cx]
            hx = (hx, cx)

        # Warnings (about tied/non-contiguous weights) are captured so
        # they do not leak into the test output.
        with warnings.catch_warnings(record=True) as w:
            output = rnn(input, hx)
        output[0].sum().backward()

        opt.step()
        with warnings.catch_warnings(record=True) as w:
            output_cuda = rnn(input, hx)
        rnn.cpu()
        hx = (hx[0].cpu(), hx[1].cpu()) if isinstance(rnn, nn.LSTM) else hx.cpu()
        output_cpu = rnn(input.cpu(), hx)
        self.assertEqual(output_cuda, output_cpu)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
def test_cuda_rnn_fused(self, dtype=torch.float):
    # With cuDNN disabled (flags() guard below), GRU/LSTM on CUDA use the
    # fused pointwise kernels.  Two independently constructed modules get
    # identical weights copied in, run on identical inputs, and must
    # produce identical outputs, hidden states and gradients.

    def copy_rnn(rnn1, rnn2):
        # Copy rnn2's flattened weights into rnn1, layer by layer.
        for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
            for x, y in zip(x_layer, y_layer):
                x.data.copy_(y.data)

    def check_rnn_grads(rnn1, rnn2):
        # Weight gradients must agree to 5e-5 across the two runs.
        for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
            for x, y in zip(x_layer, y_layer):
                self.assertEqual(x.grad, y.grad, prec=5e-5)

    input_size = 10
    hidden_size = 6
    num_layers = 2
    seq_length = 7
    batch = 6
    input_val = torch.randn(seq_length, batch, input_size, device="cuda", dtype=dtype)
    grad_output = torch.randn(seq_length, batch, hidden_size, device="cuda", dtype=dtype)
    hx_val = torch.randn(num_layers, batch, hidden_size, device="cuda", dtype=dtype)
    grad_hy = torch.randn(num_layers, batch, hidden_size, device="cuda", dtype=dtype)
    with torch.backends.cudnn.flags(enabled=False):
        for module in (nn.GRU, nn.LSTM):
            for bias in (True, False):
                rnn = module(input_size, hidden_size, num_layers, bias=bias).to("cuda", dtype)
                rnn_cuda = module(input_size, hidden_size, num_layers, bias=bias).to("cuda", dtype)
                copy_rnn(rnn, rnn_cuda)

                is_lstm = isinstance(rnn, nn.LSTM)
                if is_lstm:
                    # LSTM takes an (h, c) pair; c is offset by +1 so the
                    # two states are distinguishable.
                    hx = (Variable(hx_val.clone(), requires_grad=True),
                          Variable(hx_val.clone().add(1), requires_grad=True))
                    hx_cuda = (Variable(hx_val.clone().cuda(), requires_grad=True),
                               Variable(hx_val.clone().cuda().add(1), requires_grad=True))
                else:
                    hx = Variable(hx_val.clone(), requires_grad=True)
                    hx_cuda = Variable(hx_val.clone().cuda(), requires_grad=True)

                inp = Variable(input_val.clone(), requires_grad=True)
                inp_cu = Variable(input_val.clone().cuda(), requires_grad=True)
                output1, hy1 = rnn(inp, hx)
                output2, hy2 = rnn_cuda(inp_cu, hx_cuda)
                if is_lstm:
                    torch.autograd.backward(
                        [output1, hy1[0], hy1[1]], [grad_output, grad_hy, grad_hy + 1]
                    )
                    torch.autograd.backward(
                        [output2, hy2[0], hy2[1]],
                        [grad_output.cuda(), grad_hy.cuda(), (grad_hy + 1).cuda()]
                    )
                else:
                    torch.autograd.backward([output1, hy1], [grad_output, grad_hy])
                    torch.autograd.backward([output2, hy2], [grad_output.cuda(), grad_hy.cuda()])

                self.assertEqual(output1, output2)
                self.assertEqual(hy1, hy2)

                check_rnn_grads(rnn, rnn_cuda)
                self.assertEqual(inp.grad.data, inp_cu.grad.data)
                if is_lstm:
                    self.assertEqual(hx[0].grad.data, hx_cuda[0].grad.data)
                    self.assertEqual(hx[1].grad.data, hx_cuda[1].grad.data)
                else:
                    self.assertEqual(hx.grad.data, hx_cuda.grad.data)
def test_rnn_args_check(self):
    """RNN/GRU/LSTM must raise RuntimeError on mismatched input/hidden
    shapes: wrong batch size, wrong feature size, wrong layer count.

    Fix: the mode comparison used ``is not`` against a string literal,
    which relies on CPython string interning (and raises SyntaxWarning on
    3.8+); it is now a value comparison with ``!=``.
    """
    input_size = 3
    hidden_size = 5
    num_layers = 2
    batch_size = 4
    seq_len = 6
    num_directions = 1
    bad_size = 7  # prime number so that no size can divide it.

    def test(input_shape, hidden_shape, mode):
        for input, hidden in get_inputs(input_shape, hidden_shape, mode):
            model = getattr(nn, mode)(input_size, hidden_size, num_layers)
            self.assertRaises(RuntimeError, lambda: model(input, hidden))

    correct_input_shape = (seq_len, batch_size, input_size)
    correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)

    def update_shape(shape, dim, new_dim_size):
        # Return `shape` with dimension `dim` replaced by `new_dim_size`.
        new_shape = list(shape)
        new_shape[dim] = new_dim_size
        return tuple(new_shape)

    def get_inputs(input_shape, hidden_shape, mode):
        '''returns list( tuple(input, hidden) )
        where input, hidden are inputs to a model'''
        input = torch.randn(input_shape)
        hidden = torch.randn(hidden_shape)
        if mode != 'LSTM':
            return [(input, hidden)]
        if hidden_shape == correct_hidden_shape:
            return [(input, (hidden, hidden))]
        # For LSTM, exercise a bad h with a good c and vice versa.
        good_hidden = torch.randn(correct_hidden_shape)
        return [
            (input, (hidden, good_hidden)),
            (input, (good_hidden, hidden)),
        ]

    rnn_modes = ['RNN', 'GRU', 'LSTM']
    for mode in rnn_modes:
        # Incorrect input batch size
        input_shape = update_shape(correct_input_shape, 1, bad_size)
        hidden_shape = correct_hidden_shape
        test(input_shape, hidden_shape, mode)

        # Incorrect hidden batch size
        input_shape = correct_input_shape
        hidden_shape = update_shape(correct_hidden_shape, 1, bad_size)
        test(input_shape, hidden_shape, mode)

        # Incorrect input size
        input_shape = update_shape(correct_input_shape, 2, bad_size)
        hidden_shape = correct_hidden_shape
        test(input_shape, hidden_shape, mode)

        # Incorrect hidden size
        input_shape = correct_input_shape
        hidden_shape = update_shape(correct_hidden_shape, 2, bad_size)
        test(input_shape, hidden_shape, mode)

        # Incorrect hidden[0]
        input_shape = correct_input_shape
        hidden_shape = update_shape(correct_hidden_shape, 0, bad_size)
        test(input_shape, hidden_shape, mode)
def test_rnn_initial_hidden_state(self):
    """Omitting the initial hidden state must behave exactly like passing
    an all-zeros hidden state.

    Fix: the mode comparison used ``is`` against a string literal, which
    depends on CPython string interning (SyntaxWarning on 3.8+); it is
    now a value comparison with ``==``.
    """
    rnn_modes = ['RNN', 'GRU', 'LSTM']
    for mode in rnn_modes:
        rnn = getattr(nn, mode)(30, 20, 2)
        input = torch.randn(10, 32, 30)
        hidden = torch.zeros(2, 32, 20)
        if mode == 'LSTM':
            hidden = (hidden, hidden)
        output1, hidden1 = rnn(input, hidden)
        output2, hidden2 = rnn(input)
        self.assertEqual(output1, output2)
        self.assertEqual(hidden1, hidden2)
def _test_rnn_retain_variables(self, device="cpu", dtype=torch.double):
    """backward(retain_graph=True) must yield identical gradients on
    every replay of the same graph, for LSTM/GRU/RNN."""
    modules = [nn.LSTM(10, 20, num_layers=2).to(device, dtype),
               nn.GRU(10, 20, num_layers=2).to(device, dtype),
               nn.RNN(10, 20, num_layers=2).to(device, dtype)]
    for rnn in modules:
        inp = torch.randn(5, 6, 10, device=device, dtype=dtype, requires_grad=True)
        out = rnn(inp)
        out[0].sum().backward(retain_graph=True)
        reference = [inp.grad.data.clone()] + [p.grad.data.clone() for p in rnn.parameters()]
        for _ in range(4):
            rnn.zero_grad()
            inp.grad.data.zero_()
            out[0].sum().backward(retain_graph=True)
            replay = [inp.grad.data] + [p.grad.data for p in rnn.parameters()]
            self.assertEqual(reference, replay)
def test_rnn_retain_variables(self):
    """CPU/double variant of the retain_graph gradient-stability check."""
    self._test_rnn_retain_variables()
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
def test_rnn_retain_variables_cuda(self, dtype=torch.float):
    # Run once with cuDNN disabled (native CUDA kernels)...
    with torch.backends.cudnn.flags(enabled=False):
        self._test_rnn_retain_variables("cuda", dtype)
    # ...and once more on the default (cuDNN-enabled) path.
    self._test_rnn_retain_variables("cuda", dtype)
def _test_RNN_cpu_vs_cudnn(self, dropout):
    """Compare CPU and cuDNN results (forward, backward, weight grads) of
    RNN/LSTM/GRU across the cross-product of bias/bidirectional/
    batch_first/contiguity/variable-length options, plus the two RNN
    nonlinearities, for the given dropout probability.

    Fix: ``hx_val`` and ``grad_hy`` are now created *before* the
    non-contiguity branch.  Previously they were created after it, so
    ``make_noncontig(hx_val)`` / ``make_noncontig(grad_hy)`` operated on
    stale values from the previous loop iteration and were immediately
    overwritten — the non-contiguous hidden/grad cases were never
    actually exercised.
    """

    def forward_backward(cuda, rnn, input_val, hx_val, grad_output, grad_hy, weights_val):
        is_lstm = isinstance(rnn, nn.LSTM)

        # Load the reference weights so both runs start identically.
        for x_layer, y_layer in zip(rnn.all_weights, weights_val):
            for x, y in zip(x_layer, y_layer):
                x.data.copy_(y.data)

        if isinstance(input_val, rnn_utils.PackedSequence):
            input = rnn_utils.PackedSequence(
                Variable(input_val.data.data, requires_grad=True), input_val.batch_sizes)
            input_var = input.data
        else:
            input = Variable(input_val.clone(), requires_grad=True)
            input_var = input
        if is_lstm:
            # c0 is offset by +1 so h0 and c0 are distinguishable.
            hx = (Variable(hx_val.clone(), requires_grad=True),
                  Variable(hx_val.add(1), requires_grad=True))
        else:
            hx = Variable(hx_val.clone(), requires_grad=True)

        if cuda:
            rnn.cuda()
            input_var.data = input_var.data.cuda()
            if is_lstm:
                hx[0].data = hx[0].data.cuda()
                hx[1].data = hx[1].data.cuda()
            else:
                hx.data = hx.data.cuda()
            grad_hy = grad_hy.cuda()
            grad_output = grad_output.cuda()

        output, hy = rnn(input, hx)

        if isinstance(output, rnn_utils.PackedSequence):
            output = output.data

        if is_lstm:
            torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_hy + 1])
        else:
            torch.autograd.backward([output, hy], [grad_output, grad_hy])

        return {'output': output.data,
                'hy': hy[0].data if is_lstm else hy.data,
                'weights': rnn.all_weights,
                'grad_input': input_var.grad.data,
                'grad_hx': hx[0].grad.data if is_lstm else hx.grad.data,
                'cy': hy[1].data if is_lstm else None,
                'grad_cx': hx[1].grad.data if is_lstm else None}

    input_size = 10
    hidden_size = 6
    num_layers = 2
    seq_length = 7
    batch = 6

    def make_noncontig(tensor):
        # Stack with a zeroed twin along a new trailing dim and select the
        # original back out: same values, non-contiguous strides.
        ndim = tensor.dim()
        return torch.stack([tensor.clone().zero_(), tensor], ndim).select(ndim, 1)

    def compare_cpu_gpu(outputs_cpu, outputs_gpu):
        self.assertEqual(list(outputs_cpu.keys()), list(outputs_gpu.keys()))
        for key in outputs_cpu.keys():
            if key != 'weights':
                self.assertEqual(outputs_cpu[key], outputs_gpu[key], prec=5e-5, message=key)

        # check grad weights separately, as nested dict
        for cpu_layer_weight, gpu_layer_weight in zip(outputs_cpu['weights'], outputs_gpu['weights']):
            for (cpu_weight, gpu_weight) in zip(cpu_layer_weight, gpu_layer_weight):
                self.assertEqual(cpu_weight.grad.data, gpu_weight.grad.data, prec=5e-5)

    for module in (nn.RNN, nn.LSTM, nn.GRU):
        for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                in product((True, False), repeat=6):

            num_directions = 2 if bidirectional else 1
            if batch_first:
                input_val = torch.randn(batch, seq_length, input_size)
                grad_output = torch.randn(batch, seq_length, hidden_size * num_directions)
            else:
                input_val = torch.randn(seq_length, batch, input_size)
                grad_output = torch.randn(seq_length, batch, hidden_size * num_directions)

            # These must exist before the non-contiguity branch below (see
            # docstring); previously they were created afterwards.
            hx_val = torch.randn(num_layers * num_directions, batch, hidden_size)
            grad_hy = torch.randn(num_layers * num_directions, batch, hidden_size)

            if not contig:
                grad_output = make_noncontig(grad_output)
                grad_hy = make_noncontig(grad_hy)
                input_var = make_noncontig(input_val)
                hx_val = make_noncontig(hx_val)

            if variable_len:
                lengths = [7, 5, 5, 2, 1, 1]
                if lens_as_tensor:
                    lengths = torch.tensor(lengths, dtype=torch.long)
                input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

            rnn = module(input_size,
                         hidden_size,
                         num_layers,
                         bias=bias,
                         dropout=dropout,
                         bidirectional=bidirectional,
                         batch_first=batch_first)

            outputs_cpu = forward_backward(
                False, rnn, input_val, hx_val, grad_output, grad_hy, rnn.all_weights)

            rnn_gpu = module(input_size,
                             hidden_size,
                             num_layers,
                             bias=bias,
                             dropout=dropout,
                             bidirectional=bidirectional,
                             batch_first=batch_first)

            outputs_gpu = forward_backward(
                True, rnn_gpu, input_val, hx_val, grad_output, grad_hy, rnn.all_weights)

            compare_cpu_gpu(outputs_cpu, outputs_gpu)

    for nonlinearity in ('tanh', 'relu'):
        hx_val = torch.randn(num_layers, batch, hidden_size)
        input_val = torch.randn(seq_length, batch, input_size)
        grad_output = torch.randn(
            seq_length, batch, hidden_size * num_directions)
        grad_hy = torch.randn(
            num_layers * num_directions, batch, hidden_size)

        rnn = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity)
        outputs_cpu = forward_backward(False, rnn, input_val, hx_val, grad_output, grad_hy, rnn.all_weights)

        rnn_gpu = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity)
        outputs_gpu = forward_backward(True, rnn_gpu, input_val, hx_val, grad_output, grad_hy, rnn.all_weights)

        compare_cpu_gpu(outputs_cpu, outputs_gpu)
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
@default_tensor_type(torch.FloatTensor)  # FIXME: just until torch.cuda.DoubleTensor.sum() implemented
def test_RNN_cpu_vs_cudnn_no_dropout(self):
    # Deterministic comparison: dropout=0 gives bit-comparable paths.
    self._test_RNN_cpu_vs_cudnn(0)
@unittest.skipIf(not (TEST_CUDNN and TEST_CUDNN_VERSION >= 5103), "needs cudnn >= 5.1")
@default_tensor_type(torch.FloatTensor)  # FIXME: just until torch.cuda.DoubleTensor.sum() implemented
def test_RNN_cpu_vs_cudnn_with_dropout(self):
    # Because of dropout randomness, can only compare dropout=0 and dropout=1
    self._test_RNN_cpu_vs_cudnn(1)
@unittest.skipIf(not (TEST_CUDNN and TEST_CUDNN_VERSION >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout(self):
    # checking the assumption that cuDNN sticks dropout in between
    # RNN layers
    for p in (0, 0.276, 0.731, 1):
        for train in (True, False):
            for cuda in (True, False):
                rnn = nn.RNN(10, 1000, 2, bias=False, dropout=p, nonlinearity='relu')
                if cuda:
                    rnn.cuda()

                if train:
                    rnn.train()
                else:
                    rnn.eval()
                # With all-ones weights and all-ones input, every layer-0
                # unit saturates to 10, so each layer-1 unit sums
                # 1000 * 10 = 10000 when no dropout intervenes.
                rnn.weight_ih_l0.data.fill_(1)
                rnn.weight_hh_l0.data.fill_(1)
                rnn.weight_ih_l1.data.fill_(1)
                rnn.weight_hh_l1.data.fill_(1)
                input = torch.ones(1, 1, 10)
                hx = torch.zeros(2, 1, 1000)
                if cuda:
                    input = input.cuda()
                    hx = hx.cuda()

                output, hy = rnn(input, hx)
                # All units are symmetric, so every output value is equal.
                self.assertEqual(output.data.min(), output.data.max())
                output_val = output.data[0][0][0]
                if p == 0 or not train:
                    self.assertEqual(output_val, 10000)
                elif p == 1:
                    self.assertEqual(output_val, 0)
                else:
                    self.assertGreater(output_val, 8000)
                    self.assertLess(output_val, 12000)
                    # Survivors are scaled by 1/(1-p); undoing the scale
                    # must land (nearly) on a multiple of 10.
                    denorm_mod = (output_val * (1 - p)) % 10
                    self.assertLess(min(denorm_mod, 10 - denorm_mod), 1e-2)

                self.assertEqual(hy[0].data.min(), hy[0].data.max())
                self.assertEqual(hy[1].data.min(), hy[1].data.max())
                # Layer-0 hidden saturates at 10; layer-1 hidden equals the
                # (possibly dropout-affected) output value.
                self.assertEqual(hy.data[0][0][0], 10)
                self.assertEqual(hy.data[1][0][0], output_val)
@unittest.skipIf(not (TEST_CUDNN and TEST_CUDNN_VERSION >= 5103), "needs cudnn >= 5.1")
def test_RNN_dropout_state(self):
    # The cuDNN dropout state must survive a pickle round trip: an
    # unpickled module (after flatten_parameters) keeps producing valid,
    # dropout-randomized outputs.
    import sys
    if sys.version_info[0] == 2:
        # Python 2 compatibility path; cPickle is the fast pickler there.
        import cPickle as pickle
    else:
        import pickle
    for p in (0, 0.1234):
        for train in (True, False):
            for cuda in (True, False):
                rnn = nn.RNN(100, 100, 2, bias=False, dropout=p, nonlinearity='relu')
                if cuda:
                    rnn.cuda()

                if train:
                    rnn.train()
                else:
                    rnn.eval()
                input = torch.rand(1, 1, 100)
                hx = torch.rand(2, 1, 100)
                if cuda:
                    input = input.cuda()
                    hx = hx.cuda()

                output1, hy1 = rnn(input, hx)
                output2, hy2 = rnn(input, hx)

                rnn_pickle = pickle.dumps(rnn)
                rnn2 = pickle.loads(rnn_pickle)
                rnn2.flatten_parameters()
                output3, hy3 = rnn2(input, hx)

                # Without active dropout all runs agree; with it, each
                # forward must draw a fresh mask and differ.
                if p == 0 or not train:
                    self.assertEqual(output1, output2)
                    self.assertEqual(output1, output3)
                    self.assertEqual(hy1, hy2)
                    self.assertEqual(hy1, hy3)
                else:
                    self.assertNotEqual(output1, output2)
                    self.assertNotEqual(output1, output3)
                    self.assertNotEqual(hy1, hy2)
                    self.assertNotEqual(hy1, hy3)
@unittest.skipIf(not (TEST_CUDNN and TEST_CUDNN_VERSION >= 5103), "needs cudnn >= 5.1")
def test_RNN_change_dropout(self):
    # Mutating rnn.dropout between forward passes must take effect:
    # repeated passes agree when p in {0, 1} or in eval mode, and differ
    # (fresh masks) for intermediate p in train mode.
    for train, cuda in product((True, False), repeat=2):
        rnn = nn.RNN(100, 100, 2, dropout=0, nonlinearity='relu')
        input = torch.rand(3, 2, 100)
        if cuda:
            input.data = input.data.cuda()
            rnn.cuda()

        if train:
            rnn.train()
        else:
            rnn.eval()

        prev_output = None
        for p in (0, 0.5, 0, 0.7, 0.2, 1, 0.2, 0):
            rnn.dropout = p
            output1, hy1 = rnn(input)
            output2, hy2 = rnn(input)

            if p == 0 or p == 1 or not train:
                self.assertEqual(output1, output2)
                self.assertEqual(hy1, hy2)
            else:
                self.assertNotEqual(output1, output2)
                self.assertNotEqual(hy1, hy2)

            if prev_output is not None:
                if not train:
                    # Dropout is inactive in eval mode, so the output is
                    # independent of p and stays constant across the loop.
                    self.assertEqual(output1.data, prev_output)
                    self.assertEqual(output2.data, prev_output)
                else:
                    self.assertNotEqual(output1.data, prev_output)
                    self.assertNotEqual(output2.data, prev_output)
            prev_output = output1.data
def _verify_pixel_shuffle(self, input, output, upscale_factor):
    """Element-wise reference check of PixelShuffle: each output pixel
    (c, h, w) must come from input channel
    c*r^2 + (h%r)*r + (w%r) at position (h//r, w//r), with r the
    upscale factor."""
    r = upscale_factor
    for out_c in range(output.size(1)):
        for out_h in range(output.size(2)):
            for out_w in range(output.size(3)):
                src_h = out_h // r
                src_w = out_w // r
                src_c = out_c * r ** 2 + (out_h % r) * r + (out_w % r)
                self.assertEqual(output[:, out_c, out_h, out_w],
                                 input[:, src_c, src_h, src_w])
def test_inplace_thnn(self):
    """In-place activations must not clobber the grad_output tensor that
    the caller passes to backward()."""
    for activation_cls in (nn.ReLU, nn.ELU, nn.SELU, nn.CELU, nn.RReLU):
        activation = activation_cls(inplace=True)
        leaf = torch.randn(5, 5, requires_grad=True)
        # `leaf + 0` creates a fresh tensor so in-place is autograd-legal.
        out = activation(leaf + 0)
        grad_out = torch.randn(5, 5)
        grad_out_snapshot = grad_out.clone()
        out.backward(grad_out)
        self.assertEqual(grad_out, grad_out_snapshot)
@unittest.skipIf(not TEST_CUDA, 'CUDA not available')
@repeat_test_for_types(ALL_TENSORTYPES)
def test_noncontig_conv_grad_cuda(self, dtype=torch.float):
    # FIXME: remove after adding non-contiguous grad tests for all modules
    module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to("cuda", dtype)
    input = torch.randn(2, 3, 10, 10, dtype=dtype, device="cuda", requires_grad=True)
    output = module(input)

    # Selecting the middle of a larger tensor makes the grad non-contiguous.
    grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device="cuda")[:, 1]
    assert not grad.is_contiguous()
    output.backward(grad, retain_graph=True)
    self.assertIsNotNone(input.grad)
    result = input.grad.data.clone()
    input.grad.data.zero_()

    # Backward with a contiguous copy must give the same input gradient.
    output.backward(grad.contiguous())
    self.assertEqual(result, input.grad.data, dtype2prec[dtype])
def test_pixel_shuffle(self):
    """PixelShuffle forward matches the element-wise reference, and —
    since a shuffle is a pure permutation — backward with the output as
    the incoming gradient must reproduce the input exactly."""
    batch_size = random.randint(1, 3)
    upscale_factor = random.randint(2, 5)
    channels = random.randint(1, 4) * upscale_factor ** 2
    height = random.randint(5, 10)
    width = random.randint(5, 10)

    inp = torch.rand(batch_size, channels, height, width, requires_grad=True)
    out = nn.PixelShuffle(upscale_factor)(inp)
    self._verify_pixel_shuffle(inp.data, out.data, upscale_factor)
    out.backward(out.data)
    self.assertEqual(inp.data, inp.grad.data)
def test_elu_inplace_view(self):
    """In-place ELU applied through a narrow() view must return the view
    itself and keep gradients w.r.t. the root tensor correct."""
    root = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)

    def apply_through_view(base):
        clone = base.clone()
        view = clone.narrow(0, 1, 2)
        result = F.elu(view, inplace=True)
        self.assertIs(result, view)
        return clone

    gradcheck(apply_through_view, [root])
    gradgradcheck(apply_through_view, [root])
def test_relu_inplace_view(self):
    """In-place ReLU applied through a narrow() view must return the view
    itself and keep gradients w.r.t. the root tensor correct."""
    root = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)

    def apply_through_view(base):
        clone = base.clone()
        view = clone.narrow(0, 1, 2)
        result = F.relu(view, inplace=True)
        self.assertIs(result, view)
        return clone

    gradcheck(apply_through_view, [root])
    gradgradcheck(apply_through_view, [root])
def test_bce_loss_always_nonnegative(self):
    """BCELoss must never be negative, even at the saturation points
    (input == target == 1 and input == target == 0)."""
    for factory in (torch.ones, torch.zeros):
        target = factory(5)
        input = factory(5)
        self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)
def test_bce_with_logits_raises_if_target_and_input_are_different_size(self):
    """Mismatched input/target shapes must raise ValueError in both
    directions of the mismatch."""
    for target_shape, input_shape in (((5,), (5, 1)), ((5, 1), (5,))):
        target = torch.rand(*target_shape)
        input = torch.rand(*input_shape)
        with self.assertRaises(ValueError):
            nn.BCEWithLogitsLoss()(input, target)
def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
    """BCEWithLogitsLoss(x, t) must equal BCELoss(sigmoid(x), t) — with and
    without a weight, for random logits and for saturated ones."""
    sigmoid = nn.Sigmoid()

    target = torch.rand(64, 4)
    output = torch.rand(64, 4) - 0.5
    self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
                     nn.BCELoss()(sigmoid(output), target))

    weight = torch.rand(4)
    self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target),
                     nn.BCELoss(weight)(sigmoid(output), target))

    # Heavily negative logits drive sigmoid to (numerical) zero.
    target = torch.zeros(4, 1, dtype=torch.float)
    output = torch.empty(4, 1, dtype=torch.float).fill_(-100)
    self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
                     nn.BCELoss()(sigmoid(output), target))
    self.assertEqual(nn.BCEWithLogitsLoss(reduction='none')(output, target),
                     nn.BCELoss(reduction='none')(sigmoid(output), target))

    weight = torch.rand(1, dtype=torch.float)
    self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target),
                     nn.BCELoss(weight)(sigmoid(output), target))
def test_bce_with_logits_has_correct_grad_at_zero(self):
    """d/dx sum(BCEWithLogits(x, 0)) at x == 0 is sigmoid(0) = 0.5 per element."""
    logits = torch.zeros(3, 1, requires_grad=True)
    labels = torch.zeros(3, 1)
    nn.BCEWithLogitsLoss(reduction='sum')(logits, labels).backward()
    self.assertEqual(logits.grad, torch.empty(3, 1).fill_(0.5))
def test_bce_with_logits_broadcasts_weights(self):
    """A (4,) or (16, 1) weight must give the same loss as its explicit
    (16, 4) expansion."""
    target = torch.rand(16, 4)
    output = torch.rand(16, 4) - 0.5
    for weight in (torch.rand(4), torch.rand(16, 1)):
        broadcast_loss = nn.BCEWithLogitsLoss(weight)(output, target)
        expanded = weight.expand(16, 4).contiguous()
        explicit_loss = nn.BCEWithLogitsLoss(expanded)(output, target)
        self.assertEqual(broadcast_loss, explicit_loss)
def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
    """An all-ones pos_weight must be a no-op."""
    target = torch.rand(64, 4)
    output = torch.rand(64, 4) - 0.5
    pos_weight = torch.ones(64, 4)
    plain = nn.BCEWithLogitsLoss()(output, target)
    weighted = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
    self.assertEqual(plain, weighted)
def test_bce_with_logits_broadcasts_pos_weights(self):
    """pos_weight shapes (4,), (1, 4) and (64, 4) must all produce the
    same loss (broadcasting semantics)."""
    target = torch.rand(64, 4)
    output = torch.rand(64, 4) - 0.5
    pos_weight = torch.rand(4)
    base = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
    for shape in ((1, 4), (64, 4)):
        expanded = pos_weight.expand(*shape)
        self.assertEqual(
            base, nn.BCEWithLogitsLoss(pos_weight=expanded)(output, target))
def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
    """With an all-ones pos_weight the gradient at logits == labels == 0
    is still sigmoid(0) = 0.5 per element (sum reduction)."""
    logits = torch.zeros(3, 1, requires_grad=True)
    labels = torch.zeros(3, 1)
    pos_weight = torch.ones(3, 1)
    loss = nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='sum')(logits, labels)
    loss.backward()
    self.assertEqual(logits.grad, torch.empty(3, 1).fill_(0.5))
def test_bce_loss_broadcasts_weights(self):
    """A (4,) or (16, 1) BCELoss weight must match its explicit (16, 4)
    expansion."""
    sigmoid = nn.Sigmoid()
    target = torch.rand(16, 4)
    output = torch.rand(16, 4) - 0.5
    for weight in (torch.rand(4), torch.rand(16, 1)):
        broadcast_loss = nn.BCELoss(weight)(sigmoid(output), target)
        expanded = weight.expand(16, 4).contiguous()
        explicit_loss = nn.BCELoss(expanded)(sigmoid(output), target)
        self.assertEqual(broadcast_loss, explicit_loss)
def test_elu_inplace_gradgrad(self):
    """First and second order gradcheck of in-place ELU on a cloned leaf."""
    leaf = torch.randn(8, requires_grad=True)

    def fn(base):
        return F.elu(base.clone(), inplace=True)

    gradcheck(fn, [leaf])
    gradgradcheck(fn, [leaf])
def test_hardtanh_inplace_gradgrad(self):
    """First and second order gradcheck of in-place hardtanh on a cloned leaf."""
    leaf = torch.randn(8, requires_grad=True)

    def fn(base):
        return F.hardtanh(base.clone(), inplace=True)

    gradcheck(fn, [leaf])
    gradgradcheck(fn, [leaf])
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_cudnn_half(self):
    # Half-precision input through a half THNN batchnorm, then the same
    # input through the float module (cuDNN mixed-precision path): the
    # outputs must match exactly and input grads agree to 1e-3.
    # THNN
    input = torch.randint(1, 10, (2, 3, 2, 2), dtype=torch.half, device="cuda", requires_grad=True)
    m = nn.BatchNorm2d(3).half().cuda()
    thnn_output = m(input)
    thnn_output.sum().backward()
    thnn_input_grad = input.grad.data.clone()
    self.assertEqual(thnn_output.type(), input.type())
    # cuDNN
    if TEST_CUDNN:
        input.grad = None
        # Float module with half input exercises cuDNN's mixed precision.
        m = m.float()
        cudnn_output = m(input)
        cudnn_output.sum().backward()
        cudnn_input_grad = input.grad.data.clone()
        self.assertEqual(cudnn_output.type(), input.type())
        self.assertEqual(cudnn_output, thnn_output)
        self.assertAlmostEqual(cudnn_input_grad, thnn_input_grad, delta=1e-3)
def _test_batchnorm_update_stats(self, device="cpu", dtype=torch.float):
    """Running stats and num_batches_tracked update in train mode and are
    frozen in eval mode."""
    module = nn.BatchNorm1d(3).to(device, dtype)
    data = torch.rand(4, 3, device=device, dtype=dtype)

    def snapshot():
        return (module.running_mean.clone(),
                module.running_var.clone(),
                module.num_batches_tracked.clone())

    # Training pass: stats must move, counter must increment.
    mean_before, var_before, count_before = snapshot()
    module(data)
    self.assertNotEqual(mean_before, module.running_mean)
    self.assertNotEqual(var_before, module.running_var)
    self.assertEqual(count_before + 1, module.num_batches_tracked)

    # Eval pass: everything stays frozen.
    module.eval()
    mean_before, var_before, count_before = snapshot()
    module(data)
    self.assertEqual(mean_before, module.running_mean)
    self.assertEqual(var_before, module.running_var)
    self.assertEqual(count_before, module.num_batches_tracked)
def test_batchnorm_update_stats(self):
    """CPU variant of the running-stats update check."""
    self._test_batchnorm_update_stats()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_batchnorm_update_stats_cuda(self):
    # CUDA/float32 variant of the running-stats update check.
    self._test_batchnorm_update_stats("cuda", torch.float)
def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):
    """F.batch_norm must reject a running_mean whose length differs from
    the number of input features (10)."""
    input = torch.rand(2, 10)
    running_var = torch.rand(10)
    for bad_size in (9, 11):
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, torch.rand(bad_size), running_var)
def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):
    """F.batch_norm must reject a running_var whose length differs from
    the number of input features (10)."""
    input = torch.rand(2, 10)
    running_mean = torch.rand(10)
    for bad_size in (9, 11):
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, torch.rand(bad_size))
def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):
    """F.batch_norm must reject a weight whose length differs from the
    number of input features (10)."""
    input = torch.rand(2, 10)
    running_mean = torch.rand(10)
    running_var = torch.rand(10)
    for bad_size in (9, 11):
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, running_var,
                         weight=Parameter(torch.rand(bad_size)))
def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):
    """F.batch_norm must reject a bias whose length differs from the
    number of input features (10)."""
    input = torch.rand(2, 10)
    running_mean = torch.rand(10)
    running_var = torch.rand(10)
    for bad_size in (9, 11):
        with self.assertRaises(RuntimeError):
            F.batch_norm(input, running_mean, running_var,
                         bias=Parameter(torch.rand(bad_size)))
def _test_batchnorm_eval(self, device="cpu", dtype=torch.float):
    """Eval-mode BatchNorm1d is deterministic: two identical passes give
    the same output and input gradient.  With track_running_stats=False,
    a train-mode pass must also match a following eval-mode pass, since
    both normalize with batch statistics."""
    module = nn.BatchNorm1d(3).to(device, dtype)
    module.eval()

    data = torch.rand(4, 3, device=device, dtype=dtype, requires_grad=True)
    grad = torch.rand(4, 3, device=device, dtype=dtype)

    # First pass.
    out_a = module(data)
    out_a.backward(grad)
    grad_a = data.grad.clone()

    # Second pass after clearing the accumulated gradient.
    if data.grad is not None:
        data.grad.data.zero_()
    out_b = module(data)
    out_b.backward(grad)
    grad_b = data.grad.clone()
    self.assertEqual(out_a, out_b)
    self.assertEqual(grad_a, grad_b)

    # track_running_stats=False: first pass in (default) train mode.
    module = nn.BatchNorm1d(3, track_running_stats=False).to(device, dtype)
    data = torch.rand(4, 3, device=device, dtype=dtype, requires_grad=True)
    grad = torch.rand(4, 3, device=device, dtype=dtype)

    out_a = module(data)
    out_a.backward(grad)
    grad_a = data.grad.clone()

    # Switch to eval and repeat; results must be unchanged.
    module.eval()
    if data.grad is not None:
        data.grad.data.zero_()
    out_b = module(data)
    out_b.backward(grad)
    grad_b = data.grad.clone()
    self.assertEqual(out_a, out_b)
    self.assertEqual(grad_a, grad_b)
def _test_batchnorm_simple_average(self, test_type=torch.FloatTensor):
    """momentum=None makes BatchNorm keep a plain cumulative average:
    after a reset, replaying two batches reproduces the per-batch outputs
    and leaves running stats equal to the mean of the per-batch stats."""
    module = nn.BatchNorm1d(3, momentum=None).type(test_type)
    zeros = torch.zeros(3).type(test_type)
    ones = torch.ones(3).type(test_type)

    def expect_fresh_stats():
        # Freshly initialized / reset stats are mean=0, var=1.
        self.assertEqual(module.running_mean, zeros)
        self.assertEqual(module.running_var, ones)

    expect_fresh_stats()
    data1 = torch.rand(4, 3).type(test_type)
    data2 = torch.rand(4, 3).type(test_type)

    # First batch alone.
    res1 = module(data1)
    running_mean1 = module.running_mean.clone()
    running_var1 = module.running_var.clone()
    self.assertNotEqual(running_mean1, zeros)
    self.assertNotEqual(running_var1, ones)
    module.reset_running_stats()
    expect_fresh_stats()

    # Second batch alone.
    res2 = module(data2)
    running_mean2 = module.running_mean.clone()
    running_var2 = module.running_var.clone()
    self.assertNotEqual(running_mean2, zeros)
    self.assertNotEqual(running_var2, ones)
    module.reset_running_stats()
    expect_fresh_stats()

    # Both batches in sequence: same outputs, averaged statistics.
    res3 = module(data1)
    res4 = module(data2)
    self.assertEqual(res3, res1)
    self.assertEqual(res4, res2)
    self.assertAlmostEqual(module.running_mean, (running_mean1 + running_mean2) / 2)
    self.assertAlmostEqual(module.running_var, (running_var1 + running_var2) / 2)
def test_pairwise_distance(self):
    """F.pairwise_distance must pass gradcheck on random 4x4 inputs."""
    lhs = torch.randn(4, 4, requires_grad=True)
    rhs = torch.randn(4, 4, requires_grad=True)
    self.assertTrue(gradcheck(lambda a, b: F.pairwise_distance(a, b), (lhs, rhs)))
def test_cosine_embedding_loss_no_reduce(self):
    """cosine_embedding_loss(reduction='none'): gradcheck + match against
    the python reference implementation."""
    first = torch.randn(15, 10, requires_grad=True)
    second = torch.randn(15, 10, requires_grad=True)
    labels = torch.randn(15).sign()
    self.assertTrue(gradcheck(
        lambda x, y, z: F.cosine_embedding_loss(x, y, z, reduction='none'),
        (first, second, labels)))
    expected = loss_reference_fns['CosineEmbeddingLoss'](first, second, labels, reduction='none')
    self.assertEqual(F.cosine_embedding_loss(first, second, labels, reduction='none'),
                     expected)
def test_cosine_embedding_loss_margin_no_reduce(self):
    """cosine_embedding_loss with margin=0.5 and reduction='none':
    gradcheck + match against the python reference implementation."""
    first = torch.randn(15, 10, requires_grad=True)
    second = torch.randn(15, 10, requires_grad=True)
    labels = torch.randn(15).sign()
    self.assertTrue(gradcheck(
        lambda x, y, z: F.cosine_embedding_loss(x, y, z, margin=0.5, reduction='none'),
        (first, second, labels)))
    expected = loss_reference_fns['CosineEmbeddingLoss'](first, second, labels,
                                                         margin=0.5, reduction='none')
    self.assertEqual(
        F.cosine_embedding_loss(first, second, labels, margin=0.5, reduction='none'),
        expected)
def test_margin_ranking_loss_no_reduce(self):
    """margin_ranking_loss(reduction='none'): gradcheck + reference match.

    Fix: inputs were built with ``torch.tensor(tensor, requires_grad=True)``,
    which copy-constructs from an existing tensor and is deprecated
    (UserWarning; the recommended spelling is ``.requires_grad_()`` or
    ``.clone().detach().requires_grad_(True)``).
    """
    input1 = torch.randn(15).mul(10).requires_grad_()
    input2 = torch.randn(15).mul(10).requires_grad_()
    target = torch.randn(15).sign()
    self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
        x, y, z, reduction='none'), (input1, input2, target)))
    self.assertEqual(F.margin_ranking_loss(input1, input2, target, reduction='none'),
                     loss_reference_fns['MarginRankingLoss'](input1, input2, target, reduction='none'))
def test_margin_ranking_loss_margin_no_reduce(self):
    """margin_ranking_loss with margin=0.5, reduction='none': gradcheck +
    reference match.

    Fix: inputs were built with ``torch.tensor(tensor, requires_grad=True)``,
    which copy-constructs from an existing tensor and is deprecated
    (UserWarning); replaced by ``.requires_grad_()``.
    """
    input1 = torch.randn(15).mul(10).requires_grad_()
    input2 = torch.randn(15).mul(10).requires_grad_()
    target = torch.randn(15).sign()
    self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(
        x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))
    self.assertEqual(F.margin_ranking_loss(input1, input2, target, margin=0.5, reduction='none'),
                     loss_reference_fns['MarginRankingLoss'](input1, input2, target, margin=0.5, reduction='none'))
def test_triplet_margin_loss(self):
    """triplet_margin_loss: gradcheck + match against the python reference."""
    anchor = torch.randn(5, 10, requires_grad=True)
    positive = torch.randn(5, 10, requires_grad=True)
    negative = torch.randn(5, 10, requires_grad=True)
    self.assertTrue(gradcheck(
        lambda a, p, n: F.triplet_margin_loss(a, p, n),
        (anchor, positive, negative)))
    self.assertEqual(F.triplet_margin_loss(anchor, positive, negative),
                     loss_reference_fns['TripletMarginLoss'](anchor, positive, negative))
def test_triplet_margin_loss_swap(self):
    """triplet_margin_loss with swap=True: gradcheck + reference match."""
    anchor = torch.randn(5, 10, requires_grad=True)
    positive = torch.randn(5, 10, requires_grad=True)
    negative = torch.randn(5, 10, requires_grad=True)
    self.assertTrue(gradcheck(
        lambda a, p, n: F.triplet_margin_loss(a, p, n, swap=True),
        (anchor, positive, negative)))
    self.assertEqual(F.triplet_margin_loss(anchor, positive, negative, swap=True),
                     loss_reference_fns['TripletMarginLoss'](anchor, positive, negative, swap=True))
def test_triplet_margin_loss_no_reduce(self):
    """triplet_margin_loss with reduction='none': gradcheck + reference match."""
    anchor = torch.randn(5, 10, requires_grad=True)
    positive = torch.randn(5, 10, requires_grad=True)
    negative = torch.randn(5, 10, requires_grad=True)
    self.assertTrue(gradcheck(
        lambda a, p, n: F.triplet_margin_loss(a, p, n, reduction='none'),
        (anchor, positive, negative)))
    self.assertEqual(F.triplet_margin_loss(anchor, positive, negative, reduction='none'),
                     loss_reference_fns['TripletMarginLoss'](anchor, positive, negative, reduction='none'))
def test_triplet_margin_loss_swap_no_reduce(self):
input1 = torch.randn(5, 10, requires_grad=True)
input2 = torch.randn(5, 10, requires_grad=True)
input3 = torch.randn(5, 10, requires_grad=True)
self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(
x1, x2, x3, swap=True, reduction='none'), (input1, input2, input3)))
self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True, reduction='none'),
loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True, reduction='none'))
def test_pointwise_loss_target_grad_none_reduction(self):
i = torch.randn(5, 10)
t = torch.randn(5, 10, requires_grad=True)
self.assertEqual(F.mse_loss(i, t, reduction='none').size(), t.size())
self.assertEqual(F.l1_loss(i, t, reduction='none').size(), t.size())
def test_cosine_similarity(self):
input1 = torch.randn(4, 4, requires_grad=True)
input2 = torch.randn(4, 4, requires_grad=True)
self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y), (input1, input2)))
input1 = torch.randn(4, 5, 6, requires_grad=True)
input2 = torch.randn(4, 5, 6, requires_grad=True)
self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=0), (input1, input2)))
self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=-1), (input1, input2)))
input1 = torch.randn((), requires_grad=True)
input2 = torch.randn((), requires_grad=True)
self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=0), (input1, input2)))
self.assertTrue(gradcheck(lambda x, y: F.cosine_similarity(x, y, dim=-1), (input1, input2)))
# Check cosine_similarity input/output shapes
input_size = (1, 3, 2, 1)
expected_size = (1, 2, 1)
input1 = torch.randn(input_size, requires_grad=True)
input2 = torch.randn(input_size, requires_grad=True)
self.assertEqual(F.cosine_similarity(input1, input2, dim=1).size(), expected_size)
def test_grid_sample_unsupported_mode(self):
with self.assertRaisesRegex(NotImplementedError, "nn.functional.grid_sample got unsupported mode: 'garbage'"):
F.grid_sample(torch.tensor([]), torch.tensor([]), mode='garbage')
    def test_grid_sample(self):
        """2-D grid_sample: output shapes for same/larger/smaller outputs,
        CPU vs CUDA agreement (values and gradients), known values for both
        padding modes, and gradcheck on random sizes.

        CUDA sections only run under TEST_CUDA; under TEST_CUDNN the CPU/CUDA
        comparison is repeated with cuDNN disabled to cover native kernels.
        """
        def test_cpu_against_cuda(N, C, H, W, padding_mode):
            def test_shape(N, C, IH, IW, H, W, padding_mode):
                # transpose(0, 1) produces non-contiguous views, so the kernels
                # are also exercised with non-default strides.
                input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_()
                grid_cpu = torch.randn(H, N, W, 2).transpose(0, 1).requires_grad_()
                out_cpu = F.grid_sample(input_cpu, grid_cpu, padding_mode=padding_mode)
                self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W]))
                # Same computation on CUDA; forward results must match.
                input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                grid_cuda = grid_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                out_cuda = F.grid_sample(input_cuda, grid_cuda, padding_mode=padding_mode)
                self.assertEqual(out_cpu, out_cuda)
                # Backward must agree too (grid grads use a looser tolerance).
                gradients = torch.randn_like(out_cpu)
                out_cpu.backward(gradients)
                out_cuda.backward(gradients.cuda())
                self.assertEqual(input_cpu.grad, input_cuda.grad)
                self.assertEqual(grid_cpu.grad, grid_cuda.grad, prec=5e-5)
                # check that zero-dimensional input strides don't error out
                base_input = torch.randn(N, C, 1, IW)
                input_cpu = base_input.expand_as(input_cuda).requires_grad_()
                grid_cpu = torch.randn(N, H, W, 2, requires_grad=True)
                out_cpu = F.grid_sample(input_cpu, grid_cpu, padding_mode=padding_mode)
                input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_()
                grid_cuda = grid_cpu.detach().cuda().requires_grad_()
                out_cuda = F.grid_sample(input_cuda, grid_cuda, padding_mode=padding_mode)
                self.assertEqual(out_cpu, out_cuda)
            # test same size output
            test_shape(N, C, H, W, H, W, padding_mode)
            # test larger output
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(IH + 1, 12)
            W = random.randint(IW + 1, 12)
            test_shape(N, C, IH, IW, H, W, padding_mode)
            # test smaller output
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            H = random.randint(2, IH)
            W = random.randint(2, IW)
            test_shape(N, C, IH, IW, H, W, padding_mode)
        # test known input on CPU: a fixed 1x1x2x5 ramp and a hand-picked grid,
        # with precomputed expected outputs for each padding mode.
        for padding_mode in ['zeros', 'border']:
            input = Variable(torch.arange(1., 11).view(1, 1, 2, 5))
            grid = Variable(torch.Tensor(
                [[-0.9, -1.4, 0, 0.2, 1],
                 [-1, -0.333, 0, 0.5, 1],
                 [-1, -0.5, 0, 0.3333, 1],
                 [-1, -0.2, 0, 1.1, 0.5]]).view(1, 2, 5, 2))
            output = F.grid_sample(input, grid, padding_mode=padding_mode)
            if padding_mode == 'zeros':
                groundtruth = torch.Tensor(
                    [[0.9600, 6.0000000000, 5.0000, 4.8340, 9.0000],
                     [2.2500, 6.333250045, 5.0000, 5.1000, 7.0000]]).view(1, 1, 2, 5)
            else:
                groundtruth = torch.Tensor(
                    [[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000],
                     [2.2500, 6.333250045, 5.0000, 5.1000, 8.7500]]).view(1, 1, 2, 5)
            self.assertEqual(output.data, groundtruth)
            # do gradcheck on randomly-sized inputs
            N = random.randint(1, 8)
            C = random.randint(1, 8)
            H = random.randint(1, 8)
            W = random.randint(1, 8)
            input = torch.randn(N, C, H, W, requires_grad=True)
            grid = torch.randn(N, H, W, 2, requires_grad=True)
            self.assertTrue(gradcheck(
                lambda inp, grid: F.grid_sample(inp, grid, padding_mode=padding_mode),
                (input, grid)))
            # test CUDA against CPU
            if TEST_CUDA:
                test_cpu_against_cuda(N, C, H, W, padding_mode)
                if TEST_CUDNN:
                    with cudnn.flags(enabled=False):
                        test_cpu_against_cuda(N, C, H, W, padding_mode)
    def test_grid_sample_3d(self):
        """3-D grid_sample: output shapes for same/larger/smaller outputs,
        CPU vs CUDA agreement (values and gradients), and gradcheck on CPU.

        CUDA comparisons only run under TEST_CUDA.
        """
        def test_cpu_against_cuda(N, C, D, H, W, padding_mode):
            def test_shape(N, C, ID, IH, IW, D, H, W, padding_mode):
                # transpose(0, 1) produces non-contiguous views, so the kernels
                # are also exercised with non-default strides.
                input_cpu = torch.randn(C, N, ID, IH, IW).transpose(0, 1).requires_grad_()
                grid_cpu = torch.randn(D, N, H, W, 3).transpose(0, 1).requires_grad_()
                out_cpu = F.grid_sample(input_cpu, grid_cpu, padding_mode=padding_mode)
                self.assertTrue(out_cpu.size() == torch.Size([N, C, D, H, W]))
                # Same computation on CUDA; forward results must match.
                input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                grid_cuda = grid_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()
                out_cuda = F.grid_sample(input_cuda, grid_cuda, padding_mode=padding_mode)
                self.assertEqual(out_cpu, out_cuda)
                # Backward must agree too (grid grads use a looser tolerance).
                gradients = torch.randn_like(out_cpu)
                out_cpu.backward(gradients)
                out_cuda.backward(gradients.cuda())
                self.assertEqual(input_cpu.grad, input_cuda.grad)
                self.assertEqual(grid_cpu.grad, grid_cuda.grad, prec=5e-5)
                # check that zero-dimensional input strides don't error out
                base_input = torch.randn(N, C, 1, IH, IW)
                input_cpu = base_input.expand_as(input_cuda).requires_grad_()
                grid_cpu = torch.randn(N, D, H, W, 3, requires_grad=True)
                out_cpu = F.grid_sample(input_cpu, grid_cpu, padding_mode=padding_mode)
                input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_()
                grid_cuda = grid_cpu.detach().cuda().requires_grad_()
                out_cuda = F.grid_sample(input_cuda, grid_cuda, padding_mode=padding_mode)
                self.assertEqual(out_cpu, out_cuda)
            # test same size output
            test_shape(N, C, D, H, W, D, H, W, padding_mode)
            # test larger output
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            ID = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            D = random.randint(ID + 1, 12)
            H = random.randint(IH + 1, 12)
            W = random.randint(IW + 1, 12)
            test_shape(N, C, ID, IH, IW, D, H, W, padding_mode)
            # test smaller output
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            ID = random.randint(2, 8)
            IH = random.randint(2, 8)
            IW = random.randint(2, 8)
            D = random.randint(2, ID)
            H = random.randint(2, IH)
            W = random.randint(2, IW)
            test_shape(N, C, ID, IH, IW, D, H, W, padding_mode)
        # test known input on CPU
        # (no fixed-value case here, unlike the 2-D test; only gradcheck)
        for padding_mode in ['zeros', 'border']:
            # do gradcheck
            N = random.randint(2, 8)
            C = random.randint(2, 8)
            D = random.randint(2, 8)
            H = random.randint(2, 8)
            W = random.randint(2, 8)
            input = torch.randn(N, C, D, H, W, requires_grad=True)
            grid = torch.randn(N, D, H, W, 3, requires_grad=True)
            self.assertTrue(gradcheck(
                lambda inp, grid: F.grid_sample(inp, grid, padding_mode=padding_mode),
                (input, grid)))
            # test CUDA against CPU
            if TEST_CUDA:
                test_cpu_against_cuda(N, C, D, H, W, padding_mode)
    def test_affine_grid(self):
        """affine_grid: a fixed 1x2x3 theta produces the expected grid,
        gradcheck passes on random sizes, and CPU and CUDA agree in both
        forward and backward when cuDNN is available."""
        # test known input on CPU
        input = Variable(torch.arange(1., 7).view(1, 2, 3))
        output = F.affine_grid(input, torch.Size([1, 1, 2, 2]))
        groundtruth = torch.Tensor(
            [[[0, -3], [2, 5]], [[4, 7], [6, 15]]]).view(1, 2, 2, 2)
        self.assertEqual(output.data, groundtruth)
        # do gradcheck on a randomly-sized output grid
        N = random.randint(1, 8)
        C = random.randint(1, 8)
        H = random.randint(1, 8)
        W = random.randint(1, 8)
        sz = torch.Size([N, C, H, W])
        inp = torch.randn(N, 2, 3, requires_grad=True)
        self.assertTrue(gradcheck(lambda inp: F.affine_grid(inp, sz), (inp,)))
        # test CPU against CUDA
        if TEST_CUDNN:
            input_cpu = torch.randn(N, 2, 3, requires_grad=True)
            out_cpu = F.affine_grid(input_cpu, sz)
            gradients = torch.randn(out_cpu.size())
            out_cpu.backward(gradients)
            input_gpu = Variable(input_cpu.data.cuda(), requires_grad=True)
            out_cuda = F.affine_grid(input_gpu, sz)
            out_cuda.backward(gradients.cuda())
            self.assertEqual(out_cpu, out_cuda)
            self.assertEqual(input_cpu.grad, input_gpu.grad)
def test_upsamplingNearest1d(self):
m = nn.Upsample(size=4, mode='nearest')
in_t = torch.ones(1, 1, 2)
out_t = m(Variable(in_t))
self.assertEqual(torch.ones(1, 1, 4), out_t.data)
input = torch.randn(1, 1, 2, requires_grad=True)
gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
def test_upsamplingLinear1d(self):
for align_corners in [True, False]:
kwargs = dict(mode='linear', align_corners=align_corners)
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
m = nn.Upsample(scale_factor=scale_factor, **kwargs)
in_t = torch.ones(1, 1, 2)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
out_t = m(in_t)
self.assertEqual(torch.ones(1, 1, out_size), out_t.data)
input = torch.randn(1, 1, 2, requires_grad=True)
gradcheck(lambda x: F.upsample(x, out_size, **kwargs), (input,))
def test_upsamplingLinear1d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='linear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9)
in_t_9[:, :, :4].normal_()
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5])
self.assertEqual(out_t_9[:, :, :15], out_t_5)
def test_upsamplingNearest2d(self):
m = nn.Upsample(size=4, mode='nearest')
in_t = torch.ones(1, 1, 2, 2)
out_t = m(Variable(in_t))
self.assertEqual(torch.ones(1, 1, 4, 4), out_t.data)
input = torch.randn(1, 1, 2, 2, requires_grad=True)
self.assertEqual(
F.upsample(input, 4, mode='nearest'),
F.upsample(input, scale_factor=2, mode='nearest'))
gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
gradgradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
def test_upsamplingBilinear2d(self):
for align_corners in [True, False]:
kwargs = dict(mode='bilinear', align_corners=align_corners)
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
m = nn.Upsample(scale_factor=scale_factor, **kwargs)
in_t = torch.ones(1, 1, 2, 2)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
out_t = m(in_t)
self.assertEqual(torch.ones(1, 1, out_size, out_size), out_t.data)
input = torch.randn(1, 1, 2, 2, requires_grad=True)
gradcheck(lambda x: F.upsample(x, out_size, **kwargs), [input])
def test_upsamplingBilinear2d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='bilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9)
in_t_9[:, :, :4, :4].normal_()
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15], out_t_5)
def test_upsamplingNearest3d(self):
m = nn.Upsample(size=4, mode='nearest')
in_t = torch.ones(1, 1, 2, 2, 2)
out_t = m(Variable(in_t))
self.assertEqual(torch.ones(1, 1, 4, 4, 4), out_t.data)
input = torch.randn(1, 1, 2, 2, 2, requires_grad=True)
gradcheck(lambda x: F.upsample(x, 4, mode='nearest'), [input])
def test_upsamplingTrilinear3d(self):
for align_corners in [True, False]:
kwargs = dict(mode='trilinear', align_corners=align_corners)
# test float scale factor up & downsampling
for scale_factor in [0.5, 1.5, 2]:
m = nn.Upsample(scale_factor=scale_factor, **kwargs)
in_t = torch.ones(1, 1, 2, 2, 2)
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
out_t = m(in_t)
self.assertEqual(torch.ones(1, 1, out_size, out_size, out_size), out_t.data)
input = torch.randn(1, 1, 2, 2, 2, requires_grad=True)
self.assertEqual(
F.upsample(input, (out_size, out_size, out_size), **kwargs),
F.upsample(input, scale_factor=scale_factor, **kwargs))
gradcheck(lambda x: F.upsample(x, out_size, **kwargs), [input])
gradgradcheck(lambda x: F.upsample(x, out_size, **kwargs), [input])
def test_upsamplingTrilinear3d_spatial_invariance(self):
m = nn.Upsample(scale_factor=3, mode='trilinear', align_corners=False)
in_t_9 = torch.zeros(1, 1, 9, 9, 9)
in_t_9[:, :, :4, :4, :4].normal_()
out_t_9 = m(in_t_9)
out_t_5 = m(in_t_9[:, :, :5, :5, :5])
self.assertEqual(out_t_9[:, :, :15, :15, :15], out_t_5)
def test_interpolate(self):
def _test_interpolate_helper(in_t, scale_factor, layer):
out_size = int(math.floor(in_t.shape[-1] * scale_factor))
dim = len(in_t.shape) - 2
out_shape = [1, 1] + [out_size] * dim
out_t = m(in_t)
self.assertEqual(torch.ones(out_shape), out_t)
self.assertEqual(
F.interpolate(in_t, (out_size,) * dim, **kwargs),
F.interpolate(in_t, scale_factor=scale_factor, **kwargs))
gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t])
gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t])
def _make_input(dim):
size = [1, 1]
size += [2] * dim
return torch.ones(size, requires_grad=True)
device_list = ['cpu']
if TEST_CUDA:
device_list.append('cuda')
for device in device_list:
for scale_factor in [0.5, 1.5, 2]:
for mode in ['nearest', 'area']:
kwargs = dict(mode=mode)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
for input in [_make_input(1), _make_input(2), _make_input(3)]:
_test_interpolate_helper(input, scale_factor, m)
for align_corners in [True, False]:
kwargs = dict(mode='linear', align_corners=align_corners)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
_test_interpolate_helper(_make_input(1), scale_factor, m)
kwargs = dict(mode='bilinear', align_corners=align_corners)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
_test_interpolate_helper(_make_input(2), scale_factor, m)
kwargs = dict(mode='trilinear', align_corners=align_corners)
m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)
_test_interpolate_helper(_make_input(3), scale_factor, m)
def test_linear_broadcasting(self):
m = nn.Linear(5, 8)
inp = torch.randn(2, 3, 5)
expected = m(inp.view(6, 5)).view(2, 3, 8)
self.assertEqual(expected, m(inp))
def test_bilinear(self):
module = nn.Bilinear(10, 10, 8)
module_legacy = legacy.Bilinear(10, 10, 8)
module_legacy.weight.copy_(module.weight.data)
module_legacy.bias.copy_(module.bias.data)
input1 = torch.randn(4, 10)
input2 = torch.randn(4, 10)
output = module(Variable(input1), Variable(input2))
output_legacy = module_legacy.forward([input1, input2])
self.assertEqual(output.data, output_legacy)
input1_1 = torch.tensor(input1, requires_grad=True)
input2_1 = torch.tensor(input2, requires_grad=True)
module.zero_grad()
module_legacy.zeroGradParameters()
output = module(input1_1, input2_1)
grad_output = torch.randn(*output.size())
gi1_legacy, gi2_legacy = module_legacy.backward([input1, input2], grad_output)
output.backward(grad_output)
gi1 = input1_1.grad.data.clone()
gi2 = input2_1.grad.data.clone()
self.assertEqual(gi1, gi1_legacy)
self.assertEqual(gi2, gi2_legacy)
self.assertEqual(module.weight.grad.data, module_legacy.gradWeight)
self.assertEqual(module.bias.grad.data, module_legacy.gradBias)
_assertGradAndGradgradChecks(self, lambda x1, x2: F.bilinear(x1, x2, module.weight, module.bias),
(input1_1, input2_1))
def test_bilinear_no_bias(self):
module = nn.Bilinear(10, 10, 8)
module_no_bias = nn.Bilinear(10, 10, 8, False)
module.bias.data.zero_()
module.weight.data.copy_(module_no_bias.weight)
input1 = torch.randn(4, 10, requires_grad=True)
input2 = torch.randn(4, 10, requires_grad=True)
grad_output = torch.randn(4, 8)
def run(net):
input1.grad = input2.grad = None
output = net(input1, input2)
output.backward(grad_output)
return output.data, input1.grad.data, input2.grad.data
out, g1, g2 = run(module)
out_nb, g1_nb, g2_nb = run(module_no_bias)
self.assertEqual(out, out_nb)
self.assertEqual(g1, g1_nb)
self.assertEqual(g2, g2_nb)
_assertGradAndGradgradChecks(self,
lambda x1, x2: F.bilinear(x1, x2, module_no_bias.weight, module_no_bias.bias),
(input1, input2))
def test_bilinear_broadcasting(self):
m = nn.Bilinear(5, 6, 8)
input1 = torch.randn(2, 3, 5)
input2 = torch.randn(2, 3, 6)
expected = m(input1.view(6, 5), input2.view(6, 6)).view(2, 3, 8)
self.assertEqual(expected, m(input1, input2))
def test_conv_tbc(self):
inp = torch.randn(9, 4, 5, requires_grad=True)
weight = torch.randn(3, 5, 6, requires_grad=True)
bias = torch.randn(6, requires_grad=True)
gradcheck(lambda i, w, b, pad: F.conv_tbc(i, w, b, pad), (inp, weight, bias, 3))
@staticmethod
def _test_conv_noncontig_weights(self, device):
for dim in (1, 2, 3):
for grouped in (True, False):
nc = 3
groups = 3 if grouped else 1
w = torch.randn([3] * dim, device=device)
w = w.expand([nc, int(nc / groups)] + list(w.shape))
x = torch.randn([1, nc] + ([5] * dim), device=device)
getattr(F, 'conv{}d'.format(dim))(x, w, groups=groups)
getattr(F, 'conv_transpose{}d'.format(dim))(x, w, groups=groups)
def test_conv_noncontig_weights(self):
self._test_conv_noncontig_weights(self, torch.device('cpu'))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_conv_noncontig_weights_cuda(self):
self._test_conv_noncontig_weights(self, torch.device('cuda'))
    def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out, batch_size,
                                  inp_size, dilation, no_weight, groups=1, use_cuda=False,
                                  use_bias=True, dtype=torch.double):
        """Run gradgradcheck on one conv2d configuration.

        Builds a square (batch_size, chan_in, inp_size, inp_size) input and a
        (chan_out, chan_in // groups, kern, kern) weight. `no_weight=True`
        freezes the weight so only input (and bias) gradients are checked.
        Returns the result of gradgradcheck.
        """
        if use_cuda:
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        x = torch.randn(batch_size, chan_in, inp_size, inp_size, device=device,
                        dtype=dtype, requires_grad=True)
        weight = torch.randn(chan_out, chan_in // groups, kern, kern, device=device,
                             dtype=dtype, requires_grad=not no_weight)
        if use_bias:
            bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
        else:
            bias = None
        def func(*inputs):
            # Unpack the differentiable inputs; bias is optional.
            if use_bias:
                lx, lweight, lbias = inputs
            else:
                lx, lweight = inputs
                lbias = None
            # We disable cudnn during forward to avoid finite difference imprecision issues
            with cudnn.flags(enabled=False):
                out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)
            return out
        if use_bias:
            inputs = x, weight, bias
        else:
            inputs = x, weight
        # Forward once to size the upstream gradient; it must itself require
        # grad so gradgradcheck can differentiate through the backward pass.
        dummy_out = func(*inputs)
        grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)
        return gradgradcheck(func, inputs, (grad_y,))
    def test_conv_double_backward(self):
        """gradgradcheck conv2d over a sweep of kernel / stride / padding /
        dilation configurations, with and without a differentiable weight."""
        batch_size = 2
        for kern, inp_size, dilations in [(3, 6, [1, 2]), (3, 7, [1]), (4, 9, [1])]:
            for stride, padding, chan_in, chan_out, dilation in \
                    product([1, 2], [0, 1, 2], [2], [3], dilations):
                for no_weight in (True, False):
                    result = self.run_conv_double_back_test(kern, stride,
                                                            padding, chan_in, chan_out,
                                                            batch_size, inp_size, dilation,
                                                            no_weight)
                    # Report the full configuration on failure.
                    self.assertTrue(result,
                                    "Conv double backward test failed with parameters:" +
                                    "\nkern: " + str(kern) +
                                    "\nstride: " + str(stride) +
                                    "\npadding: " + str(padding) +
                                    "\nchan_in: " + str(chan_in) +
                                    "\nchan_out: " + str(chan_out) +
                                    "\nbatch_size: " + str(batch_size) +
                                    "\ninp_size: " + str(inp_size) +
                                    "\ndilation: " + str(dilation))
def test_conv_double_backward_no_bias(self):
kern = 3
stride = 2
chan_in, chan_out = 2, 4
batch_size = 2
inp_size = 5
padding = 1
dilation = 1
no_weight = False
use_bias = True
result = self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight, use_bias=use_bias)
self.assertTrue(result,
"Conv double backward test failed with parameters:" +
"\nkern: " + str(kern) +
"\nstride: " + str(stride) +
"\npadding: " + str(padding) +
"\nchan_in: " + str(chan_in) +
"\nchan_out: " + str(chan_out) +
"\nbatch_size: " + str(batch_size) +
"\ninp_size: " + str(inp_size) +
"\ndilation: " + str(dilation))
    def test_conv_double_backward_groups(self):
        """gradgradcheck conv2d with groups=2; channel counts are scaled by the
        group count so each group keeps the base chan_in/chan_out."""
        kern = 3
        stride = 1
        padding = 2
        chan_in, chan_out = 2, 4
        batch_size = 2
        inp_size = 6
        dilation = 1
        no_weight = False
        groups = 2
        result = self.run_conv_double_back_test(kern, stride,
                                                padding, chan_in * groups, chan_out * groups,
                                                batch_size, inp_size, dilation,
                                                no_weight, groups=groups)
        # Report the full configuration on failure.
        self.assertTrue(result,
                        "Conv double backward test failed with parameters:" +
                        "\nkern: " + str(kern) +
                        "\nstride: " + str(stride) +
                        "\npadding: " + str(padding) +
                        "\nchan_in: " + str(chan_in) +
                        "\nchan_out: " + str(chan_out) +
                        "\nbatch_size: " + str(batch_size) +
                        "\ninp_size: " + str(inp_size) +
                        "\ndilation: " + str(dilation) +
                        "\ngroups: " + str(groups))
def test_conv_double_backward_stride(self):
batch_size = 2
# Cannot provide ggW when stride is > 1
for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]:
for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1], [1], [2], dilations):
no_weight = False
self.run_conv_double_back_test(kern, stride,
padding, chan_in, chan_out,
batch_size, inp_size, dilation,
no_weight)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_cudnn_noncontiguous_weight(self):
# Noncontiguous weights must be contiguous() before being
# passed to cuDNN
input = Variable(torch.cuda.DoubleTensor([1, 1, 1]).view(1, 1, 3))
weights1 = Variable(torch.cuda.DoubleTensor([1]).expand(1, 1, 2))
weights2 = Variable(torch.cuda.DoubleTensor([1]).expand(1, 1, 2)).contiguous()
self.assertEqual(F.conv1d(input, weights1, bias=None, stride=2, dilation=2),
F.conv1d(input, weights2, bias=None, stride=2, dilation=2))
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    @repeat_test_for_types(DOUBLE_TENSORTYPES)
    def test_conv_double_backward_cuda(self, dtype=torch.double):
        """gradgradcheck conv2d on CUDA for each double tensor type."""
        # Double backward only runs with DoubleTensor due to precision reasons
        batch_size = 1
        for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]:
            for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):
                # NOTE(review): stride is always 1 in this product, so
                # `no_weight` is always False — this expression looks like a
                # leftover from a wider sweep; confirm before relying on it.
                no_weight = stride == 2
                result = self.run_conv_double_back_test(kern, stride,
                                                        padding, chan_in, chan_out,
                                                        batch_size, inp_size, dilation,
                                                        no_weight, use_cuda=True, dtype=dtype)
                # Report the full configuration on failure.
                self.assertTrue(result,
                                "Conv double backward test failed with parameters:" +
                                "\nkern: " + str(kern) +
                                "\nstride: " + str(stride) +
                                "\npadding: " + str(padding) +
                                "\nchan_in: " + str(chan_in) +
                                "\nchan_out: " + str(chan_out) +
                                "\nbatch_size: " + str(batch_size) +
                                "\ninp_size: " + str(inp_size) +
                                "\ndilation: " + str(dilation))
def run_grad_conv_test(self, func_forward, func_backward, dim=1, gradient='input'):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out, dilation in \
product([1, 2], [1, 2], [0, 1, 2], [2], [3], [1]):
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(dim):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True)
weight = torch.randn(weight_shape, requires_grad=True)
output = func_forward(input, weight, stride=stride, padding=padding, dilation=dilation)
gradient_o = torch.randn(output.shape)
gradient_w = torch.autograd.grad(output, input if (gradient == 'input') else weight, gradient_o)
self.assertAlmostEqual(gradient_w[0],
func_backward(
input_shape if (gradient == 'input') else input,
weight_shape if (gradient == 'weight') else weight,
gradient_o,
stride=stride,
padding=padding,
dilation=dilation))
def test_grad_conv1d_input(self):
self.run_grad_conv_test(F.conv1d, F.grad.conv1d_input, 1, 'input')
def test_grad_conv1d_weight(self):
self.run_grad_conv_test(F.conv1d, F.grad.conv1d_weight, 1, 'weight')
def test_grad_conv2d_input(self):
self.run_grad_conv_test(F.conv2d, F.grad.conv2d_input, 2, 'input')
def test_grad_conv2d_weight(self):
self.run_grad_conv_test(F.conv2d, F.grad.conv2d_weight, 2, 'weight')
def test_grad_conv3d_input(self):
self.run_grad_conv_test(F.conv3d, F.grad.conv3d_input, 3, 'input')
def test_grad_conv3d_weight(self):
self.run_grad_conv_test(F.conv3d, F.grad.conv3d_weight, 3, 'weight')
def test_fold_invalid_arg(self):
# input wrong dimension
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
with self.assertRaisesRegex(NotImplementedError, r"Only 3D input Tensors are supported"):
fold(torch.randn(1, 5))
# input.size(1) not divisible by \prod(kernel_size)
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
fold(torch.randn(1, 5, 9))
with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
fold(torch.randn(1, 19, 9))
# input.size(2) not matching the total number of sliding blocks
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
fold(torch.randn(1, 6, 10))
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2))
fold(torch.randn(1, 6, 5))
with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2), dilation=(1, 2), padding=(2, 0))
fold(torch.randn(1, 6, 5)) # should be 4 * 1 = 4 sliding blocks
def test_unfold_invalid_arg(self):
# input wrong dimension
unfold = nn.Unfold(kernel_size=(2, 3))
with self.assertRaisesRegex(NotImplementedError, r"Only 4D input Tensors are supported"):
unfold(torch.randn(1, 5, 2))
# calculated output shape is too small
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(2, 3))
unfold(torch.randn(1, 2, 2, 2))
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(5, 3), padding=(1, 1))
unfold(torch.randn(1, 2, 2, 3))
with self.assertRaisesRegex(RuntimeError, r"too small \(non-positive\)"):
unfold = nn.Unfold(kernel_size=(1, 3), padding=(1, 1), dilation=(1, 2))
unfold(torch.randn(1, 2, 2, 2))
def test_softmin(self):
x = torch.randn(2, 16)
self.assertEqual(F.softmin(x, 1), F.softmax(-x, 1))
self.assertEqual(F.softmin(x, 0), F.softmax(-x, 0))
    def test_adaptive_log_softmax(self):
        """AdaptiveLogSoftmaxWithLoss: constructor validation, input/target
        shape checks, cluster parameter sizes, log_prob normalization,
        forward/log_prob consistency, and predict()."""
        # args validation: duplicated, non-increasing, and out-of-range cutoffs
        # must all be rejected
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 15], div_value=2.)
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 10], div_value=2.)
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 25], div_value=2.)
        # input shapes: batch sizes of input and target must agree
        with self.assertRaisesRegex(RuntimeError, r"Input and target should have the same size"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 5, 10])
            asfm(x, y)
        # out-of-bound targets (20 >= n_classes)
        with self.assertRaisesRegex(RuntimeError, r"Target values should be in"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 20])
            asfm(x, y)
        # cluster sizes
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(2, 16)
        y = torch.tensor([0, 17])
        self.assertEqual(asfm.head.weight.size(), (5 + 3, 16))  # 5 targets in head, 3 clusters, dimensionality 16
        self.assertEqual(asfm.tail[0][1].weight.size(), (5, 8))  # 5 targets in this cluster, dimensionality 8
        self.assertEqual(asfm.tail[1][1].weight.size(), (5, 4))  # dimensionality halved again by div_value=2
        self.assertEqual(asfm.tail[2][1].weight.size(), (5, 2))
        self.assertEqual(asfm(x, y).output.size(), (2, ))
        # log_prob returns an actual log-distribution: rows exp-sum to 1
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 4, [2], div_value=2.)
        x = torch.randn(4, 8)
        logprob_out = asfm.log_prob(x)
        self.assertEqual(torch.exp(logprob_out).data.sum(1), torch.ones(4))
        # forward returns the same thing as log_probs
        for v in [0, 1, 2, 3]:
            y = torch.full((4,), v, dtype=torch.long)
            out, loss = asfm(x, y)
            self.assertEqual(out, logprob_out.gather(1, y.unsqueeze(1)).squeeze())
            self.assertEqual(loss, F.nll_loss(logprob_out, y))
        # predict: zero out head/cluster weights so the argmax lands where each
        # sub-case wants it
        x = torch.randn(64, 8).abs_()
        # argmax in shortlist
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[asfm.shortlist_size:, :].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
        # argmax outside of shortlist
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[:asfm.shortlist_size, :].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
        # half of the argmax in shortlist, half in clusters
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        x[:32, :asfm.shortlist_size].zero_()
        x[32:, asfm.shortlist_size:].zero_()
        asfm.head.weight.data[:asfm.shortlist_size, asfm.shortlist_size:].zero_()
        asfm.head.weight.data[asfm.shortlist_size:, :asfm.shortlist_size].zero_()
        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
class TestNNInit(TestCase):
    def setUp(self):
        # Seed Python's RNG so the randomized tensor sizes and random floats
        # used throughout these init tests are reproducible across runs.
        super(TestNNInit, self).setUp()
        random.seed(123)
def _is_normal(self, tensor, mean, std):
samples = tensor.view(-1).tolist()
p_value = stats.kstest(samples, 'norm', args=(mean, std))[1]
return p_value > 0.0001
def _is_uniform(self, tensor, a, b):
samples = tensor.view(-1).tolist()
p_value = stats.kstest(samples, 'uniform', args=(a, (b - a)))[1]
return p_value > 0.0001
def _create_random_nd_tensor(self, dims, size_min, size_max):
size = [random.randint(size_min, size_max) for _ in range(dims)]
tensor = torch.zeros(size)
return tensor
def _random_float(self, a, b):
return (b - a) * random.random() + a
def test_calculate_gain_linear(self):
for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose2d', 'conv_transpose2d', 'conv_transpose3d']:
gain = init.calculate_gain(fn)
self.assertEqual(gain, 1)
def test_calculate_gain_nonlinear(self):
for fn in ['sigmoid', 'tanh', 'relu', 'leaky_relu']:
gain = init.calculate_gain(fn)
if fn == 'sigmoid':
self.assertEqual(gain, 1)
elif fn == 'tanh': # 5 / 3
self.assertEqual(gain, 1.6666666666666667)
elif fn == 'relu': # sqrt(2)
self.assertEqual(gain, 1.4142135623730951)
elif fn == 'leaky_relu': # sqrt(2 / 1 + slope^2))
self.assertEqual(gain, 1.4141428569978354)
def test_calculate_gain_leaky_relu(self):
for param in [None, 0, 0.01, 10]:
gain = init.calculate_gain('leaky_relu', param)
if param is None: # Default slope is 0.01
self.assertEqual(gain, 1.4141428569978354)
elif param == 0: # No slope = same gain as normal ReLU
self.assertEqual(gain, 1.4142135623730951)
elif param == 0.01:
self.assertEqual(gain, 1.4141428569978354)
elif param == 10:
self.assertEqual(gain, 0.14071950894605836)
def test_calculate_gain_leaky_relu_only_accepts_numbers(self):
for param in [True, [1], {'a': 'b'}]:
with self.assertRaises(ValueError):
init.calculate_gain('leaky_relu', param)
def test_calculate_gain_only_accepts_valid_nonlinearities(self):
for n in [2, 5, 25]:
# Generate random strings of lengths that definitely aren't supported
random_string = ''.join([random.choice(string.ascii_lowercase) for i in range(n)])
with self.assertRaises(ValueError):
init.calculate_gain(random_string)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_uniform(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
a = self._random_float(-3, 3)
b = a + self._random_float(1, 5)
init.uniform_(input_tensor, a=a, b=b)
assert self._is_uniform(input_tensor, a, b)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_normal(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)
mean = self._random_float(-3, 3)
std = self._random_float(1, 5)
init.normal_(input_tensor, mean=mean, std=std)
assert self._is_normal(input_tensor, mean, std)
def test_constant(self):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
val = self._random_float(1, 10)
init.constant_(input_tensor, val)
self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
def test_ones_and_zeros(self):
for init_fn_, val in zip([init.ones_, init.zeros_], [1, 0]):
for dims in [1, 2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
init_fn_(input_tensor)
self.assertEqual(input_tensor, input_tensor.clone().fill_(val))
def test_eye(self):
input_tensor = self._create_random_nd_tensor(2, size_min=1, size_max=5)
init.eye_(input_tensor)
# Check every single element
for i in range(input_tensor.size(0)):
for j in range(input_tensor.size(1)):
if i == j:
assert input_tensor[i][j] == 1
else:
assert input_tensor[i][j] == 0
def test_eye_only_works_on_2d_inputs(self):
for dims in [1, 3]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.eye_(tensor)
def test_max_unpool(self):
# Test 1D
output, indices = F.max_pool1d(torch.randn([1, 1, 4]), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool1d(output, indices, 2), F.max_unpool1d(output, indices, 2, stride=2))
# Test 2D
output, indices = F.max_pool2d(torch.randn([1, 1, 4, 4]), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool2d(output, indices, 2), F.max_unpool2d(output, indices, 2, stride=2))
# Test 3D
output, indices = F.max_pool3d(torch.randn([4, 4, 4, 4, 4]), 2, stride=2, return_indices=True)
self.assertEqual(F.max_unpool3d(output, indices, 2), F.max_unpool3d(output, indices, 2, stride=2))
def test_dirac_properties(self):
for dims in [3, 4, 5]:
input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)
init.dirac_(input_tensor)
c_out, c_in = input_tensor.size(0), input_tensor.size(1)
min_d = min(c_out, c_in)
# Check number of nonzeros is equivalent to smallest dim
assert torch.nonzero(input_tensor).size(0) == min_d
# Check sum of values (can have precision issues, hence assertEqual) is also equivalent
self.assertEqual(input_tensor.sum(), min_d)
    def test_dirac_identity(self):
        """Dirac-initialized conv filters must act as the identity map.

        For each of conv1d/2d/3d: the first ``in_c`` output channels must
        reproduce the valid-convolution interior of the input, and the
        remaining ``out_c - in_c`` channels must be all zeros.
        """
        batch, in_c, out_c, size, kernel_size = 8, 3, 4, 5, 3
        # Test 1D
        input_var = torch.randn(batch, in_c, size)
        filter_var = torch.zeros(out_c, in_c, kernel_size)
        init.dirac_(filter_var)
        output_var = F.conv1d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
        self.assertEqual(input_tensor[:, :, 1:-1], output_tensor[:, :in_c, :])  # Assert in_c outputs are preserved
        assert torch.nonzero(output_tensor[:, in_c:, :]).numel() == 0  # Assert extra outputs are 0
        # Test 2D
        input_var = torch.randn(batch, in_c, size, size)
        filter_var = torch.zeros(out_c, in_c, kernel_size, kernel_size)
        init.dirac_(filter_var)
        output_var = F.conv2d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :]).numel() == 0
        # Test 3D
        input_var = torch.randn(batch, in_c, size, size, size)
        filter_var = torch.zeros(out_c, in_c, kernel_size, kernel_size, kernel_size)
        init.dirac_(filter_var)
        output_var = F.conv3d(input_var, filter_var)
        input_tensor, output_tensor = input_var.data, output_var.data
        # NOTE(review): only four indices on these 5-D tensors — the trailing
        # dimension is implicitly taken whole, so this matches the 2D check.
        self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1], output_tensor[:, :in_c, :, :])
        assert torch.nonzero(output_tensor[:, in_c:, :, :, :]).numel() == 0
def test_dirac_only_works_on_3_4_5d_inputs(self):
for dims in [1, 2, 6]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.dirac_(tensor)
def test_xavier_uniform_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
with self.assertRaises(ValueError):
init.xavier_uniform_(tensor)
def test_xavier_normal_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
with self.assertRaises(ValueError):
init.xavier_normal_(tensor)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_uniform(self):
for use_gain in [True, False]:
for dims in [2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
gain = 1
if use_gain:
gain = self._random_float(0.1, 2)
init.xavier_uniform_(input_tensor, gain=gain)
else:
init.xavier_uniform_(input_tensor)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
bounds = expected_std * math.sqrt(3)
assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_xavier_normal(self):
for use_gain in [True, False]:
for dims in [2, 4]:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
gain = 1
if use_gain:
gain = self._random_float(0.1, 2)
init.xavier_normal_(input_tensor, gain=gain)
else:
init.xavier_normal_(input_tensor)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
assert self._is_normal(input_tensor, 0, expected_std)
def test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
init.kaiming_uniform_(tensor)
def test_kaiming_normal_errors_on_inputs_smaller_than_2d(self):
for dims in [0, 1]:
with self.assertRaises(ValueError):
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)
init.kaiming_normal_(tensor)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_uniform(self):
for use_a in [True, False]:
for dims in [2, 4]:
for mode in ['fan_in', 'fan_out']:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
if use_a:
a = self._random_float(0.1, 2)
init.kaiming_uniform_(input_tensor, a=a, mode=mode)
else:
a = 0
init.kaiming_uniform_(input_tensor, mode=mode)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
if mode == 'fan_in':
n = fan_in
else:
n = fan_out
expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
bounds = expected_std * math.sqrt(3.0)
assert self._is_uniform(input_tensor, -bounds, bounds)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_kaiming_normal(self):
for use_a in [True, False]:
for dims in [2, 4]:
for mode in ['fan_in', 'fan_out']:
input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
if use_a:
a = self._random_float(0.1, 2)
init.kaiming_normal_(input_tensor, a=a, mode=mode)
else:
a = 0
init.kaiming_normal_(input_tensor, mode=mode)
fan_in = input_tensor.size(1)
fan_out = input_tensor.size(0)
if input_tensor.dim() > 2:
fan_in *= input_tensor[0, 0].numel()
fan_out *= input_tensor[0, 0].numel()
if mode == 'fan_in':
n = fan_in
else:
n = fan_out
expected_std = math.sqrt(2.0 / ((1 + a**2) * n))
assert self._is_normal(input_tensor, 0, expected_std)
def test_sparse_only_works_on_2d_inputs(self):
for dims in [1, 3]:
with self.assertRaises(ValueError):
sparsity = self._random_float(0.1, 0.9)
tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)
init.sparse_(tensor, sparsity)
@unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
def test_sparse_default_std(self):
for use_random_std in [True, False]:
input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35)
rows, cols = input_tensor.size(0), input_tensor.size(1)
sparsity = self._random_float(0.1, 0.2)
std = 0.01 # default std
if use_random_std:
std = self._random_float(0.01, 0.2)
init.sparse_(input_tensor, sparsity=sparsity, std=std)
else:
init.sparse_(input_tensor, sparsity=sparsity)
for col_idx in range(input_tensor.size(1)):
column = input_tensor[:, col_idx]
assert column[column == 0].nelement() >= math.ceil(sparsity * rows)
assert self._is_normal(input_tensor[input_tensor != 0], 0, std)
@skipIfNoLapack
def test_orthogonal(self):
for use_gain in [True, False]:
for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:
input_tensor = torch.zeros(tensor_size)
gain = 1.0
if use_gain:
gain = self._random_float(0.1, 2)
init.orthogonal_(input_tensor, gain=gain)
else:
init.orthogonal_(input_tensor)
rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])
flattened_tensor = input_tensor.view(rows, cols)
if rows > cols:
self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
torch.eye(cols) * gain ** 2, prec=1e-6)
else:
self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
torch.eye(rows) * gain ** 2, prec=1e-6)
def test_deprecation(self):
x = torch.randn(3, 3)
def fn():
init.normal(x)
self.assertWarnsRegex(fn, 'deprecated', 'methods not suffixed with underscore should be deprecated')
# Generates rand tensor with non-equal values. This ensures that duplicate
# values won't be causing test failure for modules like MaxPooling.
# size should be small, otherwise randperm fails / long overflows.
def _rand_tensor_non_equal(*size):
total = reduce(mul, size, 1)
return torch.randperm(total).view(*size).double()
def add_test(test, decorator=None):
    """Register `test` (a module/criterion test instance) on the TestNN class.

    Adds the CPU variant under ``test.get_name()`` and CUDA variants under
    ``<name>_cuda`` — per-dtype (``_float``/``_double``/``_half``) when the
    test's ``test_cuda`` accepts a ``dtype`` argument, otherwise a single
    ``_cuda`` entry.  `decorator`, if given, wraps every generated function.
    """
    def add(test_name, fn):
        # Guard against two generated tests silently shadowing each other.
        if hasattr(TestNN, test_name):
            raise RuntimeError('Found two tests with the same name: ' + test_name)
        if decorator is not None:
            fn = decorator(fn)
        setattr(TestNN, test_name, fn)
    test_name = test.get_name()
    # `test=test` default binds the loop value eagerly (late-binding closure fix).
    add(test_name, lambda self, test=test: test(self))
    cuda_test_name = test_name + '_cuda'
    # With dtype enable, it's good enough to test against three floating types
    kwargs = {}
    if 'extra_args' in get_function_arglist(test.test_cuda):
        kwargs['extra_args'] = test.extra_args
    if 'dtype' in get_function_arglist(test.test_cuda):
        add(cuda_test_name + '_float', lambda self,
            test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs))
        add(cuda_test_name + '_double', lambda self,
            test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs))
        if getattr(test, 'check_half', True):
            # NOTE(review): unlike the float/double lambdas above, `kwargs` is
            # captured by closure here instead of bound as a default; this is
            # equivalent today only because `kwargs` is never rebound afterwards.
            add(cuda_test_name + '_half', lambda self,
                test=test: test.test_cuda(self, dtype=torch.half, **kwargs))
    else:
        add(cuda_test_name, lambda self, test=test, kwargs=kwargs: test.test_cuda(self, **kwargs))
def wrap_functional(fn, **kwargs):
    """Wrap a functional `fn` in a throwaway nn.Module subclass.

    The returned class applies ``fn(*inputs, **kwargs)`` in ``forward``,
    letting the generic module-test harness treat a bare functional like
    any other nn.Module.
    """
    class FunctionalModule(nn.Module):
        def forward(self, *inputs):
            return fn(*inputs, **kwargs)
    return FunctionalModule
# Criterion (loss-module) test specifications consumed by the generic test
# harness via add_test(): each dict names the nn loss module under test, how
# to build constructor args / inputs / targets, and (optionally) a pure
# reference implementation to validate the module's output against.
new_criterion_tests = [
    dict(
        module_name='BCEWithLogitsLoss',
        input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(15, 10).gt(0).double()
    ),
    dict(
        module_name='BCEWithLogitsLoss',
        constructor_args=(torch.rand(10),),
        input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(15, 10).gt(0).double(),
        desc='weights'
    ),
    dict(
        module_name='BCEWithLogitsLoss',
        constructor_args=(torch.rand(()),),
        input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(()).gt(0).double(),
        desc='scalar_weights'
    ),
    dict(
        module_name='NLLLoss',
        input_size=(2, 3, 5, 5),
        target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m:
            loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
        check_sum_reduction=True,
        desc='2d'
    ),
    dict(
        module_name='NLLLoss',
        constructor_args_fn=lambda: (torch.rand(3),),
        input_size=(2, 3, 5, 5),
        # NOTE(review): fixed `target` tensor here, while sibling entries use
        # `target_fn` — presumably intentional; confirm against the harness.
        target=torch.rand(2, 5, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m:
            loss_reference_fns['NLLLossNd'](i, t, weight=get_weight(m)),
        desc='2d_weights',
    ),
    dict(
        module_name='NLLLoss',
        constructor_args=(None, None, 1),
        input_size=(2, 3, 5, 5),
        target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m:
            loss_reference_fns['NLLLossNd'](i, t, ignore_index=1),
        desc='2d_ignore_index',
    ),
    dict(
        module_name='NLLLoss',
        input_size=(2, 3, 5, 5, 2, 2),
        target_fn=lambda: torch.rand(2, 5, 5, 2, 2).mul(3).floor().long(),
        reference_fn=lambda i, t, m:
            loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
        check_sum_reduction=True,
        desc='higher_dim'
    ),
    dict(
        module_name='NLLLoss',
        input_size=(2, 3, 5),
        target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m:
            loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
        check_sum_reduction=True,
        desc='dim_is_3'
    ),
    dict(
        module_name='PoissonNLLLoss',
        input_size=(2, 3, 4, 5),
        target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
        desc='no_full_loss',  # without Stirling approximation
    ),
    dict(
        module_name='PoissonNLLLoss',
        constructor_args=(False,),
        input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001),
        target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
        desc='full_loss',  # with Stirling approximation
    ),
    dict(
        module_name='L1Loss',
        input_size=(),
        target_size=(),
        reference_fn=lambda i, t, _: 1. / i.numel() * (i - t).abs().sum(),
        desc='scalar',
    ),
    dict(
        module_name='KLDivLoss',
        input_fn=lambda: torch.rand(()).log(),
        target_fn=lambda: torch.rand(()),
        reference_fn=lambda i, t, m:
            kldivloss_reference(i, t, get_reduction(m)),
        check_sum_reduction=True,
        desc='scalar',
    ),
    dict(
        module_name='MSELoss',
        input_size=(),
        target_size=(),
        reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() /
                                      (i.numel() if get_reduction(m) == 'elementwise_mean' else 1)),
        check_sum_reduction=True,
        desc='scalar'
    ),
    dict(
        module_name='MSELoss',
        input_fn=lambda: torch.ones(5, 68, 64, 64, dtype=torch.float) / 10,
        target_fn=lambda: torch.zeros(5, 68, 64, 64, dtype=torch.float),
        reference_fn=lambda i, t, m: ((i - t).abs().pow(2).sum() /
                                      (i.numel() if get_reduction(m) == 'elementwise_mean' else 1)),
        check_forward_only=True,
        desc='prec',
    ),
    dict(
        module_name='BCELoss',
        constructor_args_fn=lambda: (torch.rand(()),),
        input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2),
        # NOTE(review): rand(()).gt(0) is almost surely 1 (rand is in [0, 1));
        # other BCE entries use randn(...).gt(0) for a ~50/50 target — confirm
        # whether randn was intended here.
        target_fn=lambda: torch.rand(()).gt(0).double(),
        reference_fn=lambda i, t, m: -((t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)).sum() /
            (i.numel() if get_reduction(m) == 'elementwise_mean' else 1),
        desc='scalar_weights',
        check_gradgrad=False,
    ),
    dict(
        module_name='HingeEmbeddingLoss',
        constructor_args=(0.5,),
        input_size=(),
        target_fn=lambda: torch.randn(()).gt(0).double().mul_(2).sub(1),
        desc='scalar_margin',
        check_sum_reduction=True,
    ),
    dict(
        module_name='SmoothL1Loss',
        input_size=(),
        target_size=(),
        check_sum_reduction=True,
        reference_fn=lambda i, t, m:
            smoothl1loss_reference(i, t, reduction=get_reduction(m)),
        desc='scalar',
    ),
    dict(
        module_name='MultiLabelSoftMarginLoss',
        constructor_args=(torch.rand(10),),
        input_fn=lambda: torch.randn(5, 10),
        target_fn=lambda: torch.rand(5, 10).mul(2).floor(),
        reference_fn=lambda i, t, m: -((t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * get_weight(m)).sum() /
            (i.numel() if get_reduction(m) == 'elementwise_mean' else 1),
        desc='weights',
        check_sum_reduction=True,
        check_gradgrad=False,
    ),
    dict(
        module_name='CTCLoss',
        constructor_args=(14,),  # blank=14
        extra_args=([50, 50, 50], [30, 25, 20]),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
        reference_fn=lambda i, t, il, tl, m:
            ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)),
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
    ),
    dict(
        module_name='CTCLoss',
        # NOTE(review): despite the '1d_target' desc, target_fn below is
        # identical to the 2D entry above — confirm whether a 1-D
        # (concatenated) target was intended here.
        desc='1d_target',
        constructor_args=(14,),  # blank=14
        extra_args=([50, 50, 50], [30, 25, 20]),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
        reference_fn=lambda i, t, il, tl, m:
            ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)),
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
    ),
    dict(
        module_name='CTCLoss',
        desc='2d_int_target',
        constructor_args=(0,),  # blank=0
        extra_args=([50, 50, 50], [30, 25, 20]),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int),
        reference_fn=lambda i, t, il, tl, m:
            ctcloss_reference(i, t, il, tl, blank=0, reduction=get_reduction(m)),
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
        convert_target=False,
    ),
]
def poissonnllloss_no_reduce_test():
    """PoissonNLLLoss with reduction='none': elementwise loss output."""
    t = torch.randn(10, 10)
    return dict(
        # Fixed test name: previously misspelled 'PoissonNLLLLoss_no_reduce'
        # (four L's), which leaked into the generated test-case name.
        fullname='PoissonNLLLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(10, 10),
        pickle=False)
def bceloss_no_reduce_test():
    """BCELoss with reduction='none' on a 15x10 batch; manual BCE reference."""
    t = Variable(torch.randn(15, 10).gt(0).double())
    return dict(
        fullname='BCELoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')),
        # Clamp away from {0, 1} so log() in the reference stays finite.
        input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
        reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()),
        check_gradgrad=False,
        pickle=False)
def bceloss_no_reduce_scalar_test():
    """BCELoss with reduction='none' on a 0-dim (scalar) input."""
    t = torch.randn(()).gt(0).double()
    return dict(
        fullname='BCELoss_no_reduce_scalar',
        constructor=wrap_functional(
            lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
        reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()),
        check_gradgrad=False,
        pickle=False)
def bceloss_weights_no_reduce_test():
    """BCELoss with per-class weights and reduction='none'."""
    t = Variable(torch.randn(15, 10).gt(0).double())
    weights = torch.rand(10)
    return dict(
        fullname='BCELoss_weights_no_reduce',
        constructor=wrap_functional(
            lambda i: F.binary_cross_entropy(i, t.type_as(i),
                                             weight=weights.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
        reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
        check_gradgrad=False,
        pickle=False)
def bceloss_weights_no_reduce_scalar_test():
    """Weighted BCELoss with reduction='none' on a 0-dim (scalar) input."""
    # Threshold the target to {0, 1} like every other BCELoss test here:
    # a raw randn() target can be negative, which is outside BCE's domain.
    t = torch.randn(()).gt(0).double()
    weights = torch.rand(())
    return dict(
        fullname='BCELoss_weights_no_reduce_scalar',
        constructor=wrap_functional(
            lambda i: F.binary_cross_entropy(i, t.type_as(i),
                                             weight=weights.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
        reference_fn=lambda i, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
        check_gradgrad=False,
        pickle=False)
def bce_with_logistic_no_reduce_test():
    """BCEWithLogitsLoss, reduction='none'; reference applies sigmoid manually."""
    t = Variable(torch.randn(15, 10).gt(0).double())
    sigmoid = nn.Sigmoid()
    return dict(
        fullname='BCEWithLogitsLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
        reference_fn=lambda i, m: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),
        check_gradgrad=False,
        pickle=False)
def bce_with_logistic_no_reduce_scalar_test():
    """BCEWithLogitsLoss with reduction='none' on a 0-dim (scalar) input."""
    t = torch.randn(()).gt(0).double()
    sigmoid = nn.Sigmoid()
    return dict(
        fullname='BCEWithLogitsLoss_no_reduce_scalar',
        constructor=wrap_functional(
            lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
        reference_fn=lambda i, m: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()),
        check_gradgrad=False,
        pickle=False)
def kldivloss_with_target_no_reduce_test():
    """KLDivLoss, reduction='none', differentiating w.r.t. the target."""
    i = torch.rand(10, 10).log()
    return dict(
        fullname='KLDivLoss_with_target_no_reduce',
        # Here the harness input plays the role of the *target*; the
        # log-probability input `i` above is fixed.
        constructor=wrap_functional(
            lambda t: F.kl_div(i.type_as(t), t, reduction='none')),
        input_fn=lambda: torch.rand(10, 10),
        reference_fn=lambda t, _:
            loss_reference_fns['KLDivLoss'](i.type_as(t), t, reduction='none'),
        pickle=False)
def kldivloss_no_reduce_test():
    """KLDivLoss with reduction='none' on a 10x10 log-probability input."""
    t = torch.randn(10, 10)
    return dict(
        fullname='KLDivLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.kl_div(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(10, 10).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),
        pickle=False)
def kldivloss_no_reduce_scalar_test():
    """KLDivLoss with reduction='none' on a 0-dim (scalar) input."""
    t = torch.randn(())
    return dict(
        fullname='KLDivLoss_no_reduce_scalar',
        constructor=wrap_functional(
            lambda i: F.kl_div(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.rand(()).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'),
        pickle=False)
def l1loss_no_reduce_test():
    """L1Loss with reduction='none': elementwise absolute differences."""
    t = torch.randn(2, 3, 4)
    return dict(
        fullname='L1Loss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.l1_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(2, 3, 4),
        reference_fn=lambda i, m: (i - t.type_as(i)).abs(),
        pickle=False)
def l1loss_no_reduce_scalar_test():
    """L1Loss with reduction='none' on a 0-dim (scalar) input."""
    t = torch.randn(())
    return dict(
        fullname='L1Loss_no_reduce_scalar',
        constructor=wrap_functional(
            lambda i: F.l1_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(()),
        reference_fn=lambda i, m: (i - t.type_as(i)).abs(),
        pickle=False)
def mseloss_no_reduce_test():
    """MSELoss with reduction='none': elementwise squared differences."""
    input_size = (2, 3, 4, 5)
    target = torch.randn(*input_size)
    return dict(
        fullname='MSELoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.mse_loss(i, target.type_as(i), reduction='none')),
        input_size=input_size,
        reference_fn=lambda i, m: (i - target).pow(2),
        pickle=False)
def mseloss_no_reduce_scalar_test():
    """MSELoss with reduction='none' on a 0-dim (scalar) input."""
    input_size = ()
    target = torch.randn(input_size)
    return dict(
        fullname='MSELoss_no_reduce_scalar',
        constructor=wrap_functional(
            lambda i: F.mse_loss(i, target.type_as(i), reduction='none')),
        input_size=input_size,
        reference_fn=lambda i, m: (i - target).pow(2),
        pickle=False)
def nllloss_no_reduce_test():
    """NLLLoss with reduction='none' on a 15x10 log-probability batch."""
    t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    kwargs = {'reduction': 'none'}
    return dict(
        fullname='NLLLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),
        input_fn=lambda: torch.rand(15, 10).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs),
        pickle=False)
def nllloss_no_reduce_ignore_index_test():
    """NLLLoss with reduction='none' and ignore_index=2."""
    t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    kwargs = {'ignore_index': 2, 'reduction': 'none'}
    return dict(
        fullname='NLLLoss_no_reduce_ignore_index',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),
        input_fn=lambda: torch.rand(15, 10).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs),
        pickle=False)
def nllloss_no_reduce_weights_test():
    """NLLLoss with per-class weights and reduction='none'."""
    t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    weight = torch.rand(10)
    def kwargs(i):
        # Weights must match the input's type/device, hence the callable.
        return {'weight': weight.type_as(i), 'reduction': 'none'}
    return dict(
        fullname='NLLLoss_no_reduce_weights',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
        input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),
        pickle=False)
def nllloss_no_reduce_weights_ignore_index_test():
    """Weighted NLLLoss with reduction='none' and ignore_index=2."""
    t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    weight = torch.rand(10)

    def kwargs(i):
        # Weights must match the input's type/device, hence the callable.
        return {'weight': weight.type_as(i), 'reduction': 'none',
                'ignore_index': 2}
    return dict(
        fullname='NLLLoss_no_reduce_weights_ignore_index',
        constructor=wrap_functional(
            # Pass `i` directly (previously `i.data`) so the constructor and
            # reference_fn build identical kwargs, matching every sibling
            # NLLLoss test; `type_as` behaves the same for both.
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
        input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),
        pickle=False)
def nllloss_no_reduce_weights_ignore_index_neg_test():
    """Weighted NLLLoss with reduction='none' and a negative ignore_index."""
    t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    weight = torch.rand(10)

    def kwargs(i):
        # Weights must match the input's type/device, hence the callable.
        return {'weight': weight.type_as(i), 'reduction': 'none',
                'ignore_index': -1}
    return dict(
        fullname='NLLLoss_no_reduce_weights_ignore_index_neg',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
        # Use input_fn like every sibling test: the previous fixed `input=`
        # tensor was created once at module import and shared across runs.
        input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)),
        pickle=False)
def nllloss2d_no_reduce_test():
    """2D (spatial) NLLLoss with reduction='none'."""
    t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
    kwargs = {'reduction': 'none'}
    return dict(
        fullname='NLLLoss2d_no_reduce',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),
        input_fn=lambda: torch.rand(2, 3, 5, 5).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
        pickle=False)
def nllloss2d_no_reduce_ignore_index_test():
    """2D NLLLoss with reduction='none' and ignore_index=1."""
    t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
    kwargs = {'ignore_index': 1, 'reduction': 'none'}
    return dict(
        fullname='NLLLoss2d_no_reduce_ignore_index',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),
        input_fn=lambda: torch.rand(2, 3, 5, 5).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
        pickle=False)
def nllloss2d_no_reduce_weights_test():
    """2D NLLLoss with per-class weights and reduction='none'."""
    t = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
    weight = torch.rand(3)
    def kwargs(i):
        # Weights must match the input's type/device, hence the callable.
        return {'weight': weight.type_as(i), 'reduction': 'none'}
    return dict(
        fullname='NLLLoss2d_no_reduce_weights',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
        input_fn=lambda: torch.rand(2, 3, 5, 5).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)),
        pickle=False)
def nlllossNd_no_reduce_test():
    """Higher-dimensional NLLLoss with reduction='none'."""
    t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
    kwargs = {'reduction': 'none'}
    return dict(
        fullname='NLLLossNd_no_reduce',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),
        input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
        pickle=False)
def nlllossNd_no_reduce_ignore_index_test():
    """Higher-dimensional NLLLoss with reduction='none' and ignore_index=1."""
    t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
    kwargs = {'ignore_index': 1, 'reduction': 'none'}
    return dict(
        fullname='NLLLossNd_no_reduce_ignore_index',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)),
        input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs),
        pickle=False)
def nlllossNd_no_reduce_weights_test():
    """Higher-dimensional NLLLoss with per-class weights, reduction='none'."""
    t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
    weight = torch.rand(3)
    def kwargs(i):
        # Weights must match the input's type/device, hence the callable.
        return {'weight': weight.type_as(i), 'reduction': 'none'}
    return dict(
        fullname='NLLLossNd_no_reduce_weights',
        constructor=wrap_functional(
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))),
        input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
        reference_fn=lambda i, _:
            loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)),
        pickle=False)
def smoothl1loss_no_reduce_test():
    """SmoothL1Loss with reduction='none' on a 2x3x4 input."""
    t = torch.randn(2, 3, 4)
    return dict(
        fullname='SmoothL1Loss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(2, 3, 4),
        reference_fn=lambda i, _:
            loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'),
        pickle=False)
def smoothl1loss_no_reduce_scalar_test():
    """SmoothL1Loss with reduction='none' on a 0-dim (scalar) input."""
    t = torch.randn(())
    return dict(
        fullname='SmoothL1Loss_no_reduce_scalar',
        constructor=wrap_functional(
            lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(()),
        reference_fn=lambda i, _:
            loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'),
        pickle=False)
def multilabelmarginloss_1d_no_reduce_test():
    """MultiLabelMarginLoss with reduction='none' on a 1-D input."""
    t = Variable(torch.rand(10).mul(10).floor().long())
    return dict(
        fullname='MultiLabelMarginLoss_1d_no_reduce',
        constructor=wrap_functional(
            lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),
        input_fn=lambda: torch.randn(10),
        reference_fn=lambda i, _:
            loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
        check_sum_reduction=True,
        check_gradgrad=False,
        pickle=False)
def multilabelmarginloss_index_neg_test():
    """MultiLabelMarginLoss where targets may contain the -1 terminator."""
    t = Variable(torch.clamp(torch.rand(5, 10).add(-.5).mul(20).floor().long(), min=-1))
    return dict(
        fullname='MultiLabelMarginLoss_index_neg',
        constructor=wrap_functional(
            lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),
        input_fn=lambda: torch.randn(5, 10),
        reference_fn=lambda i, _:
            loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
        check_sum_reduction=True,
        check_gradgrad=False,
        pickle=False)
def multilabelmarginloss_no_reduce_test():
    """MultiLabelMarginLoss with reduction='none' on a 5x10 batch."""
    t = Variable(torch.rand(5, 10).mul(10).floor().long())
    return dict(
        fullname='MultiLabelMarginLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')),
        input_fn=lambda: torch.randn(5, 10),
        reference_fn=lambda i, _:
            loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
        check_sum_reduction=True,
        check_gradgrad=False,
        pickle=False)
def hingeembeddingloss_no_reduce_test():
    """HingeEmbeddingLoss with reduction='none'; targets in {-1, +1}."""
    t = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))
    return dict(
        fullname='HingeEmbeddingLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.hinge_embedding_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(10),
        reference_fn=lambda i, _:
            loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), reduction='none'),
        check_sum_reduction=True,
        pickle=False)
def hingeembeddingloss_margin_no_reduce_test():
    """HingeEmbeddingLoss with a non-default margin and reduction='none'."""
    t = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))
    return dict(
        fullname='HingeEmbeddingLoss_margin_no_reduce',
        constructor=wrap_functional(
            lambda i: F.hinge_embedding_loss(i, t.type_as(i), margin=0.5, reduction='none')),
        input_fn=lambda: torch.randn(10),
        reference_fn=lambda i, _:
            loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), margin=0.5, reduction='none'),
        check_sum_reduction=True,
        pickle=False)
def softmarginloss_no_reduce_test():
    """SoftMarginLoss with reduction='none' on a 5x5 input."""
    t = torch.randn(5, 5)
    return dict(
        fullname='SoftMarginLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.soft_margin_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(5, 5),
        reference_fn=lambda i, _:
            loss_reference_fns['SoftMarginLoss'](i, t.type_as(i), reduction='none'),
        pickle=False)
def multilabelsoftmarginloss_no_reduce_test():
    """MultiLabelSoftMarginLoss with reduction='none'; manual BCE reference."""
    t = torch.rand(5, 10).mul(2).floor()
    return dict(
        fullname='MultiLabelSoftMarginLoss_no_reduce',
        constructor=wrap_functional(
            lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(5, 10),
        reference_fn=lambda i, m: -(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()),
        check_gradgrad=False,
        pickle=False)
def multilabelsoftmarginloss_weights_no_reduce_test():
    """MultiLabelSoftMarginLoss with per-class weights, reduction='none'."""
    t = torch.rand(5, 10).mul(2).floor()
    weights = torch.rand(10)
    return dict(
        fullname='MultiLabelSoftMarginLoss_weights_no_reduce',
        constructor=wrap_functional(
            lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i),
                                                    weight=weights.type_as(i), reduction='none')),
        input_fn=lambda: torch.randn(5, 10),
        reference_fn=lambda i, m: -((t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * weights),
        check_sum_reduction=True,
        check_gradgrad=False,
        pickle=False)
def multimarginloss_no_reduce_test():
t = torch.rand(5).mul(8).floor().long()
return dict(
fullname='MultiMarginLoss_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')),
input_fn=lambda: torch.randn(5, 10),
reference_fn=lambda i, _:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_1d_no_reduce_test():
t = torch.rand(1).mul(8).floor().long()
return dict(
fullname='MultiMarginLoss_1d_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')),
input_fn=lambda: torch.randn(10),
reference_fn=lambda i, _:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_p_no_reduce_test():
t = torch.rand(5).mul(8).floor().long()
return dict(
fullname='MultiMarginLoss_p_no_reduce',
constructor=wrap_functional(
lambda i: F.multi_margin_loss(i, t.type_as(i).long(), p=2, reduction='none')),
input_fn=lambda: torch.randn(5, 10).clamp_(1e-2, 1 - 1e-2),
reference_fn=lambda i, _:
loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), p=2, reduction='none'),
check_sum_reduction=True,
check_gradgrad=False,
pickle=False)
def multimarginloss_margin_no_reduce_test():
    """Test spec: MultiMarginLoss with a non-default margin, reduction='none'."""
    target = torch.rand(5).mul(8).floor().long()
    return {
        'fullname': 'MultiMarginLoss_margin_no_reduce',
        'constructor': wrap_functional(
            lambda i: F.multi_margin_loss(i, target.type_as(i).long(),
                                          margin=0.5, reduction='none')),
        'input_fn': lambda: torch.randn(5, 10),
        'reference_fn': lambda i, _: loss_reference_fns['MultiMarginLoss'](
            i, target.data.type_as(i).long(), margin=0.5, reduction='none'),
        'check_sum_reduction': True,
        'check_gradgrad': False,
        'pickle': False,
    }
def multimarginloss_weights_no_reduce_test():
    """Test spec: MultiMarginLoss with per-class weights, reduction='none'."""
    target = torch.rand(5).mul(8).floor().long()
    class_weights = torch.rand(10)
    return {
        'fullname': 'MultiMarginLoss_weights_no_reduce',
        'constructor': wrap_functional(
            lambda i: F.multi_margin_loss(
                i, target.type_as(i).long(), weight=class_weights.type_as(i),
                reduction='none')),
        'input_fn': lambda: torch.randn(5, 10),
        'reference_fn': lambda i, _: loss_reference_fns['MultiMarginLoss'](
            i, target.data.type_as(i).long(), weight=class_weights,
            reduction='none'),
        'check_sum_reduction': True,
        'check_gradgrad': False,
        'pickle': False,
    }
new_module_tests = [
poissonnllloss_no_reduce_test(),
bceloss_no_reduce_test(),
bceloss_weights_no_reduce_test(),
bce_with_logistic_no_reduce_test(),
bceloss_no_reduce_scalar_test(),
bceloss_weights_no_reduce_scalar_test(),
bce_with_logistic_no_reduce_scalar_test(),
kldivloss_with_target_no_reduce_test(),
kldivloss_no_reduce_test(),
kldivloss_no_reduce_scalar_test(),
l1loss_no_reduce_test(),
l1loss_no_reduce_scalar_test(),
mseloss_no_reduce_test(),
mseloss_no_reduce_scalar_test(),
nllloss_no_reduce_test(),
nllloss_no_reduce_ignore_index_test(),
nllloss_no_reduce_weights_test(),
nllloss_no_reduce_weights_ignore_index_test(),
nllloss_no_reduce_weights_ignore_index_neg_test(),
nllloss2d_no_reduce_test(),
nllloss2d_no_reduce_weights_test(),
nllloss2d_no_reduce_ignore_index_test(),
nlllossNd_no_reduce_test(),
nlllossNd_no_reduce_weights_test(),
nlllossNd_no_reduce_ignore_index_test(),
smoothl1loss_no_reduce_test(),
smoothl1loss_no_reduce_scalar_test(),
multilabelmarginloss_1d_no_reduce_test(),
multilabelmarginloss_index_neg_test(),
multilabelmarginloss_no_reduce_test(),
hingeembeddingloss_no_reduce_test(),
hingeembeddingloss_margin_no_reduce_test(),
softmarginloss_no_reduce_test(),
multilabelsoftmarginloss_no_reduce_test(),
multilabelsoftmarginloss_weights_no_reduce_test(),
multimarginloss_no_reduce_test(),
multimarginloss_1d_no_reduce_test(),
multimarginloss_p_no_reduce_test(),
multimarginloss_margin_no_reduce_test(),
multimarginloss_weights_no_reduce_test(),
dict(
module_name='BatchNorm1d',
constructor_args=(10,),
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='affine',
),
dict(
module_name='BatchNorm1d',
constructor_args=(5,),
input_size=(4, 5, 3),
cudnn=True,
check_eval=True,
desc='3d_input',
),
dict(
module_name='BatchNorm1d',
constructor_args=(10, 1e-3, None),
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='affine_simple_average',
),
dict(
module_name='BatchNorm1d',
constructor_args=(10, 1e-3, 0.3, False),
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='not_affine',
),
dict(
module_name='BatchNorm1d',
constructor_args=(10, 1e-3, 0.3, True, False),
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc='not_tracking_stats',
),
dict(
module_name='BatchNorm1d',
constructor_args=(5, 1e-3, 0.3, False),
input_size=(4, 5, 3),
cudnn=True,
check_eval=True,
desc='3d_input_not_affine',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3,),
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, None),
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='2d_simple_average',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, 0.8),
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='momentum',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, 0.8, False),
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='not_affine',
),
dict(
module_name='BatchNorm2d',
constructor_args=(3, 1e-3, 0.8, True, False),
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='not_tracking_stats',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3,),
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, None),
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='3d_simple_average',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, 0.7),
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='momentum',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, 0.7, False),
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='not_affine',
),
dict(
module_name='BatchNorm3d',
constructor_args=(3, 1e-3, 0.7, True, False),
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='not_tracking_stats',
),
dict(
module_name='InstanceNorm1d',
constructor_args=(3, 1e-3, 0.3),
input_size=(4, 3, 15),
cudnn=True,
check_eval=True,
),
dict(
module_name='InstanceNorm1d',
constructor_args=(3, 1e-3, 0.3, False, True),
input_size=(4, 3, 15),
cudnn=True,
check_eval=True,
desc='tracking_stats',
),
dict(
module_name='InstanceNorm2d',
constructor_args=(3, 1e-3, 0.3),
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
),
dict(
module_name='InstanceNorm2d',
constructor_args=(3, 1e-3, 0.3, False, True),
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc='tracking_stats',
),
dict(
module_name='InstanceNorm3d',
constructor_args=(3, 1e-3, 0.3),
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
),
dict(
module_name='InstanceNorm3d',
constructor_args=(3, 1e-3, 0.3, False, True),
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc='tracking_stats',
),
dict(
module_name='LayerNorm',
constructor_args=([5], 1e-3),
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc='1d_elementwise_affine',
),
dict(
module_name='LayerNorm',
constructor_args=([5], 1e-3, False),
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc='1d_no_elementwise_affine',
),
dict(
module_name='LayerNorm',
constructor_args=([2, 2, 5], 1e-3),
input_size=(4, 2, 2, 5),
cudnn=True,
check_eval=True,
desc='3d_elementwise_affine',
),
dict(
module_name='LayerNorm',
constructor_args=([2, 2, 5], 1e-3, False),
input_size=(4, 2, 2, 5),
cudnn=True,
check_eval=True,
desc='3d_no_elementwise_affine',
),
dict(
module_name='GroupNorm',
constructor_args=(3, 6, 1e-3),
input_size=(4, 6, 5),
cudnn=True,
check_eval=True,
desc='1d_affine',
),
dict(
module_name='GroupNorm',
constructor_args=(5, 5, 1e-3, False),
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc='1d_no_affine_IN', # this setting is equivalent with InstanceNorm
),
dict(
module_name='GroupNorm',
constructor_args=(1, 5, 1e-3, False),
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc='1d_no_affine_LN', # this setting is equivalent with LayerNorm
),
dict(
module_name='GroupNorm',
constructor_args=(3, 6, 1e-3),
input_size=(4, 6, 2, 3),
cudnn=True,
check_eval=True,
desc='2d_affine',
),
dict(
module_name='GroupNorm',
constructor_args=(3, 3, 1e-3, False),
input_size=(4, 3, 2, 3),
cudnn=True,
check_eval=True,
desc='2d_no_affine_IN', # this setting is equivalent with InstanceNorm
),
dict(
module_name='GroupNorm',
constructor_args=(1, 3, 1e-3, False),
input_size=(4, 3, 2, 3),
cudnn=True,
check_eval=True,
desc='2d_no_affine_LN', # this setting is equivalent with LayerNorm
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 3),
input_size=(2, 4, 10),
cudnn=True,
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 3, 2),
input_size=(2, 4, 10),
cudnn=True,
desc='stride',
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 3, 1, 1),
input_size=(2, 4, 10),
cudnn=True,
desc='pad1'
),
dict(
module_name='Conv1d',
constructor_args=(4, 5, 5, 1, 2),
input_size=(2, 4, 10),
cudnn=True,
desc='pad2'
),
dict(
module_name='Conv1d',
constructor_args=(4, 4, 3, 1, 1),
input_size=(1, 4, 1),
cudnn=True,
desc='pad1size1'
),
dict(
module_name='Conv1d',
constructor_args=(4, 4, 5, 1, 2),
input_size=(1, 4, 1),
cudnn=True,
desc='pad2size1'
),
dict(
fullname='Conv1d_dilated',
constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2),
input_size=(2, 4, 10),
),
dict(
fullname='Conv1d_groups',
constructor=lambda: nn.Conv1d(4, 6, kernel_size=3, groups=2),
input_size=(2, 4, 6),
cudnn=True,
),
dict(
fullname='ConvTranspose1d',
constructor=lambda: nn.ConvTranspose1d(3, 4, kernel_size=3, stride=(3,), padding=1, output_padding=(1,)),
cudnn=True,
input_size=(1, 3, 7),
),
dict(
module_name='ConvTranspose1d',
constructor_args=(3, 4, 3, 2, 1, 1, 1, False),
input_size=(1, 3, 6),
cudnn=True,
desc='no_bias',
),
dict(
module_name='ConvTranspose1d',
constructor_args=(3, 4, 3, 2, 1, 1, 1, True, 2),
input_size=(1, 3, 6),
cudnn=True,
desc='dilated',
),
dict(
fullname='ConvTranspose1d_groups',
constructor=lambda: nn.ConvTranspose1d(4, 6, 3, stride=(3,), padding=1, output_padding=(1,), groups=2),
cudnn=True,
input_size=(2, 4, 7),
),
dict(
module_name='MaxPool1d',
constructor_args=(4,),
input_size=(2, 10, 4),
),
dict(
module_name='MaxPool1d',
constructor_args=(4, 4),
input_size=(2, 10, 4),
desc='stride',
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 2)),
input_size=(2, 3, 7, 5),
cudnn=True,
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 3), (2, 2)),
input_size=(2, 3, 6, 6),
cudnn=True,
desc='strided',
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 3), (2, 2), (1, 1)),
input_size=(2, 3, 6, 6),
cudnn=True,
desc='padding',
),
dict(
module_name='Conv2d',
constructor_args=(3, 2, (3, 3), (2, 2), (1, 1), (2, 2)),
input_size=(2, 3, 8, 8),
cudnn=True,
desc='dilated',
),
dict(
module_name='Conv2d',
constructor_args=(3, 4, (3, 2), 1, 0, 1, 1, False),
input_size=(2, 3, 6, 5),
cudnn=True,
desc='no_bias',
),
dict(
fullname='Conv2d_groups',
constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),
input_size=(2, 4, 6, 5),
cudnn=True,
),
dict(
fullname='Conv2d_groups_thnn',
constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),
input_size=(2, 4, 6, 5),
),
dict(
module_name='ConvTranspose2d',
constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)),
cudnn=True,
input_size=(1, 3, 7, 6),
),
dict(
module_name='ConvTranspose2d',
constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False, (2, 2)),
input_size=(1, 3, 6, 7),
cudnn=True,
desc='dilated',
),
dict(
module_name='ConvTranspose2d',
constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False),
input_size=(1, 3, 6, 7),
cudnn=True,
desc='no_bias',
),
dict(
fullname='ConvTranspose2d_groups',
constructor=lambda: nn.ConvTranspose2d(2, 4, (2, 3), groups=2),
input_size=(1, 2, 4, 5),
cudnn=True,
),
dict(
fullname='Conv2d_depthwise',
constructor=lambda: nn.Conv2d(4, 4, (3, 3), groups=4),
input_size=(2, 4, 6, 6),
),
dict(
fullname='Conv2d_depthwise_with_multiplier',
constructor=lambda: nn.Conv2d(4, 8, (3, 3), groups=4),
input_size=(2, 4, 6, 6),
),
dict(
fullname='Conv2d_depthwise_strided',
constructor=lambda: nn.Conv2d(4, 4, (3, 3), stride=(2, 2), groups=4),
input_size=(2, 4, 6, 6),
),
dict(
fullname='Conv2d_depthwise_padded',
constructor=lambda: nn.Conv2d(4, 4, (3, 3), padding=(1, 1), groups=4),
input_size=(2, 4, 6, 6),
),
dict(
fullname='Conv2d_depthwise_dilated',
constructor=lambda: nn.Conv2d(4, 4, (2, 2), dilation=(2, 2), groups=4),
input_size=(2, 4, 5, 5),
),
dict(
module_name='MaxPool2d',
constructor_args=((3, 3), (2, 2), (1, 1)),
input_size=(1, 3, 7, 7),
),
dict(
module_name='AvgPool1d',
constructor_args=(2,),
input_size=(2, 3, 6),
),
dict(
module_name='AvgPool1d',
constructor_args=((2,), (2,)),
input_size=(2, 3, 6),
desc='stride',
),
dict(
module_name='AvgPool1d',
constructor_args=(2, 2, 1),
input_size=(2, 3, 6),
desc='stride_pad',
),
dict(
module_name='AvgPool2d',
constructor_args=((2, 2),),
input_size=(2, 3, 6, 6),
),
dict(
module_name='AvgPool2d',
constructor_args=((2, 2), (2, 2)),
input_size=(2, 3, 6, 6),
desc='stride',
),
dict(
module_name='AvgPool2d',
constructor_args=((2, 2), (2, 2), (1, 1)),
input_size=(2, 3, 6, 6),
desc='stride_pad',
),
dict(
module_name='LPPool2d',
constructor_args=(2, (2, 2), 2),
input_size=(1, 3, 7, 7),
),
dict(
module_name='LPPool2d',
constructor_args=(1.5, 2),
input_fn=lambda: torch.rand(1, 3, 7, 7),
desc='norm',
),
dict(
module_name='LPPool1d',
constructor_args=(1.5, 2),
input_fn=lambda: torch.rand(1, 3, 7),
desc='norm',
),
dict(
module_name='LPPool1d',
constructor_args=(2, 2, 3),
input_size=(1, 3, 7),
),
dict(
module_name='LocalResponseNorm',
constructor_args=(3, ),
input_size=(1, 5, 7),
desc='1d'
),
dict(
module_name='LocalResponseNorm',
constructor_args=(2, ),
input_size=(1, 5, 7, 7),
desc='2d_uneven_pad'
),
dict(
module_name='LocalResponseNorm',
constructor_args=(1, 1, 0.5, 2),
input_size=(1, 5, 7, 7, 7),
desc='3d_custom_params'
),
dict(
module_name='ReflectionPad1d',
constructor_args=((1, 2),),
input_size=(2, 3, 8),
),
dict(
module_name='ReflectionPad2d',
constructor_args=((1, 2, 3, 4),),
input_size=(2, 3, 8, 8),
),
dict(
module_name='ReplicationPad1d',
constructor_args=((1, 2),),
input_size=(2, 3, 4),
),
dict(
module_name='ReplicationPad2d',
constructor_args=((1, 2, 3, 4),),
input_size=(2, 3, 4, 4),
),
dict(
module_name='ZeroPad2d',
constructor_args=((1, 2, 3, 4),),
input_size=(2, 3, 4, 4)
),
dict(
module_name='ZeroPad2d',
constructor_args=((-1, -1, -1, -2),),
input_size=(2, 3, 4, 4),
desc='negative_dims'
),
dict(
module_name='ConstantPad1d',
constructor_args=((1, 2), 2),
input_size=(2, 3, 4)
),
dict(
module_name='ConstantPad2d',
constructor_args=((1, 2, 3, 4), 2),
input_size=(2, 3, 4, 4)
),
dict(
module_name='ConstantPad3d',
constructor_args=((1, 2, 3, 4, 1, 0), 2),
input_size=(2, 3, 4, 4, 5)
),
dict(
module_name='Conv3d',
constructor_args=(3, 4, (2, 3, 4)),
input_size=(2, 3, 3, 4, 5),
cudnn=True,
),
dict(
module_name='Conv3d',
constructor_args=(3, 4, (2, 3, 4), 1, 0, 1, 1, False),
input_size=(2, 3, 3, 4, 5),
cudnn=True,
desc='no_bias',
),
dict(
module_name='Conv3d',
constructor_args=(3, 4, 2, 2),
input_size=(2, 3, 5, 5, 5),
cudnn=True,
desc='stride',
),
dict(
module_name='Conv3d',
constructor_args=(3, 4, 2, 2, 1),
input_size=(2, 3, 5, 5, 5),
cudnn=True,
desc='stride_padding',
),
dict(
fullname='Conv3d_groups',
constructor=lambda: nn.Conv3d(4, 6, kernel_size=3, groups=2),
input_size=(2, 4, 4, 5, 4),
cudnn=True,
),
dict(
fullname='Conv3d_dilated',
constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2),
input_size=(2, 3, 5, 5, 5),
),
dict(
fullname='Conv3d_dilated_strided',
constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2, stride=2),
input_size=(2, 3, 5, 5, 5),
),
dict(
module_name='ConvTranspose3d',
constructor_args=(2, 3, (2, 3, 2)),
cudnn=True,
input_size=(1, 2, 4, 5, 4),
),
dict(
module_name='ConvTranspose3d',
constructor_args=(2, 3, (2, 3, 2), 1, 0, 0, 1, True, (2, 2, 2)),
cudnn=True,
input_size=(1, 2, 4, 5, 4),
desc='dilated',
),
dict(
module_name='MaxPool3d',
constructor_args=((2, 2, 2),),
input_size=(2, 3, 5, 5, 5),
),
dict(
module_name='MaxPool3d',
constructor_args=(2, (2, 2, 2)),
input_size=(2, 3, 5, 5, 5),
desc='stride',
),
dict(
module_name='MaxPool3d',
constructor_args=(2, 2, (1, 1, 1)),
input_size=(2, 3, 5, 5, 5),
desc='stride_padding',
),
dict(
module_name='AvgPool3d',
constructor_args=((2, 2, 2),),
input_size=(2, 3, 4, 4, 4),
),
dict(
module_name='AvgPool3d',
constructor_args=(2, (2, 2, 2)),
input_size=(2, 3, 5, 5, 5),
desc='stride',
),
dict(
module_name='AvgPool3d',
constructor_args=(2, 2, (1, 1, 1)),
input_size=(2, 3, 5, 5, 5),
desc='stride_pad',
),
dict(
module_name='AvgPool3d',
constructor_args=(4, 2, (1, 2, 1)),
input_size=(2, 3, 5, 5, 5),
desc='stride_pad_gpu_fixedkw_output',
),
dict(
module_name='AvgPool3d',
constructor_args=((2, 4, 8), 1, (1, 1, 2)),
input_size=(2, 3, 2, 4, 8),
desc='stride_pad_gpu_general_output',
),
dict(
module_name='AvgPool3d',
constructor_args=(3, 1, 0),
input_size=(2, 3, 4, 4, 4),
desc='stride1_pad0_gpu_input',
),
dict(
module_name='AvgPool3d',
constructor_args=(2, 2, (1, 1, 1)),
input_size=(2, 3, 4, 4, 4),
desc='stride_pad_gpu_input_nooverlap',
),
dict(
module_name='ReplicationPad3d',
constructor_args=((1, 2, 3, 4, 5, 6),),
input_size=(2, 3, 5, 5, 5),
),
dict(
module_name='Embedding',
constructor_args=(4, 3),
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
),
dict(
module_name='EmbeddingBag',
constructor_args=(4, 3),
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
desc='mean',
),
dict(
module_name='EmbeddingBag',
constructor_args=(4, 3, None, 2, False, 'sum'),
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
desc='sum',
),
dict(
module_name='EmbeddingBag',
constructor_args=(4, 3, None, 2, False, 'max'),
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
desc='max',
),
dict(
fullname='EmbeddingBag_sparse',
constructor=lambda: nn.EmbeddingBag(4, 3, sparse=True),
input_fn=lambda: torch.randperm(2).repeat(1, 2),
jacobian_input=False,
check_gradgrad=False,
),
dict(
constructor=lambda: nn.Embedding(4, 3, sparse=True),
input_fn=lambda: torch.randperm(2).repeat(1, 2),
jacobian_input=False,
fullname='Embedding_sparse',
check_gradgrad=False,
),
dict(
constructor=lambda: nn.FractionalMaxPool2d(
2, output_ratio=0.5, _random_samples=torch.DoubleTensor(1, 3, 2).uniform_()),
input_size=(1, 3, 5, 5),
fullname='FractionalMaxPool2d_ratio',
),
dict(
constructor=lambda: nn.FractionalMaxPool2d((2, 2), output_size=(
4, 4), _random_samples=torch.DoubleTensor(1, 3, 2).uniform_()),
input_size=(1, 3, 7, 7),
fullname='FractionalMaxPool2d_size',
test_cuda=False,
),
dict(
module_name='PixelShuffle',
constructor_args=(3,),
input_size=(1, 9, 4, 4),
),
dict(
module_name='Upsample',
constructor_args=(12, None, 'nearest'),
input_size=(1, 2, 4),
desc='nearest_1d',
),
dict(
module_name='Upsample',
constructor_args=((12, ), None, 'nearest'),
input_size=(1, 2, 3),
desc='nearest_tuple_1d',
),
dict(
module_name='Upsample',
constructor_args=(None, 4, 'nearest'),
input_size=(1, 2, 4),
desc='nearest_scale_1d',
),
dict(
module_name='Upsample',
constructor_args=(12, None, 'linear', False),
input_size=(1, 2, 4),
desc='linear_1d',
),
dict(
module_name='Upsample',
constructor_args=((4, ), None, 'linear', False),
input_size=(1, 2, 3),
desc='linear_tuple_1d',
),
dict(
module_name='Upsample',
constructor_args=(None, 4, 'linear', False),
input_size=(1, 2, 4),
desc='linear_scale_1d',
),
dict(
module_name='Upsample',
constructor_args=(12, None, 'linear', True),
input_size=(1, 2, 4),
desc='linear_1d_align_corners',
),
dict(
module_name='Upsample',
constructor_args=(None, 4, 'linear', True),
input_size=(1, 2, 4),
desc='linear_scale_1d_align_corners',
),
dict(
module_name='Upsample',
constructor_args=(12, None, 'nearest'),
input_size=(1, 2, 4, 4),
desc='nearest_2d',
),
dict(
module_name='Upsample',
constructor_args=((12, 16), None, 'nearest'),
input_size=(1, 2, 3, 4),
desc='nearest_tuple_2d',
),
dict(
module_name='Upsample',
constructor_args=(None, 4, 'nearest'),
input_size=(1, 2, 4, 4),
desc='nearest_scale_2d',
),
dict(
module_name='Upsample',
constructor_args=(12, None, 'bilinear', False),
input_size=(1, 2, 4, 4),
desc='bilinear_2d',
),
dict(
module_name='Upsample',
constructor_args=((4, 6), None, 'bilinear', False),
input_size=(1, 2, 2, 3),
desc='bilinear_tuple_2d',
),
dict(
module_name='Upsample',
constructor_args=(None, 4, 'bilinear', False),
input_size=(1, 2, 4, 4),
desc='bilinear_scale_2d',
),
dict(
module_name='Upsample',
constructor_args=(None, (2, 2), 'bilinear', False),
input_size=(1, 2, 4, 4),
desc='bilinear_scale_tuple_shared_2d',
),
dict(
module_name='Upsample',
constructor_args=(None, (2, 1), 'bilinear', False),
input_size=(1, 2, 4, 4),
desc='bilinear_scale_tuple_skewed_2d',
),
dict(
module_name='Upsample',
constructor_args=((4, 6), None, 'bilinear', True),
input_size=(1, 2, 4, 4),
desc='bilinear_tuple_2d_align_corners',
),
dict(
module_name='Upsample',
constructor_args=(None, (2, 1), 'bilinear', True),
input_size=(1, 2, 4, 4),
desc='bilinear_scale_tuple_skewed_2d_align_corners',
),
dict(
module_name='Upsample',
constructor_args=(12, None, 'nearest'),
input_size=(1, 2, 4, 4, 4),
desc='nearest_3d',
),
dict(
module_name='Upsample',
constructor_args=((12, 16, 16), None, 'nearest'),
input_size=(1, 2, 3, 4, 4),
desc='nearest_tuple_3d',
),
dict(
module_name='Upsample',
constructor_args=(None, 4, 'nearest'),
input_size=(1, 2, 4, 4, 4),
desc='nearest_scale_3d',
),
dict(
module_name='Upsample',
constructor_args=(12, None, 'trilinear', False),
input_size=(1, 2, 4, 4, 4),
desc='trilinear_3d',
),
dict(
module_name='Upsample',
constructor_args=((4, 6, 6), None, 'trilinear', False),
input_size=(1, 2, 2, 3, 3),
desc='trilinear_tuple_3d',
),
dict(
module_name='Upsample',
constructor_args=(None, 3, 'trilinear', False),
input_size=(1, 2, 3, 4, 4),
desc='trilinear_scale_3d',
# See https://github.com/pytorch/pytorch/issues/5006
precision=3e-4,
),
dict(
module_name='Upsample',
constructor_args=((4, 6, 6), None, 'trilinear', True),
input_size=(1, 2, 2, 3, 3),
desc='trilinear_tuple_3d_align_corners',
),
dict(
module_name='Upsample',
constructor_args=(None, 3, 'trilinear', True),
input_size=(1, 2, 3, 4, 4),
desc='trilinear_scale_3d_align_corners',
# See https://github.com/pytorch/pytorch/issues/5006
precision=3e-4,
),
dict(
module_name='AdaptiveMaxPool1d',
constructor_args=(3,),
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5),
),
dict(
module_name='AdaptiveMaxPool2d',
constructor_args=(3,),
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc='single',
),
dict(
module_name='AdaptiveMaxPool2d',
constructor_args=((3, 4),),
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc='tuple',
),
dict(
module_name='AdaptiveMaxPool2d',
constructor_args=((3, None),),
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc='tuple_none',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=(3,),
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc='single',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=((3, 4, 5),),
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc='tuple',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=((3, None, 5),),
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc='tuple_none',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=(3,),
input_fn=lambda: _rand_tensor_non_equal(2, 3, 12, 9, 3),
desc='single_nonatomic',
),
dict(
module_name='AdaptiveMaxPool3d',
constructor_args=((3, 4, 5),),
input_fn=lambda: _rand_tensor_non_equal(2, 3, 6, 4, 10),
desc='tuple_nonatomic',
),
dict(
module_name='AdaptiveAvgPool1d',
constructor_args=(3,),
input_fn=lambda: torch.rand(1, 3, 5),
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=(3,),
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc='single',
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=((3, 4),),
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc='tuple',
),
dict(
module_name='AdaptiveAvgPool2d',
constructor_args=((3, None),),
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc='tuple_none',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=(3,),
input_fn=lambda: torch.rand(2, 3, 5, 2, 7),
desc='single',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=((3, 4, 5),),
input_fn=lambda: torch.rand(2, 3, 5, 3, 7),
desc='tuple',
),
dict(
module_name='AdaptiveAvgPool3d',
constructor_args=((None, 4, 5),),
input_fn=lambda: torch.rand(2, 3, 5, 3, 7),
desc='tuple_none',
),
dict(
module_name='SELU',
input_size=(3, 2, 5),
check_inplace=True
),
dict(
module_name='SELU',
input_size=(),
check_inplace=True,
desc='scalar'
),
dict(
module_name='CELU',
input_size=(3, 2, 5),
constructor_args=(2.,),
check_inplace=True,
reference_fn=lambda x, _: torch.where(x >= 0, x, 2. * ((.5 * x).exp() - 1))
),
dict(
module_name='CELU',
input_size=(),
constructor_args=(2.,),
check_inplace=True,
reference_fn=lambda x, _: torch.where(x >= 0, x, 2. * ((.5 * x).exp() - 1)),
desc='scalar'
),
dict(
module_name='GLU',
input_size=(5, 6),
),
dict(
module_name='GLU',
constructor_args=(1,),
input_size=(5, 6, 7),
desc='dim'
),
dict(
constructor=wrap_functional(F.softmax, dim=-1),
input_size=(2, 128), # trigger the last-dim algo in CUDA
fullname='softmax_lastdim',
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1),
input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
fullname='softmax_spatial_special',
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1),
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname='softmax_spatial',
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=0),
input_size=(2, 3, 4, 5),
fullname='softmax_functional_dim0',
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=3),
input_size=(2, 3, 4, 5),
fullname='softmax_functional_dim3',
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=-1),
input_size=(),
fullname='softmax_functional_scalar',
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=-1),
input_size=(2, 128), # trigger the last-dim algo in CUDA
fullname='log_softmax_lastdim',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=1),
input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
fullname='log_softmax_spatial_special',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=1),
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname='log_softmax_spatial',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=0),
input_size=(2, 3, 4, 5),
fullname='log_softmax_dim0',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=3),
input_size=(2, 3, 4, 5),
fullname='log_softmax_dim3',
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=0),
input_size=(),
fullname='log_softmax_scalar',
pickle=False,
),
dict(
fullname='Unfold',
constructor=lambda: nn.Unfold((2, 2), (1, 1), (0, 0), (1, 1)),
input_size=(2, 4, 3, 3),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname='Fold',
constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)),
input_size=(2, 16, 4),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname='Unfold_int_input',
constructor=lambda: nn.Unfold(2, 1, 0, 1),
input_size=(2, 4, 3, 3),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname='Fold_int_input',
constructor=lambda: nn.Fold(3, 2, 1, 0, 1),
input_size=(2, 16, 4),
check_gradgrad=False,
test_cuda=True,
),
dict(
module_name='Threshold',
constructor_args=(2, 1),
input_size=(),
check_inplace=True,
desc='threshold_value_scalar'
),
dict(
module_name='ReLU',
input_size=(),
check_inplace=True,
desc='scalar'
),
dict(
module_name='ReLU6',
input_size=(),
check_inplace=True,
desc='scalar'
),
dict(
module_name='RReLU',
constructor_args=(0.1, 0.9),
input_size=(),
desc='with_up_down_scalar',
test_cuda=False,
),
dict(
module_name='Hardtanh',
input_size=(),
reference_fn=lambda i, _: i.clamp(-1, 1),
desc='scalar'
),
dict(
module_name='Sigmoid',
input_size=(),
desc='scalar',
),
dict(
module_name='Tanh',
input_size=(),
desc='scalar',
),
dict(
module_name='Softmax',
constructor_args=(0,),
input_size=(),
reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(0, True)),
desc='scalar',
),
dict(
module_name='LogSoftmax',
constructor_args=(0,),
input_size=(),
reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(),
desc='multiparam_scalar',
),
dict(
module_name='ELU',
constructor_args=(2.,),
input_size=(),
desc='scalar',
),
dict(
module_name='Hardshrink',
constructor_args=(2.,),
input_size=(),
desc='scalar',
),
dict(
module_name='LeakyReLU',
constructor_args=(0.5,),
input_size=(),
check_inplace=True,
desc='with_negval_scalar'
),
dict(
module_name='LogSigmoid',
input_size=(),
reference_fn=lambda i, _: i.sigmoid().log(),
desc='scalar'
),
dict(
module_name='Softplus',
constructor_args=(2, -100),
input_size=(),
reference_fn=(lambda i, _: ((i * 2) > -100).type_as(i) * i +
((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))),
desc='beta_threshold_scalar',
),
dict(
module_name='Softshrink',
constructor_args=(1,),
input_size=(),
desc='lambda_scalar',
),
dict(
module_name='PReLU',
input_size=(),
reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
desc='scalar',
),
dict(
module_name='Softsign',
input_size=(),
reference_fn=lambda i, _: i.div(1 + torch.abs(i)),
desc='scalar',
),
dict(
module_name='Softmin',
constructor_args=(0,),
input_size=(),
desc='scalar',
),
dict(
module_name='Tanhshrink',
input_size=(),
desc='scalar',
),
]
# Instantiate and register a NewModuleTest for every module test spec; specs
# that carry a 'module_name' (rather than an explicit constructor) use the
# nn class of that name as their constructor.
for test_params in module_tests + new_module_tests:
    # TODO: CUDA is not implemented yet
    if 'constructor' not in test_params:
        name = test_params.pop('module_name')
        test_params['constructor'] = getattr(nn, name)
    # Remove the optional decorator before constructing so it is not passed
    # to NewModuleTest as a keyword argument.
    decorator = test_params.pop('decorator', None)
    test = NewModuleTest(**test_params)
    add_test(test, decorator)
    if 'check_eval' in test_params:
        # create a new test that is identical but that sets module.training to False
        desc = test_params.get('desc', None)
        test_params['desc'] = 'eval' if desc is None else desc + '_eval'

        def gen_eval_constructor(constructor):
            # Wrap `constructor` so the built module starts with training=False.
            def eval_constructor(*args, **kwargs):
                cons = constructor(*args, **kwargs)
                cons.training = False
                return cons
            eval_constructor.__name__ = constructor.__name__
            return eval_constructor

        test_params['constructor'] = gen_eval_constructor(test_params['constructor'])
        test = NewModuleTest(**test_params)
        add_test(test, decorator)
# Instantiate and register a NewCriterionTest for every criterion test spec.
for test_params in criterion_tests + new_criterion_tests:
    name = test_params.pop('module_name')
    test_params['constructor'] = getattr(nn, name)
    # Pop the optional decorator BEFORE constructing the test; previously it
    # was popped afterwards, so a spec carrying 'decorator' would have leaked
    # it into NewCriterionTest(**test_params) as an unexpected keyword.
    # This also matches the module-test registration loop.
    decorator = test_params.pop('decorator', None)
    test = NewCriterionTest(**test_params)
    add_test(test, decorator)
    if 'check_sum_reduction' in test_params:
        desc = test_params.get('desc', None)
        test_params['desc'] = 'sum_reduction' if desc is None else desc + '_sum_reduction'

        def gen_sum_reduction_constructor(constructor):
            # Wrap `constructor` so the criterion is built with reduction='sum'.
            def sum_reduction_constructor(*args, **kwargs):
                cons = constructor(*args, reduction='sum', **kwargs)
                return cons
            sum_reduction_constructor.__name__ = constructor.__name__
            return sum_reduction_constructor

        test_params['constructor'] = gen_sum_reduction_constructor(test_params['constructor'])
        test = NewCriterionTest(**test_params)
        add_test(test, decorator)
class UnpoolingNet(nn.Module):
    """Feed a pooling module's (output, indices) pair into its matching unpool module."""

    def __init__(self, pool, unpool):
        super(UnpoolingNet, self).__init__()
        self.pool = pool
        self.unpool = unpool

    def forward(self, input):
        # `pool` is expected to be constructed with return_indices=True, so it
        # yields the (values, indices) tuple that unpool consumes.
        pooled_and_indices = self.pool(input)
        return self.unpool(*pooled_and_indices)
# Register MaxUnpool{1,2,3}d tests: each net max-pools and then un-pools, so
# the unpool module under test receives the (values, indices) pair it needs.
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool1d(2, return_indices=True),
        nn.MaxUnpool1d(2)),
    input_size=(1, 1, 4),
    fullname='MaxUnpool1d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool2d(2, return_indices=True),
        nn.MaxUnpool2d(2)),
    input_size=(1, 1, 2, 4),
    fullname='MaxUnpool2d_net',))
add_test(NewModuleTest(
    constructor=lambda: UnpoolingNet(
        nn.MaxPool3d(2, return_indices=True),
        nn.MaxUnpool3d(2)),
    input_size=(1, 1, 2, 4, 6),
    fullname='MaxUnpool3d_net',
    check_gradgrad=False,))
class _AdaptiveLogSoftmaxWithLoss(nn.AdaptiveLogSoftmaxWithLoss):
def __call__(self, input):
t = torch.tensor([0, 1, 4, 8]).to(input.device)
return nn.AdaptiveLogSoftmaxWithLoss.__call__(self, input, t).output
# AdaptiveLogSoftmaxWithLoss normally takes (input, target); the wrapper class
# defined in this file supplies fixed targets so it fits NewModuleTest.
add_test(NewModuleTest(
    constructor=lambda: _AdaptiveLogSoftmaxWithLoss(16, 10, [2, 6]),
    input_size=(4, 16),
    fullname='AdaptiveLogSoftmax'))
# Optional test sharding: when both env vars are set, define the unittest
# `load_tests` protocol hook so this process runs only its shard of the suite.
num_shards = os.environ.get('TEST_NN_NUM_SHARDS', None)
shard = os.environ.get('TEST_NN_SHARD', None)
if num_shards is not None and shard is not None:
    num_shards = int(num_shards)
    shard = int(shard)

    def load_tests(loader, tests, pattern):
        # Deterministically bucket each test by the SHA-256 of its string form
        # so shards partition the suite stably across processes.
        test_suite = unittest.TestSuite()
        for test_group in tests:
            for test in test_group:
                hash_id = int(hashlib.sha256(str(test).encode('utf-8')).hexdigest(), 16)
                if hash_id % num_shards == shard:
                    test_suite.addTest(test)
        return test_suite
# Script entry point: run the collected test suite.
if __name__ == '__main__':
    run_tests()
|
import os
import sys
import inquirer
from colorama import Fore, Style
from .constants import ProjInfo
def print_blue(text):
    """Print *text* in blue."""
    colored = Fore.BLUE + text + Style.RESET_ALL
    print(colored)


def print_red(text):
    """Print *text* in red."""
    colored = Fore.RED + text + Style.RESET_ALL
    print(colored)


def print_yellow(text):
    """Print *text* in yellow."""
    colored = Fore.YELLOW + text + Style.RESET_ALL
    print(colored)


def print_green(text):
    """Print *text* in green."""
    colored = Fore.GREEN + text + Style.RESET_ALL
    print(colored)
def print_blue_bold(text):
print(Fore.BLUE + Style.BRIGHT + text + Style.RESET_ALL)
def print_red_bold(text):
print(Fore.RED + Style.BRIGHT + text + Style.RESET_ALL)
def print_yellow_bold(text):
print(Fore.YELLOW + Style.BRIGHT + text + Style.RESET_ALL)
def print_green_bold(text):
print(Fore.GREEN + Style.BRIGHT + text + Style.RESET_ALL)
def print_path_blue(text, path):
    """Print a bold blue label followed by a normal-weight path."""
    print(f"{Fore.BLUE}{Style.BRIGHT}{text}", f"{Style.NORMAL}{path}{Style.RESET_ALL}")


def print_path_red(text, path):
    """Print a bold red label followed by a normal-weight path."""
    print(f"{Fore.RED}{Style.BRIGHT}{text}", f"{Style.NORMAL}{path}{Style.RESET_ALL}")


def print_path_yellow(text, path):
    """Print a bold yellow label followed by a normal-weight path."""
    print(f"{Fore.YELLOW}{Style.BRIGHT}{text}", f"{Style.NORMAL}{path}{Style.RESET_ALL}")


def print_path_green(text, path):
    """Print a bold green label followed by a normal-weight path."""
    print(f"{Fore.GREEN}{Style.BRIGHT}{text}", f"{Style.NORMAL}{path}{Style.RESET_ALL}")
def print_dry_run_copy_info(source, dest):
    """Show source -> dest copy. Replaces expanded ~ with ~ if it's at the beginning of paths.
    source and dest are trimmed in the middle if needed. Removed characters will be replaced by ...
    :param source: Can be of type str or Path
    :param dest: Can be of type str or Path
    """

    def _collapse_home(raw_path):
        """Substitute a leading expanded home directory with '~'."""
        home = os.path.expanduser("~")
        raw_path = str(raw_path)
        return raw_path.replace(home, "~") if raw_path.startswith(home) else raw_path

    def _middle_ellipsis(text: str, acceptable_len: int):
        """Middle truncate a string
        https://www.xormedia.com/string-truncate-middle-with-ellipsis/
        """
        if len(text) <= acceptable_len:
            return text
        # half of the size, minus the 3 .'s
        tail_len = int(acceptable_len / 2 - 3)
        # whatever's left
        head_len = int(acceptable_len - tail_len - 3)
        return f"{text[:head_len]}...{text[-tail_len:]}"

    shown_source = _collapse_home(source)
    shown_dest = _collapse_home(dest)
    longest_allowed_path_len = 87
    if len(shown_source) + len(shown_dest) > longest_allowed_path_len:
        shown_source = _middle_ellipsis(shown_source, longest_allowed_path_len)
        shown_dest = _middle_ellipsis(shown_dest, longest_allowed_path_len)
    print(f"{Fore.YELLOW}{Style.BRIGHT}{shown_source}{Style.NORMAL}", "->", f"{Style.BRIGHT}{shown_dest}{Style.RESET_ALL}")
def print_version_info(cli=True):
    """
    Formats version differently for CLI and splash screen.
    """
    version = f"v{ProjInfo.VERSION} by {ProjInfo.AUTHOR_FULL_NAME} (@{ProjInfo.AUTHOR_GITHUB})"
    if cli:
        print(version)
    else:
        # Splash screen: bold red, indented, with trailing blank line.
        print(f"{Fore.RED}{Style.BRIGHT}\t{version}\n{Style.RESET_ALL}")
def splash_screen():
    """Display splash graphic, and then stylized version and author info."""
    print(f"{Fore.YELLOW}{Style.BRIGHT}\n{ProjInfo.LOGO}{Style.RESET_ALL}")
    print_version_info(cli=False)
def print_section_header(title, color):
    """Prints variable sized section header."""
    # '#' bar sized to wrap "# <title>".
    bar = "#" * (len(title) + 2)
    print(f"\n{color}{Style.BRIGHT}{bar}")
    print("#", title)
    print(f"{bar}\n{Style.RESET_ALL}")
def print_pkg_mgr_backup(mgr):
    """Announce that *mgr*'s package list is being backed up."""
    print(f"{Fore.BLUE}Backing up {Style.BRIGHT}{Fore.YELLOW}{mgr}{Fore.BLUE}{Style.NORMAL} packages list...{Style.RESET_ALL}")
def print_pkg_mgr_reinstall(mgr):
    """Announce that *mgr* packages are being reinstalled."""
    print(f"{Fore.BLUE}Reinstalling {Style.BRIGHT}{Fore.YELLOW}{mgr}{Fore.BLUE}{Style.NORMAL}...{Style.RESET_ALL}")
# TODO: BUG: Why does moving this to prompts.py cause circular imports?
def prompt_yes_no(message, color, invert=False):
    """
    Print question and return True or False depending on user selection from list.
    """
    option_order = (' No', ' Yes') if invert else (' Yes', ' No')
    question = inquirer.List(
        'choice',
        message=color + Style.BRIGHT + message + Fore.BLUE,
        choices=option_order,
    )
    answers = inquirer.prompt([question])
    if not answers:
        # Prompt aborted (e.g. Ctrl-C): bail out.
        sys.exit(1)
    return answers.get('choice').strip().lower() == 'yes'
|
<gh_stars>0
from model import Item
import inspect
import os
import random
import sqlite3
import string
# Alphabet used both for password generation and for the shift cipher below.
CHARS = string.ascii_letters + string.digits + string.punctuation
# Length of generated passwords.  (The "LENTH" typo is kept because other
# functions in this module reference the name.)
PASSWORD_LENTH = 10
# create dir for db
# Resolve the directory of this source file so the database lives beside it.
filename = inspect.getframeinfo(inspect.currentframe()).filename
# os.path.join keeps the path portable; the original hard-coded Windows
# '\\res\\' separators, which breaks on POSIX systems.
path = os.path.join(os.path.dirname(os.path.abspath(filename)), 'res')
os.makedirs(path, exist_ok=True)
# connect to db (creates the file on first run)
database = sqlite3.connect(os.path.join(path, 'passwords.db'))
with database:
    cursor = database.cursor()
    cursor.execute("""CREATE TABLE IF NOT EXISTS passwords (service TEXT, login TEXT, password TEXT)""")
def generate_password():
    """
    Return a random password of PASSWORD_LENTH characters drawn from CHARS.
    """
    # secrets provides a CSPRNG; the module-level `random` is predictable
    # and unsuitable for generating passwords.
    import secrets
    return ''.join(secrets.choice(CHARS) for _ in range(PASSWORD_LENTH))
def encrypt_password(password, key):
    """
    Encrypt *password* with a Caesar-style shift derived from *key*.

    The shift is the integer mean of the CHARS-indices of the key's
    characters (characters not found in CHARS contribute 0).  Each password
    character found in CHARS is shifted forward with wrap-around; password
    characters outside CHARS are silently dropped, matching the original
    nested-loop implementation while avoiding its O(n*len(CHARS)) scans.

    Raises TypeError when key is not iterable (e.g. None) — callers rely
    on catching this to store the password unencrypted.
    """
    shift = 0
    for ch in key:  # raises TypeError for key=None, as callers expect
        pos = CHARS.find(ch)
        if pos != -1:
            shift += pos
    shift = int(shift / len(key))
    encrypted = ''
    for ch in password:
        pos = CHARS.find(ch)
        if pos != -1:
            encrypted += CHARS[(pos + shift) % len(CHARS)]
    return encrypted
def decrypt_password(password, key):
    """
    Decrypt *password* previously produced by encrypt_password with *key*.

    Recomputes the same mean-of-indices shift and applies it backwards with
    wrap-around.  Characters outside CHARS are silently dropped, matching
    the original nested-loop implementation.
    """
    shift = 0
    for ch in key:  # raises TypeError for key=None, mirroring encrypt_password
        pos = CHARS.find(ch)
        if pos != -1:
            shift += pos
    shift = int(shift / len(key))
    decrypted = ''
    for ch in password:
        pos = CHARS.find(ch)
        if pos != -1:
            decrypted += CHARS[(pos - shift) % len(CHARS)]
    return decrypted
def get_exists_user_data(func_name):
    """
    get service and login, that exists in db, from user

    Interactively asks for a service and login that must already exist in
    the database.  Returns an Item with service/login/key filled in, or
    None when input is invalid or the record does not exist.  The
    encryption key prompt is skipped when called from delete_writing.
    """
    global cursor
    change = input('Вывести уч. записи y/n? ')
    if change == 'y':
        cursor.execute("""SELECT service FROM passwords""")
        services = cursor.fetchall()
        if not services:
            print("Нет ни одной уч. записи\n")
            return None
        # de-duplicate service names before listing them
        services = set(services)
        for i in services:
            print(i[0])
    elif change == 'n':
        pass
    else:
        print('Неверная команда')
        return None
    data = Item()
    data.service = input('От чего пароль? ')
    cursor.execute("""SELECT login FROM passwords WHERE service = ?""", (data.service,))
    logins = cursor.fetchall()
    # if not exists this login in db
    if not logins:
        print('Нет такой уч. записи\n')
        return None
    print('Уч. записи в этом сервисе:')
    for i in logins:
        print(i[0])
    data.login = input('Логин ')
    # BUG FIX: the original loop returned None unless the entered login was
    # a substring of the *first* mismatching stored login, so any service
    # with more than one login was effectively unreachable.  Accept the
    # login when it exactly matches any stored login instead.
    if data.login not in (row[0] for row in logins):
        print('Нет такой уч. записи\n')
        return None
    if delete_writing.__name__ == func_name:
        # Deletion does not need the encryption key.
        data.key = None
    else:
        data.key = input('Ключ шифрования ')
        # run current function
        if not data.service or not data.login or not data.key:
            print('Неверная команда')
            return None
    return data
def get_not_exists_user_data():
    """
    get user data, that not exists in db

    Asks the user for service, login and encryption key for a record that
    is about to be created.  Returns an Item, or None on invalid input.
    """
    global cursor
    data = Item()
    data.service = input('От чего пароль? ')
    data.login = input('Логин ')
    data.key = input('Ключ шифрования ')
    # BUG FIX: the original used `and`, which only rejected input when BOTH
    # fields were empty, allowing records with an empty service or login.
    if not data.service or not data.login:
        print('Введите корректные данные\n')
        return None
    return data
def read_writing():
    """Look up an existing record and print its decrypted password."""
    global cursor
    data = get_exists_user_data(read_writing.__name__)
    if data is None:
        return
    cursor.execute("""SELECT password FROM passwords WHERE service = ? AND login = ? """,
                   (data.service, data.login))
    password = cursor.fetchone()[0]
    decrypted_password = decrypt_password(password, data.key)
    print(f'Пароль {decrypted_password}')
    # NOTE(review): commit after a SELECT is a no-op; harmless but unnecessary.
    database.commit()
def add_writing():
    """Create a new record with a freshly generated, encrypted password."""
    global cursor
    data = get_not_exists_user_data()
    if data is None:
        return
    cursor.execute("""SELECT password FROM passwords WHERE service = ? AND login = ?""", (data.service, data.login))
    if not cursor.fetchone() is None:
        # A row for this service/login pair already exists.
        print('Такая уч. запись уже существует\n')
    else:
        password = generate_password()
        try:
            encrypted_password = encrypt_password(password, data.key)
        except TypeError:
            # No usable key: store the password unencrypted.
            encrypted_password = password
        cursor.execute("""INSERT INTO passwords(service, login, password) VALUES(?, ?, ?) """,
                       (data.service, data.login, encrypted_password))
        # Show the *plaintext* password to the user once, at creation time.
        print(f'Пароль для {data.service} - {password}')
    database.commit()
def rewrite_writing():
    """Interactively update the service name, login, or password of a record."""
    global cursor
    data = get_exists_user_data(rewrite_writing.__name__)
    if data is None:
        return
    choice = input('Что поменять?(serv/log/pass) ')
    if choice == 'serv':
        new_service = input('Новое название сервиса ')
        cursor.execute("""UPDATE passwords SET service = ? WHERE service = ? AND login = ? """,
                       (new_service, data.service, data.login))
        print(f'Новое название сервиса - {new_service}')
    elif choice == 'log':
        new_login = input('Новый логин ')
        cursor.execute("""UPDATE passwords SET login = ? WHERE service = ? AND login = ? """,
                       (new_login, data.service, data.login))
        print(f'Новый логин - {new_login}')
    elif choice == 'pass':
        ans = input('Вы уверены y/n? ')
        if ans == 'y':
            # Generate a new password and store it encrypted.
            password = generate_password()
            try:
                encrypted_password = encrypt_password(password, data.key)
            except TypeError:
                # No usable key: abort without touching the stored password.
                return
            cursor.execute("""UPDATE passwords SET password = ? WHERE service = ? AND login = ? """,
                           (encrypted_password, data.service, data.login))
            print(f'Новый пароль - {password}')
        elif ans == 'n':
            print('Отмена ')
            return
        else:
            print('Неверная команда')
            return
    else:
        print('Неверная команда ')
        return
    database.commit()
def delete_writing():
    """Delete an existing record after interactive confirmation."""
    global cursor
    data = get_exists_user_data(delete_writing.__name__)
    if data is None:
        return
    choice = input('Вы уверены y/n? ')
    if choice == 'y':
        cursor.execute("""DELETE FROM passwords WHERE service = ? AND login = ?""",
                       (data.service, data.login))
        print('Запись удалена')
    elif choice == 'n':
        print('Отмена ')
        return
    else:
        print('Неверная команда')
    database.commit()
def create_only_password():
    """
    generate password without add it in db
    """
    new_password = generate_password()
    print(f'Новый пароль - {new_password}')
|
"""This module includes some general-purpose utility functions and classes
"""
import os
import shutil
import socket
import pickle
import logging
import time
from multiprocessing import Process, Queue
from queue import Empty
# Public API of this module.  NOTE(review): SocketSender,
# SocketParallerRunner and Chronometer are defined below but not exported —
# confirm whether that omission is intentional before relying on
# `from ... import *`.
__all__ = [
    "ParallerRunner",
    "SocketCollector",
    "TestDirectory",
]
class SocketSender:
    """Sends a pickle-serialized object via TCP socket.

    Parameters
    ----------
    address : str
        The address of the server.
    port : int
        The TCP server's port.
    """

    def __init__(self, address, port):
        self._address = address
        self._port = port

    def send(self, obj):
        """Send the given object to the configured server.

        Note that the socket is opened in this call and closed immediately
        after the object has been sent.

        Parameters
        ----------
        obj
            The object to be sent. Anything pickle-serializable is OK, though
            its maximum (serialized) size must fit in an unsigned 32-bit int.

        Raises
        ------
        ValueError
            If the object is too big.
        """
        # Serialize and validate *before* opening the socket: the original
        # connected first and leaked the socket when ValueError was raised
        # or when connect() itself failed.
        msg = pickle.dumps(obj)
        if len(msg) >= 2 ** 32:
            raise ValueError(
                f"The serialization of the object is too big ({len(msg)} bytes)"
            )
        # 4-byte big-endian length header, the framing SocketCollector expects.
        hdr = int.to_bytes(len(msg), 4, byteorder="big", signed=False)
        # socket objects are context managers: close() is guaranteed.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.connect((self._address, self._port))
            logging.info(
                f"Sending object of size ({len(msg)}) to {self._address}:{self._port}"
            )
            # sendall() retries partial sends; plain send() may transmit fewer
            # bytes than requested.
            s.sendall(hdr + msg)
class SocketCollector:
    """Collects pickle-serialized objects sent via a TCP socket.

    Each wire message is a 4-byte big-endian unsigned length header followed
    by the pickle payload (the framing produced by SocketSender).

    Parameters
    ----------
    address : str
        The address where to bind the listening socket.
    port : int
        The port where to bind the listing socket.
    """
    def __init__(self, address, port):
        self._address = address
        self._port = port
    def collect(self, expected):
        """Collect a given number of objects, then quit.

        Parameters
        ----------
        expected : int
            The number of objects expected to be collected.

        Returns
        -------
        list
            The deserialized objects, in arrival order.
        """
        ret = []  # the list of objects returned
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((self._address, self._port))
            s.listen()
            logging.info(
                (
                    f"Listening on {self._address}:{self._port}, "
                    f"waiting for {expected} objects"
                )
            )
            # Accept clients one at a time until enough objects have arrived.
            while len(ret) < expected:
                clientsocket, address = s.accept()
                logging.debug(f"Connection from {address} established")
                try:
                    # A single client may send several length-prefixed objects.
                    while len(ret) < expected:
                        hdr = clientsocket.recv(4)
                        if not hdr:
                            # Peer closed the connection: accept a new one.
                            clientsocket.close()
                            break
                        msg_len = int.from_bytes(hdr, byteorder="big", signed=False)
                        buf = b""
                        total = 0
                        # recv() may return partial data; loop until the whole
                        # payload has been read.
                        # NOTE(review): if the peer dies mid-message, recv()
                        # returns b"" forever and this inner loop never ends —
                        # confirm whether a disconnect guard is needed.
                        while total != msg_len:
                            msg = clientsocket.recv(msg_len - total)
                            buf += msg
                            total += len(msg)
                        assert len(buf) == msg_len
                        logging.info(
                            f"object received, {expected - len(ret) - 1} to go"
                        )
                        ret.append(pickle.loads(buf))
                finally:
                    clientsocket.close()
        finally:
            s.close()
        return ret
class SocketParallerRunner:
    """Run a given function in parallel and return the list of their return values.
    Spawns a number of workers and synchronize their I/O via `Queue` and sockets.
    Parameters
    ----------
    address : str
        The address where to bind the listening TCP socket.
    port : int
        The port where to bind the listing TCP socket.
    """
    def __init__(self, address, port):
        self._address = address
        self._port = port
    def _sub_func(self, qin, func):
        """Single worker called by `run()`.

        Pops arguments from *qin* until it is empty; each result is sent
        back to the parent over TCP via SocketSender.
        """
        while True:
            try:
                args = qin.get_nowait()
            except Empty:
                return
            SocketSender(self._address, self._port).send(func(args))
    def run(self, nworkers, func, args):
        """Run a given function in parallel and return the list of their return values.
        Spawns a number of workers and synchronize their I/O via `Queue` and sockets.
        Parameters
        ----------
        nworkers : int
            The number of workers to spawn.
        func : lambda
            The function to call.
        args : list
            The list of arguments. The size of this list is the same as the number
            of executions of the function.
        Returns
        -------
        A list of items, one for each function invoked.
        Raises
        ------
        ValueError
            If the number of workers is smaller than 1.
        """
        if nworkers < 1:
            raise ValueError(f"Invalid number of workers: {nworkers}")
        # Fill the input queue before starting any worker.
        qin = Queue()
        for arg in args:
            qin.put(arg)
        processes = []
        for _ in range(nworkers):
            # The unbound method is used as target so each child receives
            # (self, qin, func) explicitly.
            p = Process(target=SocketParallerRunner._sub_func, args=(self, qin, func))
            p.start()
            processes.append(p)
        # Collect results over TCP *before* joining: a worker blocks until
        # its send() has been accepted by the collector.
        collector = SocketCollector(self._address, self._port)
        ret = collector.collect(expected=len(args))
        for p in processes:
            p.join()
        return ret
class ParallerRunner:
    """Run a function over a list of arguments in parallel worker processes."""

    @staticmethod
    def _sub_func(qin, qout, func):
        """Single worker called by `run()`: drain qin, push each result to qout."""
        while True:
            try:
                args = qin.get_nowait()
            except Empty:
                return
            qout.put(func(args))

    @staticmethod
    def run(nworkers, func, args):
        """Run a given function in parallel and return the list of their return values.

        Parameters
        ----------
        nworkers : int
            The number of workers to spawn.
        func : lambda
            The function to call.
        args : list
            The list of arguments. The size of this list is the same as the number
            of executions of the function.

        Returns
        -------
        A list of items, one for each function invoked (arbitrary order).

        Raises
        ------
        ValueError
            If the number of workers is smaller than 1.
        """
        if nworkers < 1:
            raise ValueError(f"Invalid number of workers: {nworkers}")
        qin = Queue()
        for arg in args:
            qin.put(arg)
        qout = Queue()
        processes = []
        for _ in range(nworkers):
            p = Process(target=ParallerRunner._sub_func, args=(qin, qout, func))
            p.start()
            processes.append(p)
        # BUG FIX: drain the output queue *before* joining the workers.  A
        # child blocks inside qout.put() until its data is consumed, so
        # joining first deadlocks once the results exceed the pipe buffer.
        # Reading exactly len(args) items also removes the empty()/get race
        # that could silently drop late results.
        ret = [qout.get() for _ in range(len(args))]
        for p in processes:
            p.join()
        return ret
class TestDirectory:
    """Create a directory for tests that is removed upon exiting the context."""

    def __init__(self):
        # Start from a clean slate, then create the directory.
        self._path = "test_directory"
        self._rmdir()
        os.mkdir(self._path)

    def __enter__(self):
        return self._path

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._rmdir()

    def _rmdir(self):
        """Remove the directory tree if it currently exists."""
        if os.path.exists(self._path):
            shutil.rmtree(self._path)
class Chronometer:
    """Logs the time required to execute the instructions in a `with` block."""

    def __init__(self):
        self._start = time.monotonic()

    def __enter__(self):
        # Reset on entry so the measurement excludes any time spent between
        # construction and the `with` statement; return self so `as c` binds
        # the chronometer (the original returned None).
        self._start = time.monotonic()
        return self

    def __exit__(self, type, value, traceback):
        logging.debug(f"Elapsed time: {time.monotonic() - self._start}")
|
<reponame>InsightLab/pymove-osmnx
import numpy as np
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from pymove.core.dataframe import MoveDataFrame
from pymove_osmnx.utils.interpolate import (
check_time_dist,
feature_values_using_filter,
fix_time_not_in_ascending_order_all,
fix_time_not_in_ascending_order_id,
generate_distances,
)
# Sample trajectory used by the tests below: one point per minute for a
# single trajectory id ('1'); columns are [lat, lon, datetime, traj_id].
list_data = [
    [-3.779936, -38.679217, '2008-06-04 09:04:59', '1'],
    [-3.779240, -38.678747, '2008-06-04 09:05:59', '1'],
    [-3.778692, -38.678440, '2008-06-04 09:06:59', '1'],
    [-3.778191, -38.678071, '2008-06-04 09:07:59', '1'],
    [-3.779200, -38.675917, '2008-06-04 09:08:59', '1'],
]
def _default_move_df():
    """Build a small MoveDataFrame fixture: three points for id 1 plus a
    duplicate last point under id 2."""
    rows = [
        [39.984094, 116.319236, '2008-10-23 05:53:05', 1],
        [39.984198, 116.319322, '2008-10-23 05:53:06', 1],
        [39.984224, 116.319402, '2008-10-23 05:53:11', 1],
        [39.984224, 116.319402, '2008-10-23 05:53:11', 2],
    ]
    return MoveDataFrame(data=rows)
# Module-level fixture shared by the tests below, built from list_data;
# the integers map column positions: 0=lat, 1=lon, 2=datetime, 3=traj_id.
move_df = MoveDataFrame(
    data=list_data,
    latitude=0,
    longitude=1,
    datetime=2,
    traj_id=3,
)
def test_generate_distances():
    """generate_distances must append per-edge and cumulative distance columns."""
    move_distances = generate_distances(move_df)
    cols = [
        'lat',
        'lon',
        'datetime',
        'id',
        'edgeDistance',
        'distFromTrajStartToCurrPoint'
    ]
    # Expected frame: the first two points carry zero distances; afterwards
    # distFromTrajStartToCurrPoint accumulates edgeDistance.
    expected = DataFrame(
        data=[
            [
                -3.779936,
                -38.679217,
                Timestamp('2008-06-04 09:04:59'),
                '1',
                0.0,
                0.0
            ],
            [
                -3.77924,
                -38.678747,
                Timestamp('2008-06-04 09:05:59'),
                '1',
                0.0,
                0.0
            ],
            [
                -3.778692,
                -38.67844,
                Timestamp('2008-06-04 09:06:59'),
                '1',
                70.121,
                70.121
            ],
            [
                -3.778191,
                -38.678071,
                Timestamp('2008-06-04 09:07:59'),
                '1',
                69.14,
                139.261
            ],
            [
                -3.7792,
                -38.675917,
                Timestamp('2008-06-04 09:08:59'),
                '1',
                254.009,
                393.27
            ]
        ],
        columns=cols
    )
    assert_frame_equal(move_distances, expected)
    assert len(move_distances) == 5
def test_check_time_dist():
    """check_time_dist() must accept distances generated from move_df."""
    # Call directly: if ValueError escapes, pytest reports the failure with
    # the real traceback.  The original `try/except ValueError: assert False`
    # pattern hid the error message and also printed debug output.
    check_time_dist(generate_distances(move_df), index_name='id')
def test_fix_time_not_in_ascending_order_id():
    """Timestamps for trajectory '1' come back strictly ascending."""
    fixed = fix_time_not_in_ascending_order_id(
        generate_distances(move_df), id_='1', index_name='id'
    )
    timestamps = fixed['datetime'].values
    assert np.all(timestamps[:-1] < timestamps[1:])
def test_fix_time_not_in_ascending_order_all():
    """After swapping two rows, the fixer restores strict time order."""
    frame = generate_distances(move_df)
    # Swap rows 1 and 3 to break the ascending timestamp order.
    second_row, fourth_row = frame.iloc[1].copy(), frame.iloc[3].copy()
    frame.iloc[3], frame.iloc[1] = second_row, fourth_row
    frame = fix_time_not_in_ascending_order_all(frame, index_name='id')
    timestamps = frame['datetime'].values
    assert np.all(timestamps[:-1] < timestamps[1:])
|
"""Plays a list of files from the local filesystem, with interactive options."""
import logging
import os
import sys
from multiprocessing import Process
from os import chdir, name, path, scandir
from pathlib import Path
from random import choice, sample
from typing import List
from soco import SoCo # type: ignore
from soco_cli.m3u_parser import parse_m3u
from soco_cli.play_local_file import is_supported_type, play_local_file
from soco_cli.utils import error_report
def interaction_manager(speaker_ip: str) -> None:
    """Read single-key commands from stdin and control the speaker.

    Intended to run in a child process (see play_file_list).  Commands are
    N(ext), P(ause), R(esume), each terminated by RETURN.
    """
    # Re-open fd 0: multiprocessing children do not inherit a usable stdin.
    sys.stdin = open(0)
    speaker = SoCo(speaker_ip)
    while True:
        try:
            # keypress = wait_for_keypress()
            keypress = input("")[0]
        except:
            # Bare RETURN raises IndexError on [0]; treat any read problem
            # as "no command".  NOTE(review): bare except also swallows
            # KeyboardInterrupt here — confirm this is intended.
            keypress = ""
        if keypress in ["N", "n"]:
            action = "NEXT"
            print("Next track ...")
            # Stopping playback makes play_local_file return, advancing the list.
            speaker.stop()
            logging.info(
                "Interactive mode: key = '{}', action = '{}'".format(keypress, action)
            )
        if keypress in ["P", "p"]:
            action = "PAUSE"
            print("Pause playback ...")
            try:
                speaker.pause()
            except Exception as e:
                logging.info("Exception ignored: {}".format(e))
            logging.info(
                "Interactive mode: key = '{}', action = '{}'".format(keypress, action)
            )
        if keypress in ["R", "r"]:
            action = "RESUME"
            print("Resume playback ...")
            try:
                speaker.play()
            except Exception as e:
                logging.info("Exception ignored: {}".format(e))
            logging.info(
                "Interactive mode: key = '{}', action = '{}'".format(keypress, action)
            )
        # Windows captures CTRL-C key-presses, so we handle them directly here
        if name == "nt" and keypress == "\x03":
            logging.info(
                "Windows CTRL-C: Stopping speaker '{}' and exiting".format(
                    speaker.player_name
                )
            )
            speaker.stop()
            os._exit(0)
def play_file_list(speaker: SoCo, tracks: List[str], options: str = "") -> bool:
    """Play a list of files (tracks) with absolute pathnames.

    Options (case-insensitive): 'p' print progress, 's' shuffle,
    'r' play one random track, 'i' interactive keyboard control.
    Returns False on invalid options, True otherwise.
    """
    options = options.lower()
    # Check for invalid options
    invalid = set(options) - set("psri")
    if invalid:
        error_report("Invalid option(s) '{}' supplied".format(invalid))
        return False
    if options != "":
        # Grab back stdout from api.run_command()
        sys.stdout = sys.__stdout__
    if "r" in options:
        # Choose a single random track
        track = choice(tracks)
        tracks = [track]
        logging.info("Choosing random track: {}".format(track))
    elif "s" in options:
        logging.info("Shuffling playlist")
        # For some reason, 'shuffle(tracks)' does not work
        tracks = sample(tracks, len(tracks))
    # Interactive mode
    keypress_process = None
    if "i" in options:
        print("Interactive mode actions: (N)ext, (P)ause, (R)esume + RETURN")
        try:
            # Keyboard handling runs in a daemon child so it cannot block
            # playback or outlive this function on error.
            logging.info("Interactive mode ... starting keypress process")
            keypress_process = Process(
                target=interaction_manager, args=(speaker.ip_address,), daemon=True
            )
            keypress_process.start()
            logging.info("Process PID {} created".format(keypress_process.pid))
        except Exception as e:
            # Fall back to non-interactive playback.
            logging.info("Exception ignored: {}".format(e))
            keypress_process = None
    # Width used to zero-pad the track counter in progress output.
    zero_pad = len(str(len(tracks)))
    for index, track in enumerate(tracks):
        if not path.exists(track):
            print("Error: file not found:", track)
            continue
        if not is_supported_type(track):
            print("Error: unsupported file type:", track)
            continue
        if "p" in options:
            print(
                "Playing {} of {}:".format(str(index + 1).zfill(zero_pad), len(tracks)),
                track,
            )
        play_local_file(speaker, track)
    if keypress_process:
        keypress_process.terminate()
    return True
def play_m3u_file(speaker: SoCo, m3u_file: str, options: str = "") -> bool:
    """Play the tracks listed in an M3U playlist file.

    :param speaker: The speaker to play on.
    :param m3u_file: Path of the playlist file.
    :param options: Option letters passed through to play_file_list().
    :return: True on success, False on any error.
    """
    if not path.exists(m3u_file):
        error_report("File '{}' not found".format(m3u_file))
        return False
    # Fixed missing space before the quoted filename in the log message.
    logging.info("Parsing file contents '{}'".format(m3u_file))
    track_list = parse_m3u(m3u_file)
    if len(track_list) == 0:
        error_report("No tracks found in '{}'".format(m3u_file))
        return False
    # Relative playlist entries are resolved against the playlist's own
    # directory.  NOTE(review): this changes the process-wide cwd — confirm
    # that is acceptable for callers.
    directory, _ = path.split(m3u_file)
    if directory != "":
        chdir(directory)
    tracks = [str(Path(track.path).absolute()) for track in track_list]  # type:ignore
    logging.info("Files to play: {}".format(tracks))
    # BUG FIX: propagate failures (e.g. invalid options) instead of
    # unconditionally returning True.
    return play_file_list(speaker, tracks, options)
def play_directory_files(speaker: SoCo, directory: str, options: str = "") -> bool:
    """Play all the valid audio files in a directory. Ignores subdirectories.

    :param speaker: The speaker to play on.
    :param directory: Directory to scan for supported audio files.
    :param options: Option letters passed through to play_file_list().
    :return: True on success, False on any error.
    """
    tracks = []
    try:
        with scandir(directory) as files:
            for file in files:
                if is_supported_type(file.name):
                    tracks.append(path.abspath(path.join(directory, file.name)))
    except FileNotFoundError:
        error_report("Directory '{}' not found".format(directory))
        return False
    # Deterministic playback order.
    tracks.sort()
    # Fixed duplicated word in the log message ("to to play").
    logging.info("Files to play: {}".format(tracks))
    # BUG FIX: propagate failures (e.g. invalid options) instead of
    # unconditionally returning True.
    return play_file_list(speaker, tracks, options)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.