hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aca46ff3f64c067a1ed44471011ff67e12df9a07 | 18,195 | py | Python | setup.py | joachimmetz/pytsk | 1dfe7ad84a0b6e8b8bdc4e861a319bab6144c56f | [
"Apache-2.0"
] | 1 | 2021-11-15T13:35:20.000Z | 2021-11-15T13:35:20.000Z | setup.py | joachimmetz/pytsk | 1dfe7ad84a0b6e8b8bdc4e861a319bab6144c56f | [
"Apache-2.0"
] | null | null | null | setup.py | joachimmetz/pytsk | 1dfe7ad84a0b6e8b8bdc4e861a319bab6144c56f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2010, Michael Cohen <scudette@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install the pytsk python module.
You can control the installation process using the following environment
variables:
SLEUTHKIT_SOURCE: The path to the locally downloaded tarball of the
sleuthkit. If not specified we download from the internet.
SLEUTHKIT_PATH: A path to the locally built sleuthkit source tree. If not
  specified we use the SLEUTHKIT_SOURCE environment variable (above).
"""
from __future__ import print_function
import copy
import glob
import re
import os
import subprocess
import sys
import time
from setuptools import setup, Command, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.sdist import sdist
import distutils.ccompiler
from distutils import errors
from distutils import log
from distutils.ccompiler import new_compiler
from distutils.dep_util import newer_group
try:
from distutils.command.bdist_msi import bdist_msi
except ImportError:
bdist_msi = None
try:
from distutils.command.bdist_rpm import bdist_rpm
except ImportError:
bdist_rpm = None
import generate_bindings
import run_tests
version_tuple = (sys.version_info[0], sys.version_info[1])
if version_tuple < (3, 5):
print((
'Unsupported Python version: {0:s}, version 3.5 or higher '
'required.').format(sys.version))
sys.exit(1)
if not bdist_msi:
BdistMSICommand = None
else:
class BdistMSICommand(bdist_msi):
"""Custom handler for the bdist_msi command."""
def run(self):
"""Builds an MSI."""
# Make a deepcopy of distribution so the following version changes
# only apply to bdist_msi.
self.distribution = copy.deepcopy(self.distribution)
# bdist_msi does not support the library version so we add ".1"
# as a work around.
self.distribution.metadata.version += ".1"
bdist_msi.run(self)
if not bdist_rpm:
BdistRPMCommand = None
else:
class BdistRPMCommand(bdist_rpm):
"""Custom handler for the bdist_rpm command."""
def make_spec_file(self, spec_file):
"""Make an RPM Spec file."""
# Note that bdist_rpm can be an old style class.
if issubclass(BdistRPMCommand, object):
spec_file = super(BdistRPMCommand, self)._make_spec_file()
else:
spec_file = bdist_rpm._make_spec_file(self)
if sys.version_info[0] < 3:
python_package = 'python2'
else:
python_package = 'python3'
description = []
requires = ''
summary = ''
in_description = False
python_spec_file = []
for line in iter(spec_file):
if line.startswith('Summary: '):
summary = line
elif line.startswith('BuildRequires: '):
line = 'BuildRequires: {0:s}-setuptools, {0:s}-devel'.format(
python_package)
elif line.startswith('Requires: '):
requires = line[10:]
if python_package == 'python3':
requires = requires.replace('python-', 'python3-')
requires = requires.replace('python2-', 'python3-')
elif line.startswith('%description'):
in_description = True
elif line.startswith('python setup.py build'):
if python_package == 'python3':
line = '%py3_build'
else:
line = '%py2_build'
elif line.startswith('python setup.py install'):
if python_package == 'python3':
line = '%py3_install'
else:
line = '%py2_install'
elif line.startswith('%files'):
lines = [
'%files -n {0:s}-%{{name}}'.format(python_package),
'%defattr(644,root,root,755)',
'%license LICENSE',
'%doc README']
if python_package == 'python3':
lines.extend([
'%{_libdir}/python3*/site-packages/*.so',
'%{_libdir}/python3*/site-packages/pytsk3*.egg-info/*',
'',
'%exclude %{_prefix}/share/doc/*'])
else:
lines.extend([
'%{_libdir}/python2*/site-packages/*.so',
'%{_libdir}/python2*/site-packages/pytsk3*.egg-info/*',
'',
'%exclude %{_prefix}/share/doc/*'])
python_spec_file.extend(lines)
break
elif line.startswith('%prep'):
in_description = False
python_spec_file.append(
'%package -n {0:s}-%{{name}}'.format(python_package))
if python_package == 'python2':
python_spec_file.extend([
'Obsoletes: python-pytsk3 < %{version}',
'Provides: python-pytsk3 = %{version}'])
if requires:
python_spec_file.append('Requires: {0:s}'.format(requires))
python_spec_file.extend([
'{0:s}'.format(summary),
'',
'%description -n {0:s}-%{{name}}'.format(python_package)])
python_spec_file.extend(description)
elif in_description:
          # Ignore leading blank lines in the description.
if not description and not line:
continue
description.append(line)
python_spec_file.append(line)
return python_spec_file
def _make_spec_file(self):
"""Generates the text of an RPM spec file.
Returns:
list[str]: lines of text.
"""
return self.make_spec_file(
bdist_rpm._make_spec_file(self))
class BuildExtCommand(build_ext):
"""Custom handler for the build_ext command."""
def build_extension(self, extension):
"""Builds the extension.
Args:
      extension: the distutils extension object.
"""
if (extension.sources is None or
not isinstance(extension.sources, (list, tuple))):
raise errors.DistutilsSetupError((
'in \'ext_modules\' option (extension \'{0:s}\'), '
'\'sources\' must be present and must be '
'a list of source filenames').format(extension.name))
extension_path = self.get_ext_fullpath(extension.name)
depends = extension.sources + extension.depends
if not (self.force or newer_group(depends, extension_path, 'newer')):
log.debug('skipping \'%s\' extension (up-to-date)', extension.name)
return
log.info('building \'%s\' extension', extension.name)
    # C and C++ source files need to be compiled separately, otherwise
# the extension will not build on Mac OS.
c_sources = []
cxx_sources = []
for source in extension.sources:
if source.endswith('.c'):
c_sources.append(source)
else:
cxx_sources.append(source)
objects = []
for lang, sources in (('c', c_sources), ('c++', cxx_sources)):
extra_args = extension.extra_compile_args or []
if lang == 'c++':
if self.compiler.compiler_type == 'msvc':
extra_args.append('/EHsc')
else:
extra_args.append('-std=c++14')
macros = extension.define_macros[:]
for undef in extension.undef_macros:
macros.append((undef,))
compiled_objects = self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=extension.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=extension.depends)
objects.extend(compiled_objects)
self._built_objects = objects[:]
if extension.extra_objects:
objects.extend(extension.extra_objects)
extra_args = extension.extra_link_args or []
# When MinGW32 is used statically link libgcc and libstdc++.
if self.compiler.compiler_type == 'mingw32':
extra_args.extend(['-static-libgcc', '-static-libstdc++'])
    # Now link the object files together into a "shared object" --
    # of course, first we have to figure out all the other things
    # that go into the mix.
# Detect target language, if not provided
language = extension.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, extension_path,
libraries=self.get_libraries(extension),
library_dirs=extension.library_dirs,
runtime_library_dirs=extension.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(extension),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def configure_source(self, compiler):
"""Configures the source.
Args:
compiler: distutils compiler object.
"""
define_macros = [("HAVE_TSK_LIBTSK_H", "")]
if compiler.compiler_type == "msvc":
define_macros.extend([
("WIN32", "1"),
("UNICODE", "1"),
("NOMINMAX", "1"),
("_CRT_SECURE_NO_WARNINGS", "1")])
# TODO: ("GUID_WINDOWS", "1"),
else:
      # We want the Python bindings to be as self-contained as possible.
command = [
"sh", "configure", "--disable-java", "--disable-multithreading",
"--without-afflib", "--without-libewf", "--without-libvhdi",
"--without-libvmdk", "--without-zlib"]
output = subprocess.check_output(command, cwd="sleuthkit")
print_line = False
for line in output.split(b"\n"):
line = line.rstrip()
if line == b"configure:":
print_line = True
if print_line:
if sys.version_info[0] >= 3:
line = line.decode("ascii")
print(line)
define_macros.extend([
("HAVE_CONFIG_H", "1"),
("LOCALEDIR", "\"/usr/share/locale\"")])
self.libraries = ["stdc++"]
self.define = define_macros
def run(self):
compiler = new_compiler(compiler=self.compiler)
# pylint: disable=attribute-defined-outside-init
self.configure_source(compiler)
libtsk_path = os.path.join("sleuthkit", "tsk")
if not os.access("pytsk3.cpp", os.R_OK):
# Generate the Python binding code (pytsk3.cpp).
libtsk_header_files = [
os.path.join(libtsk_path, "libtsk.h"),
os.path.join(libtsk_path, "base", "tsk_base.h"),
os.path.join(libtsk_path, "fs", "tsk_fs.h"),
os.path.join(libtsk_path, "img", "tsk_img.h"),
os.path.join(libtsk_path, "vs", "tsk_vs.h"),
"tsk3.h"]
print("Generating bindings...")
generate_bindings.generate_bindings(
"pytsk3.cpp", libtsk_header_files, initialization="tsk_init();")
build_ext.run(self)
class SDistCommand(sdist):
"""Custom handler for generating source dist."""
def run(self):
libtsk_path = os.path.join("sleuthkit", "tsk")
# sleuthkit submodule is not there, probably because this has been
# freshly checked out.
if not os.access(libtsk_path, os.R_OK):
subprocess.check_call(["git", "submodule", "init"])
subprocess.check_call(["git", "submodule", "update"])
if not os.path.exists(os.path.join("sleuthkit", "configure")):
raise RuntimeError(
"Missing: sleuthkit/configure run 'setup.py build' first.")
sdist.run(self)
class UpdateCommand(Command):
"""Update sleuthkit source.
This is normally only run by packagers to make a new release.
"""
_SLEUTHKIT_GIT_TAG = "4.11.1"
version = time.strftime("%Y%m%d")
timezone_minutes, _ = divmod(time.timezone, 60)
timezone_hours, timezone_minutes = divmod(timezone_minutes, 60)
# If timezone_hours is -1 %02d will format as -1 instead of -01
# hence we detect the sign and force a leading zero.
if timezone_hours < 0:
timezone_string = "-%02d%02d" % (-timezone_hours, timezone_minutes)
else:
timezone_string = "+%02d%02d" % (timezone_hours, timezone_minutes)
version_pkg = "%s %s" % (
time.strftime("%a, %d %b %Y %H:%M:%S"), timezone_string)
user_options = [("use-head", None, (
"Use the latest version of Sleuthkit checked into git (HEAD) instead of "
"tag: {0:s}".format(_SLEUTHKIT_GIT_TAG)))]
def initialize_options(self):
self.use_head = False
def finalize_options(self):
self.use_head = bool(self.use_head)
files = {
"sleuthkit/Makefile.am": [
("SUBDIRS = .+", "SUBDIRS = tsk"),
],
"class_parser.py": [
('VERSION = "[^"]+"', 'VERSION = "%s"' % version),
],
"dpkg/changelog": [
(r"pytsk3 \([^\)]+\)", "pytsk3 (%s-1)" % version),
("(<[^>]+>).+", r"\1 %s" % version_pkg),
],
}
def patch_sleuthkit(self):
"""Applies patches to the SleuthKit source code."""
for filename, rules in iter(self.files.items()):
filename = os.path.join(*filename.split("/"))
with open(filename, "r") as file_object:
data = file_object.read()
for search, replace in rules:
data = re.sub(search, replace, data)
with open(filename, "w") as fd:
fd.write(data)
patch_files = [
"sleuthkit-{0:s}-configure.ac".format(self._SLEUTHKIT_GIT_TAG)]
for patch_file in patch_files:
patch_file = os.path.join("patches", patch_file)
if not os.path.exists(patch_file):
print("No such patch file: {0:s}".format(patch_file))
continue
patch_file = os.path.join("..", patch_file)
subprocess.check_call(["git", "apply", patch_file], cwd="sleuthkit")
def run(self):
subprocess.check_call(["git", "stash"], cwd="sleuthkit")
subprocess.check_call(["git", "submodule", "init"])
subprocess.check_call(["git", "submodule", "update"])
print("Updating sleuthkit")
subprocess.check_call(["git", "reset", "--hard"], cwd="sleuthkit")
subprocess.check_call(["git", "clean", "-x", "-f", "-d"], cwd="sleuthkit")
subprocess.check_call(["git", "checkout", "master"], cwd="sleuthkit")
subprocess.check_call(["git", "pull"], cwd="sleuthkit")
if self.use_head:
print("Pulling from HEAD")
else:
print("Pulling from tag: {0:s}".format(self._SLEUTHKIT_GIT_TAG))
subprocess.check_call(["git", "fetch", "--force", "--tags"], cwd="sleuthkit")
git_tag_path = "tags/sleuthkit-{0:s}".format(self._SLEUTHKIT_GIT_TAG)
subprocess.check_call(["git", "checkout", git_tag_path], cwd="sleuthkit")
self.patch_sleuthkit()
compiler_type = distutils.ccompiler.get_default_compiler()
if compiler_type != "msvc":
subprocess.check_call(["./bootstrap"], cwd="sleuthkit")
# Now derive the version based on the date.
with open("version.txt", "w") as fd:
fd.write(self.version)
libtsk_path = os.path.join("sleuthkit", "tsk")
# Generate the Python binding code (pytsk3.cpp).
libtsk_header_files = [
os.path.join(libtsk_path, "libtsk.h"),
os.path.join(libtsk_path, "base", "tsk_base.h"),
os.path.join(libtsk_path, "fs", "tsk_fs.h"),
os.path.join(libtsk_path, "img", "tsk_img.h"),
os.path.join(libtsk_path, "vs", "tsk_vs.h"),
"tsk3.h"]
print("Generating bindings...")
generate_bindings.generate_bindings(
"pytsk3.cpp", libtsk_header_files, initialization="tsk_init();")
class ProjectBuilder(object):
"""Class to help build the project."""
def __init__(self, project_config, argv):
"""Initializes a project builder object."""
self._project_config = project_config
self._argv = argv
# The path to the sleuthkit/tsk directory.
self._libtsk_path = os.path.join("sleuthkit", "tsk")
# Paths under the sleuthkit/tsk directory which contain files we need
# to compile.
self._sub_library_names = ["base", "docs", "fs", "img", "pool", "util", "vs"]
# The args for the extension builder.
self.extension_args = {
"include_dirs": ["talloc", self._libtsk_path, "sleuthkit", "."],
"library_dirs": []}
# The sources to build.
self._source_files = [
"class.cpp", "error.cpp", "tsk3.cpp", "pytsk3.cpp", "talloc/talloc.c"]
# Path to the top of the unpacked sleuthkit sources.
self._sleuthkit_path = "sleuthkit"
def build(self):
"""Build everything."""
# Fetch all c and cpp files from the subdirs to compile.
extension_file = os.path.join(
self._libtsk_path, "auto", "guid.cpp")
self._source_files.append(extension_file)
for library_name in self._sub_library_names:
for extension in ("*.c", "*.cpp"):
extension_glob = os.path.join(
self._libtsk_path, library_name, extension)
self._source_files.extend(glob.glob(extension_glob))
    # Sort the source files to make sure they are in a consistent order when
    # building.
source_files = sorted(self._source_files)
ext_modules = [Extension("pytsk3", source_files, **self.extension_args)]
setup(
cmdclass={
"build_ext": BuildExtCommand,
"bdist_msi": BdistMSICommand,
"bdist_rpm": BdistRPMCommand,
"sdist": SDistCommand,
"update": UpdateCommand},
ext_modules=ext_modules,
**self._project_config)
if __name__ == "__main__":
__version__ = open("version.txt").read().strip()
setup_args = dict(
name="pytsk3",
version=__version__,
description="Python bindings for the sleuthkit",
long_description=(
"Python bindings for the sleuthkit (http://www.sleuthkit.org/)"),
license="Apache 2.0",
url="https://github.com/py4n6/pytsk/",
author="Michael Cohen and Joachim Metz",
author_email="scudette@gmail.com, joachim.metz@gmail.com",
zip_safe=False)
ProjectBuilder(setup_args, sys.argv).build()
| 31.809441 | 83 | 0.631822 | 2,222 | 18,195 | 5.004951 | 0.221872 | 0.011869 | 0.017984 | 0.023739 | 0.21293 | 0.176423 | 0.13488 | 0.122111 | 0.10125 | 0.092977 | 0 | 0.008708 | 0.236329 | 18,195 | 571 | 84 | 31.865149 | 0.791652 | 0.169277 | 0 | 0.215847 | 0 | 0 | 0.183601 | 0.024699 | 0 | 0 | 0 | 0.001751 | 0 | 1 | 0.035519 | false | 0 | 0.057377 | 0 | 0.131148 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aca48c97086c83ffa3c7ca8f644d9105be76a624 | 5,238 | py | Python | tests/archivers_test.py | xa4a/djtools | 05131bfe96aaf85dc8f672cd3b520bc14a37d095 | [
"Apache-2.0"
] | 1 | 2020-01-02T11:35:15.000Z | 2020-01-02T11:35:15.000Z | tests/archivers_test.py | xa4a/djtools | 05131bfe96aaf85dc8f672cd3b520bc14a37d095 | [
"Apache-2.0"
] | null | null | null | tests/archivers_test.py | xa4a/djtools | 05131bfe96aaf85dc8f672cd3b520bc14a37d095 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from bpylist import archiver
from bpylist import archive_types
from djtools.djay import models
from .common import dj_tests
class TestArchivers(unittest.TestCase):
def setUp(self):
models.register()
def test_verify_dataclass_has_fields(self):
with self.assertRaises(archive_types.Error):
bplist = dj_tests.get_fixture_from_xml(
'cuepoint_extra_field.plist.xml')
archiver.unarchive(bplist)
def test_title_unarchive(self):
bplist = dj_tests.get_fixture_from_xml('adctitle.plist.xml')
expected = dj_tests.EXPECTED_TITLE
actual = archiver.unarchive(bplist)
self.assertEqual(actual, expected)
def test_title_e2e(self):
expected = models.ADCMediaItemTitleID(
title='title',
artist='artist',
uuid='UuId',
internalID='UuId',
stringRepresentation='String Repr',
duration=15.3,
)
actual = archiver.unarchive(archiver.archive(expected))
self.assertEqual(actual, expected)
def test_cuepoint_unarchive(self):
bplist = dj_tests.get_fixture_from_xml('cuepoint.plist.xml')
expected = dj_tests.EXPECTED_CUEPOINT
actual = archiver.unarchive(bplist)
self.assertEqual(actual, expected)
def test_cuepoint_e2e(self):
expected = models.ADCCuePoint(
comment="bar",
number=2,
time=15.2,
)
actual = archiver.unarchive(archiver.archive(expected))
self.assertEqual(actual, expected)
def test_userdata_unarchive(self):
bplist = dj_tests.get_fixture_from_xml('userdata.plist.xml')
expected = dj_tests.EXPECTED_USER_DATA
actual = archiver.unarchive(bplist)
self.assertEqual(actual, expected, f'\n{actual} != \n{expected}')
def test_userdata_e2e(self):
expected = models.ADCMediaItemUserData(
cuePoints=[
models.ADCCuePoint(comment=None, number=1,
time=3.2826459407806396),
models.ADCCuePoint(comment=None, number=2,
time=114.29496765136719),
models.ADCCuePoint(comment=None, number=3,
time=114.83682250976562)
],
startPoint=models.ADCCuePoint(comment=None, number=0,
time=112.90266418457031),
uuid='71f9ccc746630c592ceeed39cbc837b2',
playCount=7,
energy=15,
highEQ=10.30,
midEQ=2.0,
lowEQ=3.0,
manualBPM=117.33,
manualBeatTime=1.01,
manualKeySignatureIndex=7,
rating=3,
# TODO: Populate the fields.
linkedUserDataUUIDs=None,
loopRegions=None,
manualFirstDownBeatIndices=None,
manualGridStartPoints=None,
tagUUIDs=None,
endPoint=None,
)
actual = archiver.unarchive(archiver.archive(expected))
self.assertEqual(actual, expected)
def test_analyzed_data_unarchive(self):
bplist = dj_tests.get_fixture_from_xml('analyzed_data.plist.xml')
expected = dj_tests.EXPECTED_ANALYZED_DATA
actual = archiver.unarchive(bplist)
self.assertEqual(actual, expected)
def test_analyzed_data_e2e(self):
expected = models.ADCMediaItemAnalyzedData(
bpm=1,
keySignatureIndex=10,
uuid="foo",
)
actual = archiver.unarchive(archiver.archive(expected))
self.assertEqual(actual, expected)
def test_location_unarchive(self):
bplist = dj_tests.get_fixture_from_xml('location.plist.xml')
expected = dj_tests.EXPECTED_MEDIA_ITEM_LOCATION
actual = archiver.unarchive(bplist)
self.assertEqual(actual, expected)
def test_location_e2e(self):
expected = models.ADCMediaItemLocation(
sourceURIs={
models.NSURL(
NSrelative='file:///tmp/foo.wav',
NSbase=None
),
models.NSURL(
NSrelative='com.apple.iTunes:123456',
NSbase=None
)
},
type=3,
urlBookmarkData=models.NSMutableData(
NSdata=b'not a b64-encoded string'
),
uuid='71f9'
)
actual = archiver.unarchive(archiver.archive(expected))
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
| 34.688742 | 74 | 0.611684 | 534 | 5,238 | 5.853933 | 0.355805 | 0.026871 | 0.073576 | 0.09277 | 0.427703 | 0.384197 | 0.334613 | 0.324376 | 0.301983 | 0.212092 | 0 | 0.039934 | 0.302024 | 5,238 | 150 | 75 | 34.92 | 0.815098 | 0.109775 | 0 | 0.211864 | 0 | 0 | 0.063898 | 0.023236 | 0 | 0 | 0 | 0.006667 | 0.09322 | 1 | 0.101695 | false | 0 | 0.042373 | 0 | 0.152542 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aca53b07ddfe117a6a5f1d77a5aee68a14523f44 | 9,156 | py | Python | src/junison/merger.py | ztane/junison | 3ca1b76505dbaf493988483768bd75b0aaa2661f | [
"BSD-2-Clause"
] | null | null | null | src/junison/merger.py | ztane/junison | 3ca1b76505dbaf493988483768bd75b0aaa2661f | [
"BSD-2-Clause"
] | null | null | null | src/junison/merger.py | ztane/junison | 3ca1b76505dbaf493988483768bd75b0aaa2661f | [
"BSD-2-Clause"
] | null | null | null | from enum import IntEnum
from numbers import Number
from typing import Union, Dict, List
import copy
from collections import OrderedDict
class MergeException(Exception):
pass
UNDEFINED = object()
DELETE = object()
JSON = Union[
Dict[str, 'JSON'],
List['JSON'],
int,
float,
str,
bool,
None
]
def _is_value_type(type):
return type in {bool, Number, str, None, UNDEFINED}
class DefaultTo(IntEnum):
HEAD = 0
UPDATE = 1
class ValueConflictHandler:
def __init__(self, default_to: DefaultTo = DefaultTo.UPDATE):
self._default_to = default_to
def merge(self, *, merger, path, root, head, update):
return [head, update][self._default_to.value]
class ObjectSetConflictHandler:
def __init__(self, id='id'):
self._id_field = id
@staticmethod
def _merge_ordered_sets(list_a, list_b):
"""
        Merge two ordered sets so that the ordering of list_b is retained
        and interleaved with the items appearing solely in list_a.
:param list_a: the first list
:param list_b: the second list
:return: the merged list
"""
before = {}
b_set = set(list_b)
un_anchored = []
for i in list_a:
if i in b_set:
before[i] = un_anchored
un_anchored = []
else:
un_anchored.append(i)
at_the_end = un_anchored
result = []
for i in list_b:
result.extend(before.get(i, ()))
result.append(i)
result.extend(at_the_end)
return result
def merge(self, *, merger, path, root, head, update):
def get_id(item):
try:
if isinstance(item, dict):
return item[self._id_field]
else:
return item
except KeyError:
raise ValueError('{} doesn\'t have id field {}'
.format(item, self._id_field))
if root is UNDEFINED:
root = []
if head is UNDEFINED:
head = []
if update is UNDEFINED:
update = []
root_items = OrderedDict((get_id(item), item) for item in root)
head_items = OrderedDict((get_id(item), item) for item in head)
update_items = OrderedDict((get_id(item), item) for item in update)
result_items = {}
for i in root_items:
if i not in head_items:
if i not in update_items:
result_items[i] = DELETE
elif update_items[i] == root_items[i]:
result_items[i] = DELETE
else:
# ONE DELETED, other UPDATED, keep the updated!
# self.merger._add_conflict()
result_items[i] = copy.deepcopy(update_items[i])
elif i not in update_items:
                # it is still in head_items. If head left it unchanged,
                # remove it; otherwise keep head's modified version.
if head_items[i] == root_items[i]:
result_items[i] = DELETE
else:
result_items[i] = copy.deepcopy(head_items[i])
else:
# all 3 exist, do a merge.
result_items[i] = merger._do_merge(
path=path,
root=root_items[i],
head=head_items[i],
update=update_items[i])
for i in head_items:
if i in result_items:
continue
# the item is new.
if i in update_items:
result_items[i] = merger._do_merge(
path=path,
root={},
head=head_items[i],
update=update_items[i]
)
else:
result_items[i] = copy.deepcopy(head_items[i])
for i in update_items:
if i in result_items:
continue
            # the item appears only in update, so keep it as-is
result_items[i] = copy.deepcopy(update_items[i])
actual_order = self._merge_ordered_sets(head_items, update_items)
return [result_items[i]
for i in actual_order
if result_items[i] is not DELETE]
class DictMerger:
def merge(self, *, merger, path, root, head, update):
if root is UNDEFINED:
root = {}
if head is UNDEFINED:
head = {}
if update is UNDEFINED:
update = {}
all_keys = set(root.keys()) | set(head.keys()) | set(update.keys())
result = {}
for i in all_keys:
value = merger._do_merge(
path=path + (i,),
root=root.get(i, UNDEFINED),
head=head.get(i, UNDEFINED),
update=update.get(i, UNDEFINED)
)
if value is not UNDEFINED:
result[i] = value
return result
def normalize_key(key):
if isinstance(key, tuple):
return key
if isinstance(key, list):
return tuple(key)
if key == '':
return ()
return tuple(key.split('.'))
class Merger:
_default_value_conflict_strategy = ValueConflictHandler()
_default_list_conflict_strategy = ObjectSetConflictHandler()
_default_dict_conflict_strategy = DictMerger()
def __init__(
self,
list_conflict_handlers=None,
value_conflict_handlers=None,
default_value_conflict_handler=None,
default_list_conflict_handler=None):
"""
Initialize a new merger instance.
"""
self._list_conflict_handlers = {
normalize_key(key): value
for (key, value)
in (list_conflict_handlers or {}).items()
}
self._value_conflict_handlers = {
normalize_key(key): value
for (key, value)
in (value_conflict_handlers or {}).items()
}
if default_value_conflict_handler is not None:
self._default_value_conflict_strategy = \
default_value_conflict_handler
if default_list_conflict_handler is not None:
self._default_list_conflict_strategy = default_list_conflict_handler
    def _copy(self, item):
        """
        Returns a copy of the given item. The sentinels are not copied but
        returned as is
        :param item: the item to be copied
        :return: a fresh deep copy (if necessary)
        """
        # The UNDEFINED / DELETE sentinels are compared by identity elsewhere,
        # so return them unchanged instead of deep-copying them.
        if item is UNDEFINED or item is DELETE:
            return item
        return copy.deepcopy(item)
def _type(self, inst):
if isinstance(inst, bool):
return bool
if isinstance(inst, Number):
return Number
if isinstance(inst, str):
return str
if isinstance(inst, (list, tuple)):
return list
if isinstance(inst, dict):
return dict
if inst is None:
return None
if inst is UNDEFINED:
return UNDEFINED
raise TypeError('The value {!r} is not a JSON value'.format(inst))
def _get_merge_algorithm(self, *, path, rtype, htype, utype):
if _is_value_type(rtype) and _is_value_type(htype) and _is_value_type(
utype):
return self._value_conflict_handlers.get(
path,
self._default_value_conflict_strategy
)
if rtype in (dict, UNDEFINED) and htype in (dict, UNDEFINED) and utype \
in (dict, UNDEFINED):
return self._default_dict_conflict_strategy
if rtype in (list, UNDEFINED) and htype in (list, UNDEFINED) and utype \
in (list, UNDEFINED):
return self._list_conflict_handlers.get(
path,
self._default_list_conflict_strategy
)
raise TypeError('Unable to merge types root={}, head={}, update={}'
.format(rtype, htype, utype))
def _do_merge(self,
*,
path,
root: JSON,
head: JSON,
update: JSON) -> JSON:
if root == head:
return self._copy(update)
if root == update:
return self._copy(head)
merger = self._get_merge_algorithm(
path=path,
rtype=self._type(root),
htype=self._type(head),
utype=self._type(update))
return merger.merge(merger=self,
path=path,
root=root,
head=head,
update=update)
def merge(
self,
*,
root: JSON,
head: JSON = UNDEFINED,
update: JSON) -> JSON:
"""
Perform a 3-way merge, using the given root, head and update.
        :param root: the common ancestor version
        :param head: one edited version (HEAD)
        :param update: the other edited version; the default value conflict
            handler prefers this side
        :return: the merged JSON document
"""
return self._do_merge(
path=(),
root=root,
head=head,
update=update
)
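# Illustrative usage of Merger (an assumption about typical use, not part of the
# original module). With the default handlers, dicts are merged key by key,
# lists are merged as object sets keyed by 'id', and genuine value conflicts
# prefer the update side:
#   merger = Merger()
#   merged = merger.merge(
#       root={'title': 'a', 'tags': [{'id': 1}]},
#       head={'title': 'b', 'tags': [{'id': 1}, {'id': 2}]},
#       update={'title': 'a', 'tags': [{'id': 1, 'name': 'x'}]})
#   # merged == {'title': 'b', 'tags': [{'id': 1, 'name': 'x'}, {'id': 2}]}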
| 27.25 | 80 | 0.529598 | 1,015 | 9,156 | 4.572414 | 0.162562 | 0.031028 | 0.028442 | 0.01379 | 0.284421 | 0.239173 | 0.216333 | 0.174962 | 0.122387 | 0.066365 | 0 | 0.000714 | 0.387724 | 9,156 | 335 | 81 | 27.331343 | 0.827149 | 0.084098 | 0 | 0.248908 | 0 | 0.004367 | 0.012563 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065502 | false | 0.004367 | 0.021834 | 0.008734 | 0.248908 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aca631093920542a70a8801e7f36cfd58a9040c6 | 1,642 | py | Python | db_hammer/util/net.py | liuzhuogood/db-hammer | 133eb09cb83cabb82690d35470e57232c350b79b | [
"MIT"
] | 3 | 2020-09-17T10:21:50.000Z | 2021-11-16T10:29:57.000Z | db_hammer/util/net.py | liuzhuogood/db-hammer | 133eb09cb83cabb82690d35470e57232c350b79b | [
"MIT"
] | null | null | null | db_hammer/util/net.py | liuzhuogood/db-hammer | 133eb09cb83cabb82690d35470e57232c350b79b | [
"MIT"
] | null | null | null | import socket
def is_inuse(ip, port):
"""端口是否被占用"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
s.connect((ip, int(port)))
s.shutdown(2)
return True
except:
return False
def get_random_port(ip):
"""根据IP获取一个随机端口(15000~20000)"""
import random
times = 0
max_times = 50
port = random.randint(15000, 20000)
while is_inuse(ip, port) and times < max_times:
port = random.randint(15000, 20000)
times += 1
    if times >= max_times:
        raise Exception("Failed to obtain an unused port")
return port
def get_pc_name_ip(host):
"""获取当前IP与主机名 返回:(ip,name)"""
name = socket.getfqdn(socket.gethostname())
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((host.split(":")[0], int(host.split(":")[1])))
ip = s.getsockname()[0]
finally:
s.close()
return name, ip
def recv_end(the_socket, SOCKET_END_TAG):
"""通过寻找接收的协议数据中的尾标识字符串,获取完整的数据的数据报文"""
total_data = []
while True:
data = the_socket.recv(8192)
if SOCKET_END_TAG in data:
total_data.append(data[:data.find(SOCKET_END_TAG)])
break
total_data.append(data)
if len(total_data) > 1:
# check if end_of_data was split
last_pair = total_data[-2] + total_data[-1]
if SOCKET_END_TAG in last_pair:
total_data[-2] = last_pair[:last_pair.find(SOCKET_END_TAG)]
total_data.pop()
break
if len(total_data) == 0:
return None
return b''.join(total_data)
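# Illustrative usage (an assumption, not part of the original module; `sock` is
# a hypothetical connected socket and the end tag must match what the sender
# appends):
#   if not is_inuse("127.0.0.1", 8080):
#       print("nothing is listening on 127.0.0.1:8080")
#   port = get_random_port("127.0.0.1")   # unused port in the 15000~20000 range
#   data = recv_end(sock, b"<END>")       # read until the end tag is seen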
| 26.483871 | 75 | 0.584044 | 217 | 1,642 | 4.221198 | 0.345622 | 0.098253 | 0.065502 | 0.028384 | 0.216157 | 0.082969 | 0.082969 | 0.082969 | 0.082969 | 0 | 0 | 0.041308 | 0.292326 | 1,642 | 61 | 76 | 26.918033 | 0.746988 | 0.0743 | 0 | 0.125 | 0 | 0 | 0.006004 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.041667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aca6551a2099c0e8c330637f14aed55e3b595693 | 3,652 | py | Python | base/site-packages/reporting/templatetags/reporting.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/reporting/templatetags/reporting.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | base/site-packages/reporting/templatetags/reporting.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | from django.db.models.fields.related import RelatedField
from django.db.models.fields import DateField
import datetime
from django.utils.translation import get_date_formats, get_partial_date_formats, ugettext as _
from django.utils import dateformat
from django.utils.safestring import mark_safe
from django.template import Library
register = Library()
def get_date_model_field(model, lookup):
parts = lookup.split('__')
field = model._meta.get_field(parts[0])
if not isinstance(field, RelatedField):
if not isinstance(field, DateField):
raise Exception('%s is not a date field' % lookup)
return model, lookup
rel_model = field.rel.to
if len(parts) == 1:
raise Exception('%s is not a date field' % lookup)
next_lookup = '__'.join(parts[1:])
return get_date_model_field(rel_model, next_lookup)
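# Illustrative example (an assumption, not part of the original module): for a
# hypothetical Invoice model with a ForeignKey 'order' to an Order model that
# has a 'created' DateField,
#   get_date_model_field(Order, 'created') -> (Order, 'created')
#   get_date_model_field(Invoice, 'order__created') -> (Order, 'created')
# i.e. the helper follows relations until it reaches the model that owns the
# date field, and raises if the lookup does not end in a date field.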
def report_date_hierarchy(cl):
if cl.date_hierarchy:
model, field_name = get_date_model_field(cl.model, cl.date_hierarchy)
rel_query_set = model.objects.all()
year_field = '%s__year' % cl.date_hierarchy
month_field = '%s__month' % cl.date_hierarchy
day_field = '%s__day' % cl.date_hierarchy
field_generic = '%s__' % cl.date_hierarchy
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
year_month_format, month_day_format = get_partial_date_formats()
link = lambda d: mark_safe(cl.get_query_string(d, [field_generic]))
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': dateformat.format(day, year_month_format)
},
'choices': [{'title': dateformat.format(day, month_day_format)}]
}
elif year_lookup and month_lookup:
days = rel_query_set.filter(**{'%s__year' % field_name: year_lookup, '%s__month' % field_name: month_lookup}).dates(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': year_lookup
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': dateformat.format(day, month_day_format)
} for day in days]
}
elif year_lookup:
months = rel_query_set.filter(**{'%s__year' % field_name: year_lookup}).dates(field_name, 'month')
return {
'show' : True,
'back': {
'link' : link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': dateformat.format(month, year_month_format)
} for month in months]
}
else:
years = rel_query_set.dates(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: year.year}),
'title': year.year
} for year in years]
}
report_date_hierarchy = register.inclusion_tag('admin/date_hierarchy.html')(report_date_hierarchy) | 42.465116 | 146 | 0.579956 | 422 | 3,652 | 4.718009 | 0.203791 | 0.060271 | 0.045203 | 0.042692 | 0.298845 | 0.250628 | 0.223506 | 0.185334 | 0.185334 | 0.149171 | 0 | 0.001192 | 0.310789 | 3,652 | 86 | 147 | 42.465116 | 0.789829 | 0 | 0 | 0.202532 | 0 | 0 | 0.071722 | 0.006844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025316 | false | 0 | 0.088608 | 0 | 0.189873 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acabbda50c815d610292fcdde36837b94936bdac | 1,852 | py | Python | setup.py | varkiwi/git3-client | 1fab576926091f6a771fbce05be8494d22e3efe4 | [
"MIT"
] | 4 | 2021-08-18T15:24:02.000Z | 2022-02-24T13:33:05.000Z | setup.py | varkiwi/git3-client | 1fab576926091f6a771fbce05be8494d22e3efe4 | [
"MIT"
] | 29 | 2020-12-14T18:38:42.000Z | 2022-03-31T12:13:54.000Z | setup.py | varkiwi/git3-client | 1fab576926091f6a771fbce05be8494d22e3efe4 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
readme = open('README.md', 'r')
content = readme.read()
readme.close()
setup(
name = "git3Client",
packages = find_packages('.'),
include_package_data = True,
entry_points = {
"console_scripts": [
"git3 = git3Client.__main__:run",
]
},
version = "0.2.1",
description = "Git3 Python client",
long_description = content,
long_description_content_type="text/markdown",
author = "Jacek Varky",
author_email = "jaca347@gmail.com",
install_requires=[
'attrs==20.2.0',
'base58==2.0.1',
'bitarray==1.2.2',
'certifi==2020.6.20',
'chardet==3.0.4',
'cytoolz==0.11.0',
'eth-abi==2.1.1',
'eth-account==0.5.5',
'eth-hash==0.2.0',
'eth-keyfile==0.5.1',
'eth-keys==0.3.3',
'eth-rlp==0.2.1',
'eth-typing==2.2.2',
'eth-utils==1.9.5',
'hexbytes==0.2.1',
'idna==2.10',
'importlib-metadata==4.0.1',
'importlib-resources==3.0.0',
'ipfshttpclient==0.8.0a2',
'jsonschema==3.2.0',
'lru-dict==1.1.6',
'multiaddr==0.0.9',
'netaddr==0.8.0',
'parsimonious==0.8.1',
'protobuf==3.13.0',
'pycryptodome==3.9.8',
'pyrsistent==0.17.3',
'requests==2.24.0',
'rlp==2.0.0',
'rusty-rlp==0.1.15',
'six==1.15.0',
'toolz==0.11.1',
'typing-extensions==3.7.4.3',
'urllib3==1.25.11',
'varint==1.0.2',
#'web3==5.12.3',
'web3==5.23.1',
'websockets==9.1',
'zipp==3.3.1',
],
url = "https://github.com/varkiwi/git3-client",
classifiers=[
"Development Status :: 3 - Alpha",
],
) | 27.641791 | 52 | 0.475702 | 241 | 1,852 | 3.589212 | 0.46888 | 0.011561 | 0.010405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119273 | 0.316415 | 1,852 | 67 | 53 | 27.641791 | 0.563981 | 0.008099 | 0 | 0.03125 | 0 | 0 | 0.455675 | 0.069452 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.046875 | 0 | 0.046875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acad12449ab06b69399640616c08240f6950d3ed | 1,194 | py | Python | src/messageSubscriber.py | Cherden/xsn-telegram-address-monitor | 04e979465c7b6ae7302a042dff7fe3ff98f0e9c2 | [
"MIT"
] | null | null | null | src/messageSubscriber.py | Cherden/xsn-telegram-address-monitor | 04e979465c7b6ae7302a042dff7fe3ff98f0e9c2 | [
"MIT"
] | null | null | null | src/messageSubscriber.py | Cherden/xsn-telegram-address-monitor | 04e979465c7b6ae7302a042dff7fe3ff98f0e9c2 | [
"MIT"
] | 1 | 2020-03-20T22:43:09.000Z | 2020-03-20T22:43:09.000Z | from telegram.ext import Updater
from mongo_connector import MongoConnector
from configparser import ConfigParser
cp = ConfigParser()
cp.optionxform = str
cp.read('config.ini')
db = MongoConnector()
db.connect(cp['DATABASE']['Address'], cp['DATABASE']['Name'])
telegram_bot_token = cp['TELEGRAM']['SecretKey']
monitoring_collection = cp['DATABASE']['MonitoringCollection']
def main():
message = "Due to maintenance work, the bot was temporarily unavailable. \n" \
"We apologize for this. The bot is now up and running again. If you encounter any bugs, please report them to us at Discord. \
Have a nice weekend."
updater = Updater(telegram_bot_token)
dispatcher = updater.dispatcher
id_list = []
success, monitors = db.find(monitoring_collection, {}, many=True)
if success:
for monitor in monitors:
if not monitor["telegram_id"] in id_list:
id_list.append(monitor["telegram_id"])
for id in id_list:
try:
updater.bot.send_message(id, message)
except Exception as e:
print("User blocked bot by id:", id)
exit()
if __name__ == '__main__':
main() | 29.121951 | 140 | 0.664154 | 151 | 1,194 | 5.10596 | 0.569536 | 0.031128 | 0.041505 | 0.02594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.230318 | 1,194 | 41 | 141 | 29.121951 | 0.838955 | 0 | 0 | 0 | 0 | 0.033333 | 0.166527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.1 | 0 | 0.133333 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acaeb5f7ecf7ea2e59ad0c49a214d1e068ccdee2 | 7,124 | py | Python | main.py | hamley241/FashionAI | 7cc55e08a47df1ec592f857fe9de46262f06842b | [
"MIT"
] | null | null | null | main.py | hamley241/FashionAI | 7cc55e08a47df1ec592f857fe9de46262f06842b | [
"MIT"
] | null | null | null | main.py | hamley241/FashionAI | 7cc55e08a47df1ec592f857fe9de46262f06842b | [
"MIT"
] | null | null | null | import argparse
import os
import os.path
import torch
import torch.nn.functional as F
import torch.optim as optim
import model as m
from torch.autograd import Variable
from dataset import FashionAI
import matplotlib
import pickle
import copy
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Training settings
parser = argparse.ArgumentParser(description='FashionAI')
parser.add_argument('--model', type=str, default='resnet34', metavar='M',
help='model name')
parser.add_argument('--attribute', type=str, default='coat_length_labels', metavar='A',
help='fashion attribute (default: coat_length_labels)')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=10, metavar='N',
help='input batch size for testing (default: 10)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
help='number of epochs to train (default: 50)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0, metavar='M',
help='SGD momentum (default: 0)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--ci', action='store_true', default=False,
help='running CI')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
print("Loading trainset")
trainset = FashionAI('./', attribute=args.attribute, split=0.8, ci=args.ci, data_type='train', reset=False)
print("Loading testset")
testset = FashionAI('./', attribute=args.attribute, split=0.8, ci=args.ci, data_type='test', reset=trainset.reset)
print("Creating train loader")
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
print("Test loader")
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
if args.ci:
args.model = 'ci'
print("Loading a model for training")
model = m.create_model(args.model, FashionAI.AttrKey[args.attribute])
print("Loading save folder")
save_folder = os.path.join(os.path.expanduser('.'), 'save', args.attribute, args.model)
print("Check point folder check")
if os.path.exists(os.path.join(save_folder, args.model + '_checkpoint.pth')):
start_epoch = torch.load(os.path.join(save_folder, args.model + '_checkpoint.pth'))
model.load_state_dict(torch.load(os.path.join(save_folder, args.model + '_' + str(start_epoch) + '.pth')))
else:
start_epoch = 0
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
def train(epoch):
model.train()
correct = 0
train_loss = 0
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
train_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if not os.path.exists(save_folder):
os.makedirs(save_folder)
# torch.save(model.state_dict(), os.path.join(save_folder, args.model + '_' + str(epoch) + '.pth'))
# torch.save(epoch, os.path.join(save_folder, args.model + '_checkpoint.pth'))
train_loss /= len(train_loader.dataset)
print('Train set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
train_loss, correct, len(train_loader.dataset),
100. * correct / len(train_loader.dataset)))
return {'loss': train_loss, 'accuracy': 100. *correct/ len(train_loader.dataset)}
best_accuracy = 0
def test():
global best_accuracy
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = data, target
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
current_accuracy = 100. * correct / len(test_loader.dataset)
if best_accuracy < current_accuracy:
print("Saving model current "+str(current_accuracy)+" "+"last best "+str(best_accuracy))
best_accuracy = current_accuracy
torch.save(model.state_dict(), os.path.join(save_folder, args.model + '_' + str(epoch) + '.pth'))
torch.save(epoch, os.path.join(save_folder, args.model + '_checkpoint.pth'))
return {'loss':test_loss, 'accuracy':100. *correct/ len(test_loader.dataset)}
def save_fig(name_fig, tight_layout=True):
path = os.path.join("./", "images", name_fig + ".png")
print("Saving figure", name_fig)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
train_loss = []
train_accuracy = []
test_loss = []
test_accuracy = []
print("Starting training")
for epoch in range(start_epoch + 1, args.epochs + 1):
loss_acc = train(epoch)
train_loss.append(copy.deepcopy(loss_acc.get('loss')))
train_accuracy.append(copy.deepcopy(loss_acc.get('accuracy')))
tloss_acc = test()
test_loss.append(copy.deepcopy(tloss_acc.get('loss')))
test_accuracy.append(copy.deepcopy(tloss_acc.get('accuracy')))
train_loss_acc = {'acc':train_accuracy, 'loss':train_loss}
test_loss_acc = {'acc':test_accuracy, 'loss':test_loss}
pickle.dump(train_loss_acc, open("train_metrics.p","wb"))
pickle.dump(test_loss_acc, open("test_metrics.p","wb"))
| 43.175758 | 114 | 0.669568 | 981 | 7,124 | 4.720693 | 0.206932 | 0.016843 | 0.04038 | 0.021162 | 0.362989 | 0.31138 | 0.248327 | 0.19607 | 0.195422 | 0.167135 | 0 | 0.013445 | 0.175182 | 7,124 | 164 | 115 | 43.439024 | 0.774677 | 0.043515 | 0 | 0.098592 | 0 | 0 | 0.164855 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021127 | false | 0 | 0.091549 | 0 | 0.126761 | 0.091549 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acb674a079999b51055ceace35b57b618f9d2c03 | 3,756 | py | Python | qnoodles/qnoodles.py | jhidding/qnoodles | cf9b7f59a8353f35e5770e1333fd26655f03db11 | [
"Apache-2.0"
] | 1 | 2016-08-20T06:44:29.000Z | 2016-08-20T06:44:29.000Z | qnoodles/qnoodles.py | jhidding/qnoodles | cf9b7f59a8353f35e5770e1333fd26655f03db11 | [
"Apache-2.0"
] | null | null | null | qnoodles/qnoodles.py | jhidding/qnoodles | cf9b7f59a8353f35e5770e1333fd26655f03db11 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
A graphical user interface (PySide) on top of the FireWorks workflow engine.
@author: Johan Hidding
@organisation: Netherlands eScience Center (NLeSC)
@contact: j.hidding@esciencecenter.nl
"""
import sys, os
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from .nodebox import NodeBox
#from .sourceview import SourceView
class NodeView(QtGui.QGraphicsView):
def __init__(self, scene):
super(NodeView, self).__init__(scene)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.show()
class NodeScene(QtGui.QGraphicsScene):
def __init__(self, data_model):
super(NodeScene, self).__init__()
self.nodes = [NodeBox(n, self) for i, n in data_model.all_nodes()]
def noodletPressed(self, i, s):
pass
#print("{0}-{1} pressed".format(i, s))
def noodletReleased(self, i, s):
pass
#print("{0}-{1} released".format(i, s))
class NoodlesWindow(QtGui.QMainWindow):
def __init__(self, data_model):
super(NoodlesWindow, self).__init__()
self.data_model = data_model
self.initUI()
def initUI(self):
style = str(open("static/qt-style.css", "r").read())
self.nodeScene = NodeScene(self.data_model)
self.nodeView = NodeView(self.nodeScene)
self.nodeView.setStyleSheet(style)
#self.sourceView = SourceView()
self.tabWidget = QtGui.QTabWidget()
self.tabWidget.addTab(self.nodeView, "Graph view")
#self.tabWidget.addTab(self.sourceView, "Source view")
self.setCentralWidget(self.tabWidget)
self.setGeometry(300, 300, 1024, 600)
self.setWindowTitle('Noodles')
self.setWindowIcon(QtGui.QIcon('static/noodles-icon.png'))
self.statusBar().showMessage('Ready')
exitAction = QtGui.QAction(QtGui.QIcon.fromTheme('application-exit'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
self.toolbar = self.addToolBar('Exit')
self.toolbar.addAction(exitAction)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAction)
self.nodeRepository = QtGui.QToolBox()
self.flowNodeList = QtGui.QListWidget()
self.compositeNodeList = QtGui.QListWidget()
self.libraryNodeList = QtGui.QListWidget()
self.nodeRepository.addItem(self.flowNodeList, "flow control")
self.nodeRepository.addItem(self.libraryNodeList, "library nodes")
self.nodeRepository.addItem(self.compositeNodeList, "composite nodes")
dockWidget = QtGui.QDockWidget("Noodles node repository")
dockWidget.setWidget(self.nodeRepository)
self.addDockWidget(Qt.RightDockWidgetArea, dockWidget)
self.show()
def closeEvent(self, event):
pass
# reply = QtGui.QMessageBox.question(self, 'Message',
# "Are you sure to quit?", QtGui.QMessageBox.Yes |
# QtGui.QMessageBox.No, QtGui.QMessageBox.No)
# if reply == QtGui.QMessageBox.Yes:
# event.accept()
# else:
# event.ignore()
#self.sourceView.backend.stop()
def main(model):
app = QtGui.QApplication(sys.argv)
# Qode.backend.CodeCompletionWorker.providers.append(
# backend.DocumentWordsProvider())
# Qode.backend.serve_forever()
win = NoodlesWindow(model)
sys.exit(app.exec_())
| 32.102564 | 92 | 0.638711 | 381 | 3,756 | 6.209974 | 0.448819 | 0.022823 | 0.021978 | 0.021555 | 0.035503 | 0.035503 | 0.01437 | 0 | 0 | 0 | 0 | 0.006685 | 0.243344 | 3,756 | 116 | 93 | 32.37931 | 0.825827 | 0.228701 | 0 | 0.114754 | 0 | 0 | 0.063091 | 0.008062 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0.04918 | 0.065574 | 0 | 0.245902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acc0b1415e03bc6564d0369fac35616c4cd6bb6b | 7,017 | py | Python | modAL/pal.py | mherde/modAL | 10c8b896b8faf2fec4ded2ae704aaa4c3c7505ca | [
"MIT"
] | null | null | null | modAL/pal.py | mherde/modAL | 10c8b896b8faf2fec4ded2ae704aaa4c3c7505ca | [
"MIT"
] | null | null | null | modAL/pal.py | mherde/modAL | 10c8b896b8faf2fec4ded2ae704aaa4c3c7505ca | [
"MIT"
] | null | null | null | import numpy as np
import itertools
from scipy.special import factorial, gammaln
from modAL.utils.parzen_window_classifier import PWC
from typing import Tuple
from scipy.stats import entropy
from sklearn.exceptions import NotFittedError
from sklearn.base import BaseEstimator
from modAL.utils.data import modALinput
from modAL.utils.selection import multi_argmax, shuffled_argmax
def cost_reduction(k_vec_list, C=None, m_max=2, prior=1.e-3):
    """Calculates the expected cost reduction for a given maximal number of hypothetically acquired labels,
    observed labels and cost matrix.

    Parameters
    ----------
    k_vec_list: array-like, shape [n_samples, n_classes]
        Observed class label counts for each candidate sample.
    C: array-like, shape = [n_classes, n_classes]
        Cost matrix.
    m_max: int
        Maximal number of hypothetically acquired labels.
    prior : int | array-like, shape [n_classes]
        Prior value for each class.

    Returns
    -------
    expected_cost_reduction: array-like, shape [n_samples]
        Expected cost reduction for given parameters.
    """
    n_classes = len(k_vec_list[0])
    n_samples = len(k_vec_list)

    # check cost matrix
    C = 1 - np.eye(n_classes) if C is None else np.asarray(C)

    # generate labelling vectors for all possible m values
    l_vec_list = np.vstack([gen_l_vec_list(m, n_classes) for m in range(m_max + 1)])
    m_list = np.sum(l_vec_list, axis=1)
    n_l_vecs = len(l_vec_list)

    # compute optimal cost-sensitive decision for all combinations of k- and l-vectors
    k_l_vec_list = np.swapaxes(np.tile(k_vec_list, (n_l_vecs, 1, 1)), 0, 1) + l_vec_list
    y_hats = np.argmin(k_l_vec_list @ C, axis=2)

    # add prior to k-vectors
    prior = prior * np.ones(n_classes)
    k_vec_list = np.asarray(k_vec_list) + prior

    # all combinations of k-, l-, and prediction indicator vectors
    combs = [k_vec_list, l_vec_list, np.eye(n_classes)]
    combs = np.asarray([list(elem) for elem in list(itertools.product(*combs))])

    # three factors of the closed form solution
    factor_1 = 1 / euler_beta(k_vec_list)
    factor_2 = multinomial(l_vec_list)
    factor_3 = euler_beta(np.sum(combs, axis=1)).reshape(n_samples, n_l_vecs, n_classes)

    # expected classification cost for each m
    m_sums = np.asarray(
        [factor_1[k_idx] * np.bincount(m_list, factor_2 * [C[:, y_hats[k_idx, l_idx]] @ factor_3[k_idx, l_idx]
                                                           for l_idx in range(n_l_vecs)]) for k_idx in
         range(n_samples)])

    # compute classification cost reduction as difference
    gains = np.zeros((n_samples, m_max)) + m_sums[:, 0].reshape(-1, 1)
    gains -= m_sums[:, 1:]

    # normalize classification cost reduction by number of hypothetical label acquisitions
    gains /= np.arange(1, m_max + 1)

    return np.max(gains, axis=1)
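

# Illustrative usage sketch (added for clarity; the data below is invented and is
# not part of the original module). It shows the expected input layout: each row
# of `k_vecs` holds the observed class-label counts of one candidate sample.
def _example_cost_reduction():  # pragma: no cover
    k_vecs = np.array([[2, 0, 1],   # candidate with three observed labels
                       [0, 0, 0]])  # candidate with no observed labels yet
    # expected cost reduction per candidate, using the default 0/1 cost matrix
    return cost_reduction(k_vecs, m_max=2, prior=1.e-3)  # shape (2,)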
def gen_l_vec_list(m_approx, n_classes):
    """
    Creates all possible class labelling vectors for a given number of hypothetically acquired labels and a given
    number of classes.

    Parameters
    ----------
    m_approx: int
        Number of hypothetically acquired labels.
    n_classes: int
        Number of classes.

    Returns
    -------
    label_vec_list: array-like, shape = [n_labelings, n_classes]
        All possible class labelings for given parameters.
    """
    label_vec_list = [[]]
    label_vec_res = np.arange(m_approx + 1)
    for i in range(n_classes - 1):
        new_label_vec_list = []
        for labelVec in label_vec_list:
            for newLabel in label_vec_res[label_vec_res - (m_approx - sum(labelVec)) <= 1.e-10]:
                new_label_vec_list.append(labelVec + [newLabel])
        label_vec_list = new_label_vec_list

    new_label_vec_list = []
    for labelVec in label_vec_list:
        new_label_vec_list.append(labelVec + [m_approx - sum(labelVec)])
    label_vec_list = np.array(new_label_vec_list, int)

    return label_vec_list
def euler_beta(a):
    """
    Represents the Euler beta function: B(a(i)) = Gamma(a(i,1))*...*Gamma(a(i,n))/Gamma(a(i,1)+...+a(i,n))

    Parameters
    ----------
    a: array-like, shape (m, n)
        Vectors to be evaluated.

    Returns
    -------
    result: array-like, shape (m)
        Euler beta function results [B(a(0)), ..., B(a(m))].
    """
    return np.exp(np.sum(gammaln(a), axis=1) - gammaln(np.sum(a, axis=1)))
def multinomial(a):
    """
    Computes the multinomial coefficient: Mult(a(i)) = (a(i,1)+...+a(i,n))!/(a(i,1)!...a(i,n)!)

    Parameters
    ----------
    a: array-like, shape (m, n)
        Vectors to be evaluated.

    Returns
    -------
    result: array-like, shape (m)
        Multinomial coefficients [Mult(a(0)), ..., Mult(a(m))].
    """
    return factorial(np.sum(a, axis=1)) / np.prod(factorial(a), axis=1)
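

# Worked example (added for clarity, not part of the original module):
# for a = [[1, 2]], multinomial gives (1+2)!/(1!*2!) = 3 and euler_beta gives
# Gamma(1)*Gamma(2)/Gamma(3) = 1*1/2 = 0.5.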
def probabilistic_al(classifier: BaseEstimator, X: modALinput,
                     n_instances: int = 1, random_tie_break: bool = False,
                     **pal_kwargs) -> Tuple[np.ndarray, modALinput]:
    """
    Probabilistic active learning query strategy. Selects the instances with the
    largest density-weighted expected cost reduction.

    Args:
        classifier: The classifier for which the labels are to be queried.
        X: The pool of samples to query from.
        n_instances: Number of samples to be queried.
        random_tie_break: If True, shuffles utility scores to randomize the order. This
            can be used to break the tie when the highest utility score is not unique.
        **pal_kwargs: Keyword arguments for the probabilistic active learning strategy,
            e.g. 'prior' and 'n_classes'.

    Returns:
        The indices of the instances from X chosen to be labelled;
        the instances from X chosen to be labelled.
    """
    prior = pal_kwargs.pop('prior', 0.001)
    n_classes = pal_kwargs.pop('n_classes', 3)

    X_labeled = classifier.X_training if classifier.X_training is not None else np.array([])
    y_labeled = classifier.y_training if classifier.y_training is not None else np.array([])
    X_cand = X if X is not None else np.array([])

    X = []
    for x in X_cand:
        X.append(x)
    for x in X_labeled:
        X.append(x)
    X = np.array(X)

    # Determine gamma with heuristic
    delta = np.sqrt(2) * 1e-6
    N = min(X.shape[0] * len(X), 200)
    D = X.shape[1]
    s = np.sqrt((2 * N * D) / ((N - 1) * np.log((N - 1) / delta**2)))
    gamma = 1 / (2 * s**2)

    # Calculate similarities
    clf_sim = PWC(len(X), gamma=gamma)
    clf_sim.fit(X, range(len(X)))
    sim = clf_sim.predict_proba(X, normalize=False)
    densities = np.sum(sim, axis=0)[:len(X_cand)]

    # Calculate gains with PWC
    clf = PWC(n_classes, gamma=gamma)
    clf.fit(X_labeled, y_labeled)
    k_vec = clf.predict_proba(X_cand, normalize=False)
    gains = densities * cost_reduction(k_vec, prior=prior, m_max=1)

    if not random_tie_break:
        query_idx = multi_argmax(gains, n_instances=n_instances)
    else:
        query_idx = shuffled_argmax(gains, n_instances=n_instances)
return query_idx, X[query_idx] | 33.898551 | 118 | 0.652558 | 1,065 | 7,017 | 4.114554 | 0.21784 | 0.051118 | 0.0356 | 0.020539 | 0.207896 | 0.15267 | 0.097672 | 0.070288 | 0.054313 | 0.054313 | 0 | 0.012095 | 0.234146 | 7,017 | 207 | 119 | 33.898551 | 0.803312 | 0.379507 | 0 | 0.094118 | 0 | 0 | 0.002462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.129412 | 0 | 0.247059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acc4cf8b7a0c4399751eca550f31b74f57ee4560 | 453 | py | Python | aoc2019/python/day02.py | austinsalonen/advent_of_code | c085a813511ace023620739f367948c58e7ca5e7 | [
"MIT"
] | null | null | null | aoc2019/python/day02.py | austinsalonen/advent_of_code | c085a813511ace023620739f367948c58e7ca5e7 | [
"MIT"
] | null | null | null | aoc2019/python/day02.py | austinsalonen/advent_of_code | c085a813511ace023620739f367948c58e7ca5e7 | [
"MIT"
] | null | null | null | from int_code import program, IntCode
from copy import copy
def run(p1, p2):
    p = program('day02.input')
    p = [p[0]] + [p1, p2] + p[3:]
    c = IntCode(p)
    c.run()
    return c.get(0)
print('part 1 =', run(12, 2))
def search_for(desired, path):
    for n in range(0,100):
        for v in range(0,100):
            if run(n, v) == desired:
                return n*100 + v
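
# Added note, assuming the Advent of Code 2019 day 2 puzzle statement:
# part 2 asks for 100 * noun + verb, hence the n*100 + v encoding above.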
print('part 2 =', search_for(19690720, 'day02.input') )
# part 1 = 2894520
# part 2 = 9342
# [Finished in 1.2s] | 19.695652 | 55 | 0.613687 | 84 | 453 | 3.27381 | 0.464286 | 0.029091 | 0.036364 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 0.205298 | 453 | 23 | 56 | 19.695652 | 0.625 | 0.108168 | 0 | 0 | 0 | 0 | 0.094763 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.4 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acc5bd8c02af8c28273e6e38732cb07d6a09c2a3 | 9,088 | py | Python | dapper/tools/series.py | aperrin66/DAPPER | d9d09ed87ca58d59972296e317bfeea50ba6cdd0 | [
"MIT"
] | 15 | 2021-02-23T01:39:01.000Z | 2021-03-24T00:10:00.000Z | dapper/tools/series.py | aperrin66/DAPPER | d9d09ed87ca58d59972296e317bfeea50ba6cdd0 | [
"MIT"
] | null | null | null | dapper/tools/series.py | aperrin66/DAPPER | d9d09ed87ca58d59972296e317bfeea50ba6cdd0 | [
"MIT"
] | 1 | 2021-05-29T08:42:15.000Z | 2021-05-29T08:42:15.000Z | """Time series management and processing."""
import numpy as np
from numpy import nan
from patlib.std import find_1st_ind
from struct_tools import NicePrint
from dapper.tools.rounding import UncertainQtty
def auto_cov(xx, nlags=4, zero_mean=False, corr=False):
"""Auto covariance function, computed along axis 0.
- `nlags`: max lag (offset) for which to compute acf.
- `corr` : normalize acf by `acf[0]` so as to return auto-CORRELATION.
With `corr=True`, this is identical to
`statsmodels.tsa.stattools.acf(xx,True,nlags)`
"""
assert nlags < len(xx)
N = len(xx)
A = xx if zero_mean else (xx - xx.mean(0))
acovf = np.zeros((nlags+1,)+xx.shape[1:])
for i in range(nlags+1):
Left = A[np.arange(N-i)]
Right = A[np.arange(i, N)]
acovf[i] = (Left*Right).sum(0)/(N-i)
if corr:
acovf /= acovf[0]
return acovf
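# Quick sanity check (added comment, not part of the original module): with
# corr=True the lag-0 entry is always 1, and for an uncorrelated (white noise)
# series the remaining entries are close to 0.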
def fit_acf_by_AR1(acf_empir, nlags=None):
"""Fit an empirical auto cov function (ACF) by that of an AR1 process.
- `acf_empir`: auto-corr/cov-function.
- `nlags`: length of ACF to use in AR(1) fitting
"""
if nlags is None:
nlags = len(acf_empir)
# geometric_mean = ss.mstats.gmean
def geometric_mean(xx): return np.exp(np.mean(np.log(xx)))
def mean_ratio(xx):
return geometric_mean([xx[i]/xx[i-1] for i in range(1, len(xx))])
# Negative correlation => Truncate ACF
neg_ind = find_1st_ind(np.array(acf_empir) <= 0)
acf_empir = acf_empir[:neg_ind]
if len(acf_empir) == 0:
return 0
elif len(acf_empir) == 1:
return 0.01
else:
return mean_ratio(acf_empir)
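# Added comment for clarity: for an exact AR(1) ACF [1, a, a**2, ...] every
# successive ratio equals a, so the geometric mean of the ratios recovers the
# AR(1) coefficient exactly; truncating at the first non-positive value guards
# the log inside geometric_mean.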
def estimate_corr_length(xx):
r"""Estimate the correlation length of a time series.
For explanation, see `dapper.mods.LA.homogeneous_1D_cov`.
Also note that, for exponential corr function, as assumed here,
$$\text{corr}(L) = \exp(-1) \approx 0.368$$
"""
acovf = auto_cov(xx, min(100, len(xx)-2))
a = fit_acf_by_AR1(acovf)
if a == 0:
L = 0
else:
L = 1/np.log(1/a)
return L
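# Worked example (added for clarity): with a fitted AR(1) coefficient a = 0.9
# the estimate is L = 1/log(1/0.9), roughly 9.5, and 0.9**9.5 is roughly
# exp(-1), matching the exp(-1) convention quoted in the docstring.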
def mean_with_conf(xx):
"""Compute the mean of a 1d iterable `xx`.
Also provide confidence of mean,
as estimated from its correlation-corrected variance.
"""
mu = np.mean(xx)
N = len(xx)
# TODO 3: review
if (not np.isfinite(mu)) or N <= 5:
uq = UncertainQtty(mu, np.nan)
elif np.allclose(xx, mu):
uq = UncertainQtty(mu, 0)
else:
acovf = auto_cov(xx)
var = acovf[0]
var /= N
# Estimate (fit) ACF
a = fit_acf_by_AR1(acovf)
# If xx[k] where independent of xx[k-1],
# then std_of_mu is the end of the story.
# The following corrects for the correlation in the time series.
#
# See https://stats.stackexchange.com/q/90062
# c = sum([(N-k)*a**k for k in range(1,N)])
# But this series is analytically tractable:
c = ((N-1)*a - N*a**2 + a**(N+1)) / (1-a)**2
confidence_correction = 1 + 2/N * c
var *= confidence_correction
uq = UncertainQtty(mu, np.sqrt(var))
return uq
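# Minimal usage sketch (added; the series below is invented and not part of the
# original module). The returned UncertainQtty carries the sample mean together
# with a confidence widened by the estimated autocorrelation.
def _example_mean_with_conf():  # pragma: no cover
    xx = np.sin(np.linspace(0, 20, 500)) + 1.0  # strongly autocorrelated series
    return mean_with_conf(xx)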
class StatPrint(NicePrint):
"""Set `NicePrint` options suitable for stats."""
printopts = dict(
excluded=NicePrint.printopts["excluded"]+["HMM", "LP_instance"],
ordering="linenumber",
reverse=True,
indent=2,
aliases={
'f': 'Forecast (.f)',
'a': 'Analysis (.a)',
's': 'Smoothed (.s)',
'u': 'Universal (.u)',
'm': 'Field mean (.m)',
'ma': 'Field mean-abs (.ma)',
'rms': 'Field root-mean-square (.rms)',
'gm': 'Field geometric-mean (.gm)',
},
)
# Adjust np.printoptions before NicePrint
def __repr__(self):
with np.printoptions(threshold=10, precision=3):
return super().__repr__()
def __str__(self):
with np.printoptions(threshold=10, precision=3):
return super().__str__()
def monitor_setitem(cls):
"""Modify cls to track of whether its `__setitem__` has been called.
See sub.py for a sublcass solution (drawback: creates a new class).
"""
orig_setitem = cls.__setitem__
def setitem(self, key, val):
orig_setitem(self, key, val)
self.were_changed = True
cls.__setitem__ = setitem
# Using class var for were_changed => don't need explicit init
cls.were_changed = False
if issubclass(cls, NicePrint):
cls.printopts['excluded'] = \
cls.printopts.get('excluded', []) + ['were_changed']
return cls
@monitor_setitem
class DataSeries(StatPrint):
"""Basically just an `np.ndarray`. But adds:
- Possibility of adding attributes.
- The class (type) provides a way to ascertain if an attribute is a series.
Note: subclassing `ndarray` is too dirty => We'll just use the
`array` attribute, and provide `{s,g}etitem`.
"""
def __init__(self, shape, **kwargs):
self.array = np.full(shape, nan, **kwargs)
def __len__(self): return len(self.array)
def __getitem__(self, key): return self.array[key]
def __setitem__(self, key, val): self.array[key] = val
@monitor_setitem
class FAUSt(DataSeries, StatPrint):
"""Container for time series of a statistic from filtering.
Four attributes, each of which is an ndarray:
- `.f` for forecast , `(KObs+1,)+item_shape`
- `.a` for analysis , `(KObs+1,)+item_shape`
- `.s` for smoothed , `(KObs+1,)+item_shape`
- `.u` for universal/all , `(K +1,)+item_shape`
If `store_u=False`, then `.u` series has shape `(1,)+item_shape`,
wherein only the most-recently-written item is stored.
Series can also be indexed as in
self[kObs,'a']
self[whatever,kObs,'a']
# ... and likewise for 'f' and 's'. For 'u', can use:
self[k,'u']
self[k,whatever,'u']
.. note:: If a data series only pertains to analysis times,
then you should use a plain np.array instead.
"""
def __init__(self, K, KObs, item_shape, store_u, store_s, **kwargs):
"""Construct object.
- `item_shape` : shape of an item in the series.
- `store_u` : if False: only the current value is stored.
- `kwargs` : passed on to ndarrays.
"""
self.f = np.full((KObs+1,)+item_shape, nan, **kwargs)
self.a = np.full((KObs+1,)+item_shape, nan, **kwargs)
if store_s:
self.s = np.full((KObs+1,)+item_shape, nan, **kwargs)
if store_u:
self.u = np.full((K + 1,)+item_shape, nan, **kwargs)
else:
self.u = np.full((1,)+item_shape, nan, **kwargs)
# We could just store the input values for these attrs, but using
# property => Won't be listed in vars(self), and un-writeable.
item_shape = property(lambda self: self.a.shape[1:])
store_u = property(lambda self: len(self.u) > 1)
def _ind(self, key):
"""Aux function to unpack `key` (`k,kObs,faus`)"""
if key[-1] == 'u':
return key[0] if self.store_u else 0
else:
return key[-2]
def __setitem__(self, key, item):
getattr(self, key[-1])[self._ind(key)] = item
def __getitem__(self, key):
return getattr(self, key[-1])[self._ind(key)]
class RollingArray:
"""ND-Array that implements "leftward rolling" along axis 0.
Used for data that gets plotted in sliding graphs.
"""
def __init__(self, shape, fillval=nan):
self.array = np.full(shape, fillval)
self.k1 = 0 # previous k
self.nFilled = 0
def insert(self, k, val):
dk = k-self.k1
# Old (more readable?) version:
# if dk in [0,1]: # case: forecast or analysis update
# self.array = np.roll(self.array, -1, axis=0)
# elif dk>1: # case: user has skipped ahead (w/o liveplotting)
# self.array = np.roll(self.array, -dk, axis=0)
# self.array[-dk:] = nan
# self.array[-1] = val
dk = max(1, dk)
# TODO 7: Should have used deque?
self.array = np.roll(self.array, -dk, axis=0)
self.array[-dk:] = nan
self.array[-1:] = val
self.k1 = k
self.nFilled = min(len(self), self.nFilled+dk)
def leftmost(self):
return self[len(self)-self.nFilled]
def span(self):
return (self.leftmost(), self[-1])
@property
def T(self):
return self.array.T
def __array__(self, _dtype=None): return self.array
def __len__(self): return len(self.array)
def __repr__(self): return 'RollingArray:\n%s' % str(self.array)
def __getitem__(self, key): return self.array[key]
def __setitem__(self, key, val):
# Don't implement __setitem__ because leftmost() is then
# not generally meaningful (i.e. if an element is set in the middle).
# Of course self.array can still be messed with.
raise AttributeError("Values should be set with update()")
| 30.599327 | 77 | 0.591989 | 1,295 | 9,088 | 4.027799 | 0.273359 | 0.036235 | 0.019172 | 0.016104 | 0.150498 | 0.117906 | 0.113305 | 0.096434 | 0.081288 | 0.081288 | 0 | 0.01528 | 0.272667 | 9,088 | 296 | 78 | 30.702703 | 0.773828 | 0.372469 | 0 | 0.116438 | 0 | 0 | 0.04988 | 0 | 0 | 0 | 0 | 0.006757 | 0.006849 | 1 | 0.191781 | false | 0 | 0.034247 | 0.082192 | 0.383562 | 0.041096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acc5f02f1f059fd65da26d3feef5967ff60b2ed0 | 2,566 | py | Python | display/models.py | SmithChebesta/uni-database | 7394c5652f5cd260593951fca08905201ed07d3b | [
"MIT"
] | null | null | null | display/models.py | SmithChebesta/uni-database | 7394c5652f5cd260593951fca08905201ed07d3b | [
"MIT"
] | 8 | 2020-06-06T00:28:14.000Z | 2022-02-10T13:59:20.000Z | display/models.py | SmithChebesta/uni-database | 7394c5652f5cd260593951fca08905201ed07d3b | [
"MIT"
] | null | null | null | from django.db import models
import datetime
class Tag(models.Model):
    tag = models.TextField(primary_key=True)
    description = models.TextField(null=True, blank=True, default=None)


class Customer(models.Model):
    customer_name = models.TextField()
    customer_id = models.TextField(primary_key=True)
    tag = models.ForeignKey("Tag", on_delete=models.CASCADE)
    distributor_name = models.TextField(blank=True, default=None, null=True)
    accounting_name = models.TextField(blank=True, default=None, null=True)
    accounting_contact = models.TextField(blank=True, default=None, null=True)
    technical_name = models.TextField(blank=True, default=None, null=True)
    technical_contact = models.TextField(blank=True, default=None, null=True)


class Webapp(models.Model):
    system_name = models.TextField(primary_key=True)
    customer_id = models.ForeignKey("Customer", on_delete=models.CASCADE)
    product = models.TextField()
    super_admin_id = models.TextField()
    super_admin_password = models.TextField()
    url = models.URLField(max_length=200)
    drive = models.URLField(max_length=200, blank=True,
                            default=None, null=True)
    max_users = models.IntegerField()
    organizationID = models.TextField()
    status = models.BooleanField(default=False)


class service(models.Model):
    customer_id = models.ForeignKey("Customer", on_delete=models.CASCADE)
    system_name = models.OneToOneField("Webapp", on_delete=models.CASCADE)
    service_start_date = models.DateField()
    service_end_date = models.DateField()
    product_type = models.TextField(null=True, blank=True)
    service_type = models.TextField(null=True, blank=True)

    @property
    def status(self):
        now = datetime.date.today()
        return self.service_start_date < now and now < self.service_end_date

    @property
    def duration(self):
        return f'{(self.service_end_date - self.service_start_date).days} days'


class Gateway(models.Model):
    gateway_id = models.TextField(primary_key=True)
    customer_id = models.ForeignKey("Customer", on_delete=models.CASCADE)
    system_name = models.ForeignKey(
        "service", on_delete=models.CASCADE, blank=True, null=True)
    uid_list = models.TextField()
    IMEI_MAC = models.TextField()
    refresh_rate = models.FloatField()
    firmware_upgrade = models.TextField(blank=True, default=None, null=True)
    moblie_number = models.TextField(blank=True, default=None, null=True)
    max_sms = models.IntegerField(blank=True, default=0, null=True)
| 38.878788 | 79 | 0.726422 | 321 | 2,566 | 5.647975 | 0.242991 | 0.173745 | 0.088252 | 0.099283 | 0.480971 | 0.436293 | 0.400441 | 0.341975 | 0.294539 | 0.20353 | 0 | 0.003256 | 0.16212 | 2,566 | 65 | 80 | 39.476923 | 0.84 | 0 | 0 | 0.098039 | 0 | 0 | 0.039361 | 0.020655 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0.019608 | 0.039216 | 0.019608 | 0.901961 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acc89cd542378ad659c54f89c08a369cc8f703da | 210 | py | Python | learning/haarcascades/tutorial/pos_img_resize.py | Tomspiano/ImageProcessing | daea6a230463a49f13b7432e8e5d5e5de1958d40 | [
"Apache-2.0"
] | 2 | 2020-10-24T15:50:41.000Z | 2020-10-25T08:46:11.000Z | learning/haarcascades/tutorial/pos_img_resize.py | Tomspiano/ImageProcessing | daea6a230463a49f13b7432e8e5d5e5de1958d40 | [
"Apache-2.0"
] | 3 | 2020-06-04T18:27:56.000Z | 2020-06-04T18:44:30.000Z | learning/haarcascades/tutorial/pos_img_resize.py | Tomspiano/Introduction-to-OpenCV | daea6a230463a49f13b7432e8e5d5e5de1958d40 | [
"Apache-2.0"
] | null | null | null | import cv2
path = input('path: ') # pen.jpg
w = eval(input('width: ')) # 50
h = eval(input('height: ')) # 50
size = (w, h)
img = cv2.imread(path)
resized = cv2.resize(img, size)
cv2.imwrite(path, resized)
| 19.090909 | 33 | 0.614286 | 33 | 210 | 3.909091 | 0.545455 | 0.139535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046512 | 0.180952 | 210 | 10 | 34 | 21 | 0.703488 | 0.061905 | 0 | 0 | 0 | 0 | 0.108808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acc8af17ca466f4852c1868465a0d9790d573121 | 19,747 | py | Python | storyruntime/processing/Lexicon.py | adnrs96/runtime | e824224317e6aa108cf06968474fc44fa33488d6 | [
"Apache-2.0"
] | null | null | null | storyruntime/processing/Lexicon.py | adnrs96/runtime | e824224317e6aa108cf06968474fc44fa33488d6 | [
"Apache-2.0"
] | null | null | null | storyruntime/processing/Lexicon.py | adnrs96/runtime | e824224317e6aa108cf06968474fc44fa33488d6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import asyncio
import time
from .Mutations import Mutations
from .Services import Services
from .. import Metrics
from ..Exceptions import InvalidKeywordUsage, \
StoryscriptError, StoryscriptRuntimeError
from ..Story import Story
from ..Types import StreamingService
from ..constants import ContextConstants
from ..constants.LineConstants import LineConstants
from ..constants.LineSentinels import LineSentinels, ReturnSentinel
from ..utils import Resolver
class Lexicon:
"""
Lexicon of possible line actions and their implementation
"""
@staticmethod
async def execute(logger, story, line):
"""
Runs a service with the resolution values as commands
"""
service = line[LineConstants.service]
start = time.time()
if line.get('enter') is not None:
"""
When a service to be executed has an 'enter' line number,
it's a streaming service. Let's bring up the service and
update the context with the output name.
Example:
foo stream as client
when client grep:'bar' as result
# do something with result
"""
output = await Services.start_container(story, line)
Metrics.container_start_seconds_total.labels(
app_id=story.app.app_id,
story_name=story.name, service=service
).observe(time.time() - start)
story.end_line(line['ln'], output=output,
assign={'paths': line.get('output')})
return Lexicon.line_number_or_none(story.line(line.get('next')))
else:
output = await Services.execute(story, line)
Metrics.container_exec_seconds_total.labels(
app_id=story.app.app_id,
story_name=story.name, service=service
).observe(time.time() - start)
if line.get('name') and len(line['name']) == 1:
story.end_line(line['ln'], output=output,
assign={'paths': line['name']})
else:
story.end_line(line['ln'], output=output,
assign=line.get('output'))
return Lexicon.line_number_or_none(story.line(line.get('next')))
@staticmethod
async def execute_line(logger, story, line_number):
"""
Executes a single line by calling the Lexicon for various operations.
To execute a function completely, see Lexicon#call.
:return: Returns the next line number to be executed
(return value from Lexicon), or None if there is none.
"""
line: dict = story.line(line_number)
story.start_line(line_number)
with story.new_frame(line_number):
try:
method = line['method']
if method == 'if' or method == 'else' or method == 'elif':
return await Lexicon.if_condition(logger, story, line)
elif method == 'for':
return await Lexicon.for_loop(logger, story, line)
elif method == 'execute':
return await Lexicon.execute(logger, story, line)
elif method == 'set' or method == 'expression' \
or method == 'mutation':
return await Lexicon.set(logger, story, line)
elif method == 'call':
return await Lexicon.call(logger, story, line)
elif method == 'function':
return await Lexicon.function(logger, story, line)
elif method == 'when':
return await Lexicon.when(logger, story, line)
elif method == 'return':
return await Lexicon.ret(logger, story, line)
elif method == 'break':
return await Lexicon.break_(logger, story, line)
elif method == 'continue':
return await Lexicon.continue_(logger, story, line)
elif method == 'while':
return await Lexicon.while_(logger, story, line)
elif method == 'try':
return await Lexicon.try_catch(logger, story, line)
elif method == 'throw':
return await Lexicon.throw(logger, story, line)
else:
raise NotImplementedError(
f'Unknown method to execute: {method}'
)
except BaseException as e:
# Don't wrap StoryscriptError.
if isinstance(e, StoryscriptError):
e.story = story # Always set.
e.line = line # Always set.
raise e
raise StoryscriptRuntimeError(
message='Failed to execute line',
story=story, line=line, root=e)
@staticmethod
async def execute_block(logger, story, parent_line: dict):
"""
Executes all the lines whose parent is parent_line, and returns
either one of the following:
1. A sentinel (from LineSentinels) - if this was returned by execute()
2. None in all other cases
The result can have special significance, such as the BREAK
line sentinel.
"""
next_line = story.line(parent_line['enter'])
# If this block represents a streaming service, copy over it's
# output to the context, so that Lexicon can read it later.
if parent_line.get('output') is not None \
and parent_line.get('method') == 'when':
story.context[ContextConstants.service_output] = \
parent_line['output'][0]
if story.context.get(ContextConstants.service_event) is not None:
story.context[parent_line['output'][0]] = \
story.context[ContextConstants.service_event].get('data')
while next_line is not None \
and story.line_has_parent(parent_line['ln'], next_line):
result = await Lexicon.execute_line(logger, story, next_line['ln'])
if LineSentinels.is_sentinel(result):
return result
next_line = story.line(result)
return None
@staticmethod
async def function(logger, story, line):
"""
Functions are not executed when they're encountered.
This method returns the next block's line number,
if there are more statements to be executed.
"""
return Lexicon.line_number_or_none(story.next_block(line))
@staticmethod
async def call(logger, story, line):
"""
Calls a particular function indicated by the line.
This will setup a new context for the
function block to be executed, and will return the output (if any).
"""
current_context = story.context
function_line = story.function_line_by_name(line.get('function'))
context = story.context_for_function_call(line, function_line)
return_from_function_call = None
try:
story.set_context(context)
result = await Lexicon.execute_block(logger, story, function_line)
if LineSentinels.is_sentinel(result):
if not isinstance(result, ReturnSentinel):
raise StoryscriptRuntimeError(
f'Uncaught sentinel has'
f' escaped! sentinel={result}'
)
return_from_function_call = result.return_value
return Lexicon.line_number_or_none(story.line(line.get('next')))
finally:
story.set_context(current_context)
if line.get('name') is not None and len(line['name']) > 0:
story.end_line(line['ln'],
output=return_from_function_call,
assign={
'$OBJECT': 'path', 'paths': line['name']})
@staticmethod
def _does_line_have_parent_method(story, line, parent_method_wanted):
# Just walk up the stack using 'parent'.
while True:
parent_line = line.get('parent')
if parent_line is None:
return False
parent_line = story.line(parent_line)
if parent_line['method'] == parent_method_wanted:
return True
else:
line = parent_line
@staticmethod
async def break_(logger, story, line):
# Ensure that we're in a foreach loop. If we are, return BREAK,
# otherwise raise an exception.
if Lexicon._does_line_have_parent_method(story, line, 'for'):
return LineSentinels.BREAK
else:
# There is no parent, this is an illegal usage of break.
raise InvalidKeywordUsage(story, line, 'break')
@staticmethod
async def continue_(logger, story, line):
# Ensure that we're in a foreach loop. If we are, return CONTINUE,
# otherwise raise an exception.
if Lexicon._does_line_have_parent_method(story, line, 'for') or \
Lexicon._does_line_have_parent_method(story, line, 'while'):
return LineSentinels.CONTINUE
else:
# There is no parent, this is an illegal usage of continue.
raise InvalidKeywordUsage(story, line, 'continue')
@staticmethod
def line_number_or_none(line):
if line:
return line['ln']
return None
@staticmethod
async def set(logger, story, line):
value = story.resolve(line['args'][0])
if len(line['args']) > 1:
# Check if args[1] is a mutation.
if line['args'][1]['$OBJECT'] == 'mutation':
value = Mutations.mutate(line['args'][1], value, story, line)
logger.debug(f'Mutation result: {value}')
else:
raise StoryscriptError(
message=f'Unsupported argument in set: '
f'{line["args"][1]["$OBJECT"]}',
story=story, line=line)
story.end_line(line['ln'], output=value,
assign={'$OBJECT': 'path', 'paths': line['name']})
return Lexicon.line_number_or_none(story.line(line.get('next')))
@staticmethod
def _is_if_condition_true(story, line):
if len(line['args']) != 1:
raise StoryscriptError(
message=f'Complex if condition found! '
f'len={len(line["args"])}',
story=story, line=line)
return story.resolve(line['args'][0], encode=False)
@staticmethod
async def if_condition(logger, story, line):
"""
Evaluates the resolution value to decide whether to enter
inside an if-block.
Execution strategy:
1. Evaluate the if condition. If true, return the 'enter' line number
2. If the condition is false, find next elif, and perform step 1
3. If we reach an else block, perform step 1 without condition check
Since the entire if/elif/elif/else block execution happens here,
we can ignore all subsequent elif/else calls, and just return the
next block.
"""
if line['method'] == 'elif' or line['method'] == 'else':
# If something had to be executed in this if/elif/else block, it
# would have been executed already. See execution strategy above.
return Lexicon.line_number_or_none(story.next_block(line))
# while true here because all if/elif/elif/else is executed here.
while True:
logger.log('lexicon-if', line, story.context)
if line['method'] == 'else':
result = True
else:
result = Lexicon._is_if_condition_true(story, line)
if result:
return line['enter']
else:
# Check for an elif block or an else block
# (step 2 of execution strategy).
next_line = story.next_block(line)
if next_line is None:
return None
# Ensure that the elif/else is in the same parent.
if next_line.get('parent') == line.get('parent') and \
(next_line['method'] == 'elif' or
next_line['method'] == 'else'):
# Continuing this loop will mean that step 1 in the
# execution strategy is performed.
line = next_line
continue
else:
# Next block is not a part of the if/elif/else.
return Lexicon.line_number_or_none(next_line)
# Note: Control can NEVER reach here.
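# Illustrative walk-through (added comment, not from the original source): for a
# story block "if a / elif b / else", if_condition is entered once on the "if"
# line and iterates over the sibling elif/else blocks itself; when execution
# later reaches the elif/else lines directly, the early return above simply
# skips ahead to the next block.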
@staticmethod
def unless_condition(logger, story, line):
logger.log('lexicon-unless', line, story.context)
result = story.resolve(line['args'][0], encode=False)
if result:
return line['exit']
return line['enter']
@staticmethod
async def try_catch(logger, story, line):
"""
Executes the try/catch/finally construct. If any StoryscriptError
exception is thrown by the try block, the catch block will be
invoked. However, if the error is not of type StoryscriptError,
then it will be thrown up directly - in this case, the finally
block will not be executed either (since the error that
occurred is not a StoryscriptError, but rather a programming
error in the runtime).
:return: Returns the line to be executed immediately after
the catch block or finally block.
"""
next_line = story.next_block(line)
if next_line is None:
return None
async def next_block_or_finally():
"""
This will execute if the next block is a finally block.
It happens because the lexicon should always execute
a finally block when there's a StoryscriptError
:return: Returns the next line to be executed.
"""
if next_line['method'] != 'finally':
last_block = story.next_block(next_line)
else:
last_block = next_line
if last_block is not None and \
last_block['method'] == 'finally':
await Lexicon.execute_block(logger, story,
last_block)
last_block = story.next_block(last_block)
return Lexicon.line_number_or_none(last_block)
try:
await Lexicon.execute_block(logger, story, line)
except StoryscriptError as e:
if next_line['method'] == 'finally':
# skip right to the finally block
return await next_block_or_finally()
try:
await Lexicon.execute_block(logger, story, next_line)
except StoryscriptError as re:
# if the catch block contains a StoryscriptError,
# we must catch it, and run the finally
# block anyway, followed up by raising the
# exception
await next_block_or_finally()
raise re
return await next_block_or_finally()
@staticmethod
def throw(logger, story, line):
if line['args'] is not None and \
len(line['args']) > 0:
err_str = story.resolve(line['args'][0])
else:
err_str = None
raise StoryscriptError(message=err_str, story=story, line=line)
@staticmethod
async def for_loop(logger, story, line):
"""
Evaluates a for loop.
"""
_list = story.resolve(line['args'][0], encode=False)
output = line['output'][0]
try:
for item in _list:
story.context[output] = item
result = await Lexicon.execute_block(logger, story, line)
if LineSentinels.BREAK == result:
break
if LineSentinels.CONTINUE == result:
continue
elif LineSentinels.is_sentinel(result):
# We do not know what to do with this sentinel,
# so bubble it up.
return result
finally:
# Don't leak the variable to the outer scope.
del story.context[output]
# Use story.next_block(line), because line["exit"] is unreliable...
return Lexicon.line_number_or_none(story.next_block(line))
@staticmethod
async def while_(logger, story, line):
call_count = 0
while Resolver.resolve(line['args'][0], story.context):
# note this is only a temporary solution,
# and we will address this in the future.
if call_count >= 100000:
raise StoryscriptRuntimeError(
message='Call count limit reached within while loop. '
'Only 100000 iterations allowed.',
story=story, line=line
)
result = await Lexicon.execute_block(logger, story, line)
if call_count % 10 == 0:
# Let's sleep so we don't take up 100% of the CPU
await asyncio.sleep(0.0002)
call_count += 1
if result == LineSentinels.CONTINUE:
continue
elif result == LineSentinels.BREAK:
break
elif LineSentinels.is_sentinel(result):
return result
return Lexicon.line_number_or_none(story.next_block(line))
@staticmethod
async def when(logger, story, line):
service = line[LineConstants.service]
# Does this service belong to a streaming service?
s = story.context.get(service)
if isinstance(s, StreamingService):
# Yes, we need to subscribe to an event with the service.
await Services.when(s, story, line)
next_line = story.next_block(line)
return Lexicon.line_number_or_none(next_line)
else:
raise StoryscriptError(
message=f'Unknown service {service} for when!',
story=story, line=line)
@classmethod
async def ret(cls, logger, story: Story, line):
"""
Implementation for return.
The semantics for return are as follows:
1. Stops execution and returns from the nearest when or function block
Returns can happen in two types of blocks:
1. From when blocks - no value may be returned
2. From function blocks - one value may be returned
"""
args = line.get('args', line.get('arguments'))
if args is None:
args = []
if cls._does_line_have_parent_method(story, line, 'when'):
assert len(args) == 0, \
'return may not be used with a value in a when block'
return LineSentinels.RETURN
elif cls._does_line_have_parent_method(story, line, 'function'):
returned_value = None
if len(args) > 0:
assert len(args) == 1, 'multiple return values are not allowed'
returned_value = story.resolve(args[0])
return ReturnSentinel(return_value=returned_value)
else:
# There is no parent, this is an illegal usage of return.
raise InvalidKeywordUsage(story, line, 'return')
| 38.719608 | 79 | 0.567732 | 2,245 | 19,747 | 4.889532 | 0.14833 | 0.050014 | 0.040995 | 0.017491 | 0.285506 | 0.203243 | 0.169172 | 0.141295 | 0.121071 | 0.112508 | 0 | 0.004663 | 0.348357 | 19,747 | 509 | 80 | 38.795678 | 0.848384 | 0.08771 | 0 | 0.318471 | 0 | 0 | 0.065727 | 0.003456 | 0 | 0 | 0 | 0 | 0.006369 | 1 | 0.015924 | false | 0 | 0.038217 | 0 | 0.197452 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acc9223ec7ba974ad2d757345536226e1ad0e172 | 4,691 | py | Python | ffsc/pipeline/nodes/preprocess.py | Lkruitwagen/global-fossil-fuel-supply-chain | f5d804a5f7cee19af46d2f31e635590d3930bacd | [
"MIT"
] | 16 | 2021-02-11T10:30:13.000Z | 2021-11-05T09:46:47.000Z | ffsc/pipeline/nodes/preprocess.py | Lkruitwagen/global-fossil-fuel-supply-chain | f5d804a5f7cee19af46d2f31e635590d3930bacd | [
"MIT"
] | 3 | 2020-02-20T10:00:27.000Z | 2020-03-10T00:34:11.000Z | ffsc/pipeline/nodes/preprocess.py | Lkruitwagen/global-energy-demand | f5d804a5f7cee19af46d2f31e635590d3930bacd | [
"MIT"
] | 3 | 2021-04-06T08:27:07.000Z | 2021-11-05T09:51:45.000Z | import logging, sys, json
#import geopandas as gpd
import pandas as pd
from shapely import geometry
from tqdm import tqdm
tqdm.pandas()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def do_preprocess(gdf, idx_column, idx_prefix, feature_columns, logger):
gdf = pd.DataFrame(gdf).reset_index().rename(columns={idx_column:'unique_id'})
logger.info('Doing geometry to str:')
gdf['geometry'] = gdf['geometry'].progress_apply(lambda el: el.wkt)
if not feature_columns:
gdf['features'] = [json.dumps(dict())]*len(gdf)
else:
gdf['features'] = gdf[feature_columns].to_dict(orient='records')
logger.info('Doing Features column:')
gdf['features'] = gdf['features'].progress_apply(lambda el: json.dumps(el))
gdf['unique_id'] = idx_prefix + '_' + gdf['unique_id'].astype(str)
return gdf[['unique_id','features','geometry']]
def preprocess_shippingroutes(gdf):
logger = logging.getLogger('preprocess_shippingroutes')
return do_preprocess(gdf, 'index','SHIPPINGROUTE',None, logger)
def preprocess_ports(gdf):
logger = logging.getLogger('preprocess_ports')
return do_preprocess(gdf, 'index','PORT',None, logger)
def preprocess_pipelines(fc):
logger = logging.getLogger('preprocess_pipelines')
logger.info(f'Len fts: {len(fc["features"])}')
records = []
for ii_f, ft in enumerate(tqdm(fc['features'])):
records.append(dict(
unique_id='PIPELINE_'+str(ii_f),
features=json.dumps({}),
geometry=geometry.shape(ft['geometry']).wkt
)
)
return pd.DataFrame.from_records(records)
def preprocess_coalmines(gdf):
logger = logging.getLogger('preprocess_coalmines')
return do_preprocess(gdf, 'index','COALMINE',None, logger)
def preprocess_oilfields(gdf):
logger = logging.getLogger('preprocess_oilfields')
return do_preprocess(gdf, 'index','OILFIELD',None, logger)
def preprocess_lngterminals(gdf):
logger = logging.getLogger('preprocess_lngterminals')
return do_preprocess(gdf, 'index','LNGTERMINAL',None, logger)
def preprocess_powerstations(gdf):
logger = logging.getLogger('preprocess_powerstations')
gdf = gdf[gdf[['fuel1','fuel2','fuel3','fuel4']].isin(['Gas','Oil','Coal']).any(axis=1)]
gdf = gdf[~((gdf['latitude']>90) | (gdf['latitude']<-90) | (gdf['longitude']<-180) | (gdf['longitude']>180))]
return do_preprocess(gdf, 'index','POWERSTATION',['capacity_mw','fuel1','fuel2','fuel3','fuel4'], logger)
def preprocess_railways(fc):
logger = logging.getLogger('preprocess_railways')
logger.info(f'Len fts: {len(fc["features"])}')
records = []
for ii_f, ft in enumerate(tqdm(fc['features'])):
records.append(dict(
unique_id='RAILWAY_'+str(ii_f),
features=json.dumps({}),
geometry=geometry.shape(ft['geometry']).wkt
)
)
return pd.DataFrame.from_records(records)
def preprocess_refineries(gdf_refineries,gdf_processingplants):
logger = logging.getLogger('preprocess_refineries')
gdf = pd.concat([gdf_refineries,gdf_processingplants])
gdf['new_index'] = range(len(gdf))
return do_preprocess(gdf, 'new_index','REFINERY',None, logger)
def preprocess_oilwells(gdf):
logger = logging.getLogger('preprocess_oilwells')
return do_preprocess(gdf, 'index','OILWELL',None, logger)
def preprocess_cities_base(gdf):
logger = logging.getLogger('preprocess_cities')
gdf = pd.DataFrame(gdf).reset_index().rename(columns={'index':'unique_id'})
gdf['unique_id'] = 'CITY' + '_' + gdf['unique_id'].astype(str)
logger.info('Doing geometry to str:')
gdf['geometry'] = gdf['geom_gj'].progress_apply(lambda el: geometry.shape(el).wkt)
#gdf = gdf.rename(columns={'geom_gj':'features'})
#logger.info('Doing small geometry to str:')
gdf['features'] = [json.dumps(dict())]*len(gdf)#gdf['features'].progress_apply(lambda el: geometry.shape(el).wkt)
return gdf[['unique_id','geometry','features']]
def preprocess_cities_euclid(gdf):
logger = logging.getLogger('preprocess_cities')
gdf = pd.DataFrame(gdf).reset_index().rename(columns={'index':'unique_id'})
gdf['unique_id'] = 'CITY' + '_' + gdf['unique_id'].astype(str)
logger.info('Doing geometry to str:')
gdf['geometry'] = gdf['geometry'].progress_apply(lambda el: el.wkt)
gdf = gdf.rename(columns={'geom_gj':'features'})
logger.info('Doing small geometry to str:')
gdf['features'] = gdf['features'].progress_apply(lambda el: geometry.shape(el).wkt)
return gdf | 36.084615 | 117 | 0.667022 | 570 | 4,691 | 5.338596 | 0.198246 | 0.034177 | 0.086756 | 0.126191 | 0.594808 | 0.425567 | 0.425567 | 0.409464 | 0.371015 | 0.371015 | 0 | 0.004886 | 0.170966 | 4,691 | 130 | 118 | 36.084615 | 0.777578 | 0.038158 | 0 | 0.306818 | 0 | 0 | 0.213351 | 0.02994 | 0.022727 | 0 | 0 | 0 | 0 | 1 | 0.147727 | false | 0 | 0.045455 | 0 | 0.340909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acca1ff99e5070fe8085401cf01be96b66e1d473 | 43,022 | py | Python | src/probabilistic_modeling/probabilistic_generalized_rcnn.py | jskhu/probdet-1 | b8bda3bd7cdd573aa9f70a62453d147664211af6 | [
"Apache-2.0"
] | 50 | 2021-01-14T03:44:03.000Z | 2022-03-28T12:27:22.000Z | src/probabilistic_modeling/probabilistic_generalized_rcnn.py | jskhu/probdet-1 | b8bda3bd7cdd573aa9f70a62453d147664211af6 | [
"Apache-2.0"
] | 3 | 2021-01-15T22:39:03.000Z | 2021-09-22T15:52:03.000Z | src/probabilistic_modeling/probabilistic_generalized_rcnn.py | jskhu/probdet-1 | b8bda3bd7cdd573aa9f70a62453d147664211af6 | [
"Apache-2.0"
] | 8 | 2021-02-03T02:55:50.000Z | 2022-02-16T14:30:31.000Z | import logging
import numpy as np
import torch
from typing import Dict, List, Union, Optional, Tuple
from torch.nn import functional as F
from torch import nn, distributions
# Detectron imports
import fvcore.nn.weight_init as weight_init
from detectron2.config import configurable
from detectron2.layers import Linear, ShapeSpec, cat, Conv2d, get_norm
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.modeling.roi_heads.box_head import ROI_BOX_HEAD_REGISTRY
from detectron2.structures import Boxes, Instances, ImageList
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from fvcore.nn import smooth_l1_loss
# Project imports
from probabilistic_inference.inference_utils import get_dir_alphas
from probabilistic_modeling.modeling_utils import get_probabilistic_loss_weight, clamp_log_variance, covariance_output_to_cholesky
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
@META_ARCH_REGISTRY.register()
class ProbabilisticGeneralizedRCNN(GeneralizedRCNN):
"""
Probabilistic GeneralizedRCNN class.
"""
def __init__(self, cfg):
super().__init__(cfg)
# Parse configs
self.cls_var_loss = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME
self.compute_cls_var = self.cls_var_loss != 'none'
self.cls_var_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES
self.bbox_cov_loss = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME
self.compute_bbox_cov = self.bbox_cov_loss != 'none'
self.bbox_cov_num_samples = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES
self.bbox_cov_type = cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE
if self.bbox_cov_type == 'diagonal':
# Diagonal covariance matrix has N elements
self.bbox_cov_dims = 4
else:
# Number of elements required to describe an NxN covariance matrix is
# computed as: (N * (N + 1)) / 2
self.bbox_cov_dims = 10
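# Added worked example: for N = 4 box regression parameters, a full covariance
# needs 4 * (4 + 1) / 2 = 10 entries (4 variances plus 6 off-diagonal terms).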
self.dropout_rate = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
self.use_dropout = self.dropout_rate != 0.0
self.num_mc_dropout_runs = -1
self.current_step = 0
# Define custom probabilistic head
self.roi_heads.box_predictor = ProbabilisticFastRCNNOutputLayers(
cfg,
self.roi_heads.box_head.output_shape,
self.compute_cls_var,
self.cls_var_loss,
self.cls_var_num_samples,
self.compute_bbox_cov,
self.bbox_cov_loss,
self.bbox_cov_type,
self.bbox_cov_dims,
self.bbox_cov_num_samples)
# Send to device
self.to(self.device)
def forward(self,
batched_inputs,
return_anchorwise_output=False,
num_mc_dropout_runs=-1):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
return_anchorwise_output (bool): returns raw output for probabilistic inference
num_mc_dropout_runs (int): perform efficient monte-carlo dropout runs by running only the head and
not full neural network.
Returns:
dict[str: Tensor]:
mapping from a named loss to a tensor storing the loss. Used during training only.
"""
if not self.training and num_mc_dropout_runs == -1:
if return_anchorwise_output:
return self.produce_raw_output(batched_inputs)
else:
return self.inference(batched_inputs)
elif self.training and num_mc_dropout_runs > 1:
self.num_mc_dropout_runs = num_mc_dropout_runs
output_list = []
for i in range(num_mc_dropout_runs):
output_list.append(self.produce_raw_output(batched_inputs))
return output_list
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [
x["instances"].to(
self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN,
"'targets' in the model inputs is now renamed to 'instances'!",
n=10)
gt_instances = [x["targets"].to(self.device)
for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator:
proposals, proposal_losses = self.proposal_generator(
images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device)
for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(
images, features, proposals, gt_instances, current_step=self.current_step)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
self.current_step += 1
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def produce_raw_output(self, batched_inputs, detected_instances=None):
"""
Run inference on the given inputs and return proposal-wise output for later postprocessing.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
Returns:
same as in :meth:`forward`.
"""
raw_output = dict()
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [
x["proposals"].to(
self.device) for x in batched_inputs]
# Create raw output dictionary
raw_output.update({'proposals': proposals[0]})
results, _ = self.roi_heads(
images, features, proposals, None, produce_raw_output=True, num_mc_dropout_runs=self.num_mc_dropout_runs)
else:
detected_instances = [x.to(self.device)
for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(
features, detected_instances)
box_cls, box_delta, box_cls_var, box_reg_var = results
raw_output.update({'box_cls': box_cls,
'box_delta': box_delta,
'box_cls_var': box_cls_var,
'box_reg_var': box_reg_var})
return raw_output
@ROI_HEADS_REGISTRY.register()
class ProbabilisticROIHeads(StandardROIHeads):
"""
Probabilistic ROI heads, inherit from standard ROI heads so can be used with mask RCNN in theory.
"""
def __init__(self, cfg, input_shape):
super(ProbabilisticROIHeads, self).__init__(cfg, input_shape)
self.is_mc_dropout_inference = False
self.produce_raw_output = False
self.current_step = 0
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
num_mc_dropout_runs=-1,
produce_raw_output=False,
current_step=0.0,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
"""
See :class:`ROIHeads.forward`.
"""
self.is_mc_dropout_inference = num_mc_dropout_runs > 1
self.produce_raw_output = produce_raw_output
self.current_step = current_step
del images
if self.training and not self.is_mc_dropout_inference:
assert targets
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
if self.training and not self.is_mc_dropout_inference:
losses = self._forward_box(features, proposals)
# Usually the original proposals used by the box head are used by the mask, keypoint
# heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
# predicted by the box head.
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
pred_instances = self._forward_box(features, proposals)
if self.produce_raw_output:
return pred_instances, {}
# During inference cascaded prediction is used: the mask and keypoints heads are only
# applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(
features, pred_instances)
return pred_instances, {}
def _forward_box(
self, features: Dict[str, torch.Tensor], proposals: List[Instances]
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
"""
Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
Args:
features (dict[str, Tensor]): mapping from feature map names to tensor.
Same as in :meth:`ROIHeads.forward`.
proposals (list[Instances]): the per-image object proposals with
their matching ground truth.
Each has fields "proposal_boxes", and "objectness_logits",
"gt_classes", "gt_boxes".
Returns:
In training, a dict of losses.
In inference, a list of `Instances`, the predicted instances.
"""
features = [features[f] for f in self.in_features]
box_features = self.box_pooler(
features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
del box_features
if self.produce_raw_output:
return predictions
if self.training:
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals)
for proposals_per_image, pred_boxes_per_image in zip(
proposals, pred_boxes):
proposals_per_image.proposal_boxes = Boxes(
pred_boxes_per_image)
return self.box_predictor.losses(
predictions, proposals, self.current_step)
else:
pred_instances, _ = self.box_predictor.inference(
predictions, proposals)
return pred_instances
class ProbabilisticFastRCNNOutputLayers(nn.Module):
"""
Four linear layers for predicting Fast R-CNN outputs:
(1) proposal-to-detection box regression deltas
(2) classification scores
(3) box regression deltas covariance parameters (if needed)
(4) classification logits variance (if needed)
"""
@configurable
def __init__(
self,
input_shape,
*,
box2box_transform,
num_classes,
cls_agnostic_bbox_reg=False,
smooth_l1_beta=0.0,
test_score_thresh=0.0,
test_nms_thresh=0.5,
test_topk_per_image=100,
compute_cls_var=False,
compute_bbox_cov=False,
bbox_cov_dims=4,
cls_var_loss='none',
cls_var_num_samples=10,
bbox_cov_loss='none',
bbox_cov_type='diagonal',
dropout_rate=0.0,
annealing_step=0,
bbox_cov_num_samples=1000
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature to this module
box2box_transform (Box2BoxTransform or Box2BoxTransformRotated):
num_classes (int): number of foreground classes
cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
smooth_l1_beta (float): transition point from L1 to L2 loss.
test_score_thresh (float): threshold to filter predictions results.
test_nms_thresh (float): NMS threshold for prediction results.
test_topk_per_image (int): number of top predictions to produce per image.
compute_cls_var (bool): compute classification variance
compute_bbox_cov (bool): compute box covariance regression parameters.
bbox_cov_dims (int): 4 for diagonal covariance, 10 for full covariance.
cls_var_loss (str): name of classification variance loss.
cls_var_num_samples (int): number of samples to be used for loss computation. Usually between 10-100.
bbox_cov_loss (str): name of box covariance loss.
bbox_cov_type (str): 'diagonal' or 'full'. This is used to train with loss functions that accept both types.
dropout_rate (float): 0-1, probability of drop.
annealing_step (int): step used for KL-divergence in evidential loss to fully be functional.
"""
super().__init__()
if isinstance(input_shape, int): # some backward compatibility
input_shape = ShapeSpec(channels=input_shape)
input_size = input_shape.channels * \
(input_shape.width or 1) * (input_shape.height or 1)
self.compute_cls_var = compute_cls_var
self.compute_bbox_cov = compute_bbox_cov
self.bbox_cov_dims = bbox_cov_dims
self.bbox_cov_num_samples = bbox_cov_num_samples
self.dropout_rate = dropout_rate
self.use_dropout = self.dropout_rate != 0.0
self.cls_var_loss = cls_var_loss
self.cls_var_num_samples = cls_var_num_samples
self.annealing_step = annealing_step
self.bbox_cov_loss = bbox_cov_loss
self.bbox_cov_type = bbox_cov_type
# The prediction layer for num_classes foreground classes and one background class
# (hence + 1)
self.cls_score = Linear(input_size, num_classes + 1)
num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
box_dim = len(box2box_transform.weights)
self.bbox_pred = Linear(input_size, num_bbox_reg_classes * box_dim)
nn.init.normal_(self.cls_score.weight, std=0.01)
nn.init.normal_(self.bbox_pred.weight, std=0.001)
for l in [self.cls_score, self.bbox_pred]:
nn.init.constant_(l.bias, 0)
if self.compute_cls_var:
self.cls_var = Linear(input_size, num_classes + 1)
nn.init.normal_(self.cls_var.weight, std=0.0001)
nn.init.constant_(self.cls_var.bias, 0)
if self.compute_bbox_cov:
self.bbox_cov = Linear(
input_size,
num_bbox_reg_classes *
bbox_cov_dims)
nn.init.normal_(self.bbox_cov.weight, std=0.0001)
nn.init.constant_(self.bbox_cov.bias, 0)
self.box2box_transform = box2box_transform
self.smooth_l1_beta = smooth_l1_beta
self.test_score_thresh = test_score_thresh
self.test_nms_thresh = test_nms_thresh
self.test_topk_per_image = test_topk_per_image
@classmethod
def from_config(cls,
cfg,
input_shape,
compute_cls_var,
cls_var_loss,
cls_var_num_samples,
compute_bbox_cov,
bbox_cov_loss,
bbox_cov_type,
bbox_cov_dims,
bbox_cov_num_samples):
return {
"input_shape": input_shape,
"box2box_transform": Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS),
# fmt: off
"num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
"cls_agnostic_bbox_reg": cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG,
"smooth_l1_beta": cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA,
"test_score_thresh": cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
"test_nms_thresh": cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
"test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
"compute_cls_var": compute_cls_var,
"cls_var_loss": cls_var_loss,
"cls_var_num_samples": cls_var_num_samples,
"compute_bbox_cov": compute_bbox_cov,
"bbox_cov_dims": bbox_cov_dims,
"bbox_cov_loss": bbox_cov_loss,
"bbox_cov_type": bbox_cov_type,
"dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE,
"annealing_step": cfg.SOLVER.STEPS[1],
"bbox_cov_num_samples": bbox_cov_num_samples
# fmt: on
}
def forward(self, x):
"""
Returns:
Tensor: Nx(K+1) logits for each box
Tensor: Nx4 or Nx(Kx4) bounding box regression deltas.
Tensor: Nx(K+1) logits variance for each box.
Tensor: Nx4(10) or Nx(Kx4(10)) covariance matrix parameters. 4 if diagonal, 10 if full.
"""
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
scores = self.cls_score(x)
proposal_deltas = self.bbox_pred(x)
# Compute logits variance if needed
if self.compute_cls_var:
score_vars = self.cls_var(x)
else:
score_vars = None
# Compute box covariance if needed
if self.compute_bbox_cov:
proposal_covs = self.bbox_cov(x)
else:
proposal_covs = None
return scores, proposal_deltas, score_vars, proposal_covs
def losses(self, predictions, proposals, current_step=0):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features
that were used to compute predictions.
current_step: current optimizer step. Used for losses with an annealing component.
"""
global device
pred_class_logits, pred_proposal_deltas, pred_class_logits_var, pred_proposal_covs = predictions
if len(proposals):
box_type = type(proposals[0].proposal_boxes)
# cat(..., dim=0) concatenates over all images in the batch
proposals_boxes = box_type.cat(
[p.proposal_boxes for p in proposals])
assert (
not proposals_boxes.tensor.requires_grad), "Proposals should not require gradients!"
# The following fields should exist only when training.
if proposals[0].has("gt_boxes"):
gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
assert proposals[0].has("gt_classes")
gt_classes = cat([p.gt_classes for p in proposals], dim=0)
else:
proposals_boxes = Boxes(
torch.zeros(
0, 4, device=pred_proposal_deltas.device))
no_instances = len(proposals) == 0 # no instances found
# Compute Classification Loss
if no_instances:
# TODO 0.0 * pred.sum() is enough since PT1.6
loss_cls = 0.0 * F.cross_entropy(
pred_class_logits,
torch.zeros(
0,
dtype=torch.long,
device=pred_class_logits.device),
reduction="sum",)
else:
if self.compute_cls_var:
# Compute classification variance according to:
# "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
if self.cls_var_loss == 'loss_attenuation':
num_samples = self.cls_var_num_samples
# Compute standard deviation
pred_class_logits_var = torch.sqrt(
torch.exp(pred_class_logits_var))
# Produce normal samples using logits as the mean and the standard deviation computed above
# Scales with GPU memory. 12 GB ---> 3 Samples per anchor for
# COCO dataset.
univariate_normal_dists = distributions.normal.Normal(
pred_class_logits, scale=pred_class_logits_var)
pred_class_stochastic_logits = univariate_normal_dists.rsample(
(num_samples,))
pred_class_stochastic_logits = pred_class_stochastic_logits.view(
(pred_class_stochastic_logits.shape[1] * num_samples, pred_class_stochastic_logits.shape[2], -1))
pred_class_logits = pred_class_stochastic_logits.squeeze(
2)
# Produce copies of the target classes to match the number of
# stochastic samples.
gt_classes_target = torch.unsqueeze(gt_classes, 0)
gt_classes_target = torch.repeat_interleave(
gt_classes_target, num_samples, dim=0).view(
(gt_classes_target.shape[1] * num_samples, -1))
gt_classes_target = gt_classes_target.squeeze(1)
loss_cls = F.cross_entropy(
pred_class_logits, gt_classes_target, reduction="mean")
elif self.cls_var_loss == 'evidential':
# ToDo: Currently does not provide any reasonable mAP Results
# (15% mAP)
# Assume dirichlet parameters are output.
alphas = get_dir_alphas(pred_class_logits)
# Get sum of all alphas
dirichlet_s = alphas.sum(1).unsqueeze(1)
# Generate one hot vectors for ground truth
one_hot_vectors = torch.nn.functional.one_hot(
gt_classes, alphas.shape[1])
# Compute loss. This loss attempts to put all evidence on the
# correct location.
per_instance_loss = (
one_hot_vectors * (torch.digamma(dirichlet_s) - torch.digamma(alphas)))
# Compute KL divergence regularizer loss
estimated_dirichlet = torch.distributions.dirichlet.Dirichlet(
(alphas - 1.0) * (1.0 - one_hot_vectors) + 1.0)
uniform_dirichlet = torch.distributions.dirichlet.Dirichlet(
torch.ones_like(one_hot_vectors).type(torch.FloatTensor).to(device))
kl_regularization_loss = torch.distributions.kl.kl_divergence(
estimated_dirichlet, uniform_dirichlet)
# Compute final loss
annealing_multiplier = torch.min(
torch.as_tensor(
current_step /
self.annealing_step).to(device),
torch.as_tensor(1.0).to(device))
per_proposal_loss = per_instance_loss.sum(
1) + annealing_multiplier * kl_regularization_loss
# Compute evidence auxiliary loss
evidence_maximization_loss = smooth_l1_loss(
dirichlet_s,
100.0 *
torch.ones_like(dirichlet_s).to(device),
beta=self.smooth_l1_beta,
reduction='mean')
evidence_maximization_loss *= annealing_multiplier
# Compute final loss
foreground_loss = per_proposal_loss[(gt_classes >= 0) & (
gt_classes < pred_class_logits.shape[1] - 1)]
background_loss = per_proposal_loss[gt_classes ==
pred_class_logits.shape[1] - 1]
loss_cls = (torch.mean(foreground_loss) + torch.mean(background_loss)
) / 2 + 0.01 * evidence_maximization_loss
else:
loss_cls = F.cross_entropy(
pred_class_logits, gt_classes, reduction="mean")
# Compute regression loss:
if no_instances:
# TODO 0.0 * pred.sum() is enough since PT1.6
loss_box_reg = 0.0 * smooth_l1_loss(
pred_proposal_deltas,
torch.zeros_like(pred_proposal_deltas),
0.0,
reduction="sum",
)
else:
gt_proposal_deltas = self.box2box_transform.get_deltas(
proposals_boxes.tensor, gt_boxes.tensor
)
box_dim = gt_proposal_deltas.size(1) # 4 or 5
cls_agnostic_bbox_reg = pred_proposal_deltas.size(1) == box_dim
device = pred_proposal_deltas.device
bg_class_ind = pred_class_logits.shape[1] - 1
# Box delta loss is only computed between the prediction for the gt class k
# (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
# for non-gt classes and background.
# Empty fg_inds produces a valid loss of zero as long as the size_average
# arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
# and would produce a nan loss).
fg_inds = torch.nonzero(
(gt_classes >= 0) & (gt_classes < bg_class_ind), as_tuple=True
)[0]
if cls_agnostic_bbox_reg:
# pred_proposal_deltas only corresponds to foreground class for
# agnostic
gt_class_cols = torch.arange(box_dim, device=device)
else:
fg_gt_classes = gt_classes[fg_inds]
# pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
# where b is the dimension of box representation (4 or 5)
# Note that compared to Detectron1,
# we do not perform bounding box regression for background
# classes.
gt_class_cols = box_dim * \
fg_gt_classes[:, None] + torch.arange(box_dim, device=device)
gt_covar_class_cols = self.bbox_cov_dims * \
fg_gt_classes[:, None] + torch.arange(self.bbox_cov_dims, device=device)
loss_reg_normalizer = gt_classes.numel()
pred_proposal_deltas = pred_proposal_deltas[fg_inds[:,
None], gt_class_cols]
gt_proposals_delta = gt_proposal_deltas[fg_inds]
if self.compute_bbox_cov:
pred_proposal_covs = pred_proposal_covs[fg_inds[:,
None], gt_covar_class_cols]
pred_proposal_covs = clamp_log_variance(pred_proposal_covs)
if self.bbox_cov_loss == 'negative_log_likelihood':
if self.bbox_cov_type == 'diagonal':
# Get foreground proposals.
_proposals_boxes = proposals_boxes.tensor[fg_inds]
# Compute regression negative log likelihood loss according to:
# "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
loss_box_reg = 0.5 * torch.exp(-pred_proposal_covs) * smooth_l1_loss(
pred_proposal_deltas, gt_proposals_delta, beta=self.smooth_l1_beta)
loss_covariance_regularize = 0.5 * pred_proposal_covs
loss_box_reg += loss_covariance_regularize
loss_box_reg = torch.sum(
loss_box_reg) / loss_reg_normalizer
else:
# Multivariate Gaussian Negative Log Likelihood loss using pytorch
# distributions.multivariate_normal.log_prob()
forecaster_cholesky = covariance_output_to_cholesky(
pred_proposal_covs)
multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
pred_proposal_deltas, scale_tril=forecaster_cholesky)
loss_box_reg = - \
multivariate_normal_dists.log_prob(gt_proposals_delta)
loss_box_reg = torch.sum(
loss_box_reg) / loss_reg_normalizer
elif self.bbox_cov_loss == 'second_moment_matching':
# Compute regression covariance using second moment
# matching.
loss_box_reg = smooth_l1_loss(pred_proposal_deltas,
gt_proposals_delta,
self.smooth_l1_beta)
errors = (pred_proposal_deltas - gt_proposals_delta)
if self.bbox_cov_type == 'diagonal':
# Handle diagonal case
second_moment_matching_term = smooth_l1_loss(
torch.exp(pred_proposal_covs), errors ** 2, beta=self.smooth_l1_beta)
loss_box_reg += second_moment_matching_term
loss_box_reg = torch.sum(
loss_box_reg) / loss_reg_normalizer
else:
# Handle full covariance case
errors = torch.unsqueeze(errors, 2)
gt_error_covar = torch.matmul(
errors, torch.transpose(errors, 2, 1))
# This is the cholesky decomposition of the covariance matrix.
# We reconstruct it from 10 estimated parameters as a
# lower triangular matrix.
forecaster_cholesky = covariance_output_to_cholesky(
pred_proposal_covs)
predicted_covar = torch.matmul(
forecaster_cholesky, torch.transpose(
forecaster_cholesky, 2, 1))
second_moment_matching_term = smooth_l1_loss(
predicted_covar, gt_error_covar, beta=self.smooth_l1_beta, reduction='sum')
loss_box_reg = (
torch.sum(loss_box_reg) + second_moment_matching_term) / loss_reg_normalizer
elif self.bbox_cov_loss == 'energy_loss':
forecaster_cholesky = covariance_output_to_cholesky(
pred_proposal_covs)
# Define per-anchor Distributions
multivariate_normal_dists = distributions.multivariate_normal.MultivariateNormal(
pred_proposal_deltas, scale_tril=forecaster_cholesky)
# Define Monte-Carlo Samples
distributions_samples = multivariate_normal_dists.rsample(
(self.bbox_cov_num_samples + 1,))
distributions_samples_1 = distributions_samples[0:self.bbox_cov_num_samples, :, :]
distributions_samples_2 = distributions_samples[1:
self.bbox_cov_num_samples + 1, :, :]
# Compute energy score
loss_covariance_regularize = - smooth_l1_loss(
distributions_samples_1,
distributions_samples_2,
beta=self.smooth_l1_beta,
reduction="sum") / self.bbox_cov_num_samples # Second term
gt_proposals_delta_samples = torch.repeat_interleave(
gt_proposals_delta.unsqueeze(0), self.bbox_cov_num_samples, dim=0)
loss_first_moment_match = 2.0 * smooth_l1_loss(
distributions_samples_1,
gt_proposals_delta_samples,
beta=self.smooth_l1_beta,
reduction="sum") / self.bbox_cov_num_samples # First term
# Final Loss
loss_box_reg = (
loss_first_moment_match + loss_covariance_regularize) / loss_reg_normalizer
else:
raise ValueError(
'Invalid regression loss name {}.'.format(
self.bbox_cov_loss))
# Perform loss annealing. Not really essential in Generalized-RCNN case, but good practice for more
# elaborate regression variance losses.
standard_regression_loss = smooth_l1_loss(pred_proposal_deltas,
gt_proposals_delta,
self.smooth_l1_beta,
reduction="sum",)
standard_regression_loss = standard_regression_loss / loss_reg_normalizer
probabilistic_loss_weight = get_probabilistic_loss_weight(
current_step, self.annealing_step)
loss_box_reg = (1.0 - probabilistic_loss_weight) * \
standard_regression_loss + probabilistic_loss_weight * loss_box_reg
else:
loss_box_reg = smooth_l1_loss(pred_proposal_deltas,
gt_proposals_delta,
self.smooth_l1_beta,
reduction="sum",)
loss_box_reg = loss_box_reg / loss_reg_normalizer
return {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
def inference(self, predictions, proposals):
"""
Returns:
list[Instances]: same as `fast_rcnn_inference`.
list[Tensor]: same as `fast_rcnn_inference`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
image_shapes = [x.image_size for x in proposals]
return fast_rcnn_inference(
boxes,
scores,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def predict_boxes_for_gt_classes(self, predictions, proposals):
"""
Returns:
list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of
class-specific box head. Element i of the list has shape (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
scores, proposal_deltas = predictions
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
N, B = proposal_boxes.shape
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
K = predict_boxes.shape[1] // B
if K > 1:
gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
# Some proposals are ignored or have a background class. Their gt_classes
# cannot be used as index.
gt_classes = gt_classes.clamp_(0, K - 1)
predict_boxes = predict_boxes.view(N, K, B)[torch.arange(
N, dtype=torch.long, device=predict_boxes.device), gt_classes]
num_prop_per_image = [len(p) for p in proposals]
return predict_boxes.split(num_prop_per_image)
def predict_boxes(self, predictions, proposals):
"""
Returns:
list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
_, proposal_deltas, _, _ = predictions
num_prop_per_image = [len(p) for p in proposals]
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
return predict_boxes.split(num_prop_per_image)
def predict_probs(self, predictions, proposals):
"""
Returns:
list[Tensor]: A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i.
"""
scores, _, _, _ = predictions
num_inst_per_image = [len(p) for p in proposals]
if self.cls_var_loss == "evidential":
alphas = get_dir_alphas(scores)
dirichlet_s = alphas.sum(1).unsqueeze(1)
# Compute probabilities
probs = alphas / dirichlet_s
else:
probs = F.softmax(scores, dim=-1)
return probs.split(num_inst_per_image, dim=0)
# TODO: the new detectron2 interface required copying code. Check for a better
# way to inherit from FastRCNNConvFCHead.
@ROI_BOX_HEAD_REGISTRY.register()
class DropoutFastRCNNConvFCHead(nn.Module):
"""
A head with several 3x3 conv layers (each followed by norm & relu) and then
several fc layers (each followed by relu) and dropout.
"""
@configurable
def __init__(
self,
input_shape: ShapeSpec,
*,
conv_dims: List[int],
fc_dims: List[int],
conv_norm="",
dropout_rate
):
"""
NOTE: this interface is experimental.
Args:
input_shape (ShapeSpec): shape of the input feature.
conv_dims (list[int]): the output dimensions of the conv layers
fc_dims (list[int]): the output dimensions of the fc layers
conv_norm (str or callable): normalization for the conv layers.
See :func:`detectron2.layers.get_norm` for supported types.
dropout_rate (float): p for dropout layer
"""
super().__init__()
assert len(conv_dims) + len(fc_dims) > 0
self.dropout_rate = dropout_rate
self.use_dropout = self.dropout_rate != 0.0
self._output_size = (
input_shape.channels,
input_shape.height,
input_shape.width)
self.conv_norm_relus = []
for k, conv_dim in enumerate(conv_dims):
conv = Conv2d(
self._output_size[0],
conv_dim,
kernel_size=3,
padding=1,
bias=not conv_norm,
norm=get_norm(conv_norm, conv_dim),
activation=F.relu,
)
self.add_module("conv{}".format(k + 1), conv)
self.conv_norm_relus.append(conv)
self._output_size = (
conv_dim,
self._output_size[1],
self._output_size[2])
self.fcs = []
self.fcs_dropout = []
for k, fc_dim in enumerate(fc_dims):
fc = Linear(np.prod(self._output_size), fc_dim)
fc_dropout = nn.Dropout(p=self.dropout_rate)
self.add_module("fc{}".format(k + 1), fc)
self.add_module("fc_dropout{}".format(k + 1), fc_dropout)
self.fcs.append(fc)
self.fcs_dropout.append(fc_dropout)
self._output_size = fc_dim
for layer in self.conv_norm_relus:
weight_init.c2_msra_fill(layer)
for layer in self.fcs:
weight_init.c2_xavier_fill(layer)
@classmethod
def from_config(cls, cfg, input_shape):
num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV
conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM
num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC
fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM
return {
"input_shape": input_shape,
"conv_dims": [conv_dim] * num_conv,
"fc_dims": [fc_dim] * num_fc,
"conv_norm": cfg.MODEL.ROI_BOX_HEAD.NORM,
"dropout_rate": cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE
}
def forward(self, x):
for layer in self.conv_norm_relus:
x = layer(x)
if len(self.fcs):
if x.dim() > 2:
x = torch.flatten(x, start_dim=1)
for layer, dropout in zip(self.fcs, self.fcs_dropout):
x = F.relu(dropout(layer(x)))
return x
@property
def output_shape(self):
"""
Returns:
ShapeSpec: the output feature shape
"""
o = self._output_size
if isinstance(o, int):
return ShapeSpec(channels=o)
else:
return ShapeSpec(channels=o[0], height=o[1], width=o[2])
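# Minimal usage sketch (added for illustration, not part of the original file). It builds the
# dropout box head directly with explicit arguments and pushes a dummy batch of RoI-pooled
# features through it; the 256x7x7 feature shape, fc widths, and dropout rate are assumptions.
def _example_dropout_box_head():
    head = DropoutFastRCNNConvFCHead(
        ShapeSpec(channels=256, height=7, width=7),
        conv_dims=[],            # no conv layers in this sketch
        fc_dims=[1024, 1024],    # two fc layers, each followed by dropout then relu
        dropout_rate=0.5,
    )
    pooled_features = torch.rand(8, 256, 7, 7)  # 8 proposals worth of pooled features
    flattened = head(pooled_features)           # -> shape (8, 1024)
    return flattened.shape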
# ---- pytorch/basic.py (repo: iamslash/examplesofml, license: MIT) ----
# regression ANN
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
x = np.array([0, 1, 2, 3, 4], dtype=np.float32)
y = 2 * x + 1
x = x.reshape(-1, 1)
y = y.reshape(-1, 1)
class ANN_regression(nn.Module):
def __init__(self, input_dim, output_dim):
super(ANN_regression, self).__init__()
self.linear = nn.Linear(input_dim, output_dim)
def forward(self, x):
return self.linear(x)
def main(epochs=2000):
# Create instance of model
model = ANN_regression(1, 1)
criterion = nn.MSELoss()
learning_rate = 0.01
optimiser = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(epochs):
epoch += 1
inputs = Variable(torch.from_numpy(x[:3]))
labels = Variable(torch.from_numpy(y[:3]))
optimiser.zero_grad()
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimiser.step()
if epoch % 100 == 0:
print('epoch {}, loss {}'.format(epoch, loss.item()))
# Plot predictions
predicted = model.forward(Variable(torch.from_numpy(x[3:]))).detach().numpy()
plt.plot(x, y, 'go', label = 'targets', alpha = 0.5)
plt.plot(x[3:], predicted, label = 'predictions', alpha = 0.5)
plt.show()
print(model.state_dict())
if __name__ == '__main__':
main()
# ---- server/notify_run_server/model_sqlalchemy.py (repo: ankitshekhawat/notify.run, license: MIT) ----
from datetime import datetime
from typing import Any, List
from sqlalchemy import (JSON, Column, DateTime, ForeignKey, Integer, String,
create_engine)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.orm.attributes import flag_modified
from notify_run_server.model import (NoSuchChannel, NotifyModel,
generate_channel_id)
from notify_run_server.params import DB_URL
Base = declarative_base() # type: Any
class Channel(Base):
__tablename__ = 'channel'
id = Column(String, primary_key=True)
created = Column(DateTime)
meta = Column(JSON)
subscriptions = Column(JSON)
messages = relationship('Message')
class Message(Base):
__tablename__ = 'message'
id = Column(Integer, primary_key=True)
channel_id = Column(String, ForeignKey('channel.id'))
messageTime = Column(DateTime)
message = Column(String)
data = Column(JSON)
result = Column(JSON)
channel = relationship('Channel', back_populates='messages')
class SqlNotifyModel(NotifyModel):
def __init__(self):
engine = create_engine(DB_URL, echo=False)
Base.metadata.create_all(engine)
self._sessionmaker = sessionmaker(bind=engine)
def register_channel(self, meta: dict) -> str:
session = self._sessionmaker()
channel = Channel(
id=generate_channel_id(),
created=datetime.now(),
meta=dict(),
subscriptions=dict(),
)
session.add(channel)
session.commit()
return channel.id
def add_subscription(self, channel_id: str, subscription: dict):
session = self._sessionmaker()
channel = session.query(Channel).get(channel_id)
if channel is None:
raise NoSuchChannel(channel_id)
channel.subscriptions[subscription['id']
] = subscription['subscription']
flag_modified(channel, 'subscriptions')
session.commit()
def get_channel(self, channel_id: str):
session = self._sessionmaker()
channel = session.query(Channel).get(channel_id)
if channel is None:
raise NoSuchChannel(channel_id)
return {
'channelId': channel.id,
'created': channel.created,
'meta': channel.meta,
'subscriptions': channel.subscriptions,
}
def get_messages(self, channel_id: str) -> List[dict]:
session = self._sessionmaker()
messages = session.query(Message).filter_by(
channel_id=channel_id).order_by(Message.messageTime.desc())[:10]
return [{
'message': message.message,
'time': message.messageTime,
'result': message.result,
} for message in messages]
def put_message(self, channel_id: str, message: str, data: dict, result: list):
session = self._sessionmaker()
message = Message(
channel_id=channel_id,
messageTime=datetime.now(),
message=message,
data=data,
result=result,
)
session.add(message)
session.commit()
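# Minimal usage sketch (added for illustration, not part of the original module). It assumes
# DB_URL in notify_run_server.params points at a reachable database (e.g. a local sqlite file);
# the subscription id, endpoint, and message contents below are placeholder values.
def _example_sql_notify_model():
    backend = SqlNotifyModel()
    channel_id = backend.register_channel(meta={})
    backend.add_subscription(channel_id, {'id': 'sub-1',
                                          'subscription': {'endpoint': 'https://example.com'}})
    backend.put_message(channel_id, message='hello', data={}, result=[])
    return backend.get_channel(channel_id), backend.get_messages(channel_id)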
# ---- hera_cal/abscal.py (repo: LBJ-Wade/hera_cal, license: MIT) ----
# -*- coding: utf-8 -*-
# Copyright 2020 the HERA Project
# Licensed under the MIT License
"""
abscal.py
---------
Calibrate measured visibility
data to a visibility model using
linearizations of the (complex)
antenna-based calibration equation:
V_ij,xy^data = g_i_x * conj(g_j_y) * V_ij,xy^model.
Complex-valued parameters are broken into amplitudes and phases as:
V_ij,xy^model = exp(eta_ij,xy^model + i * phi_ij,xy^model)
g_i_x = exp(eta_i_x + i * phi_i_x)
g_j_y = exp(eta_j_y + i * phi_j_y)
V_ij,xy^data = exp(eta_ij,xy^data + i * phi_ij,xy^data)
where {i,j} index antennas and {x,y} are the polarization of
the i-th and j-th antenna respectively.
"""
import os
from collections import OrderedDict as odict
import copy
import argparse
import numpy as np
import operator
from functools import reduce
from scipy import signal, interpolate, spatial
from scipy.optimize import brute, minimize
from pyuvdata import UVCal, UVData
import linsolve
import warnings
from . import version
from .apply_cal import calibrate_in_place
from .smooth_cal import pick_reference_antenna, rephase_to_refant
from .flag_utils import synthesize_ant_flags
from .noise import predict_noise_variance_from_autos
from . import utils
from . import redcal
from . import io
from . import apply_cal
from .datacontainer import DataContainer
from .utils import echo, polnum2str, polstr2num, reverse_bl, split_pol, split_bl, join_bl, join_pol
PHASE_SLOPE_SOLVERS = ['linfit', 'dft', 'ndim_fft'] # list of valid solvers for global_phase_slope_logcal
def abs_amp_logcal(model, data, wgts=None, verbose=True, return_gains=False, gain_ants=[]):
"""
calculate absolute (array-wide) gain amplitude scalar
with a linear solver using the logarithmically linearized equation:
ln|V_ij,xy^data / V_ij,xy^model| = eta_x + eta_y
where {i,j} index antenna numbers and {x,y} index polarizations
of the i-th and j-th antennas respectively.
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
verbose : print output, type=boolean, [default=True]
Output:
-------
if not return_gains:
fit : dictionary with 'eta_{}' key for amplitude scalar for {} polarization,
which has the same shape as the ndarrays in the model
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for abs_amp_logcal", verbose=verbose)
# get keys from model and data dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# abs of amplitude ratio is ydata independent variable
ydata = odict([(k, np.log(np.abs(data[k] / model[k]))) for k in keys])
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(ydata[k], dtype=np.float)
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# setup linsolve equations
# a{} is a dummy variable to prevent linsolve from overwriting repeated measurements
eqns = odict([(k, "a{}*eta_{}+a{}*eta_{}".format(i, split_pol(k[-1])[0],
i, split_pol(k[-1])[1])) for i, k in enumerate(keys)])
ls_design_matrix = odict([("a{}".format(i), 1.0) for i, k in enumerate(keys)])
# setup linsolve dictionaries
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], wgts[k]) for i, k in enumerate(keys)])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
if not return_gains:
return fit
else:
return {ant: np.exp(fit['eta_{}'.format(ant[1])]).astype(np.complex) for ant in gain_ants}
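# Minimal sketch of abs_amp_logcal on toy inputs (added for illustration, not part of the
# original module). Plain dicts stand in for DataContainers here, which is all this solver
# needs; the 'eta_' key is built with split_pol, mirroring the solver's internal naming.
# With data = 4 * model, each antenna's log-amplitude should come out as ln(2).
def _example_abs_amp_logcal():
    model = {(0, 1, 'nn'): np.ones((2, 3), dtype=np.complex128)}
    data = {k: 4.0 * v for k, v in model.items()}
    fit = abs_amp_logcal(model, data, verbose=False)
    assert np.allclose(fit['eta_' + split_pol('nn')[0]], np.log(2.0))
    return fit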
def abs_amp_lincal(model, data, wgts=None, verbose=True, return_gains=False, gain_ants=[],
conv_crit=None, maxiter=100):
"""
calculate absolute (array-wide) gain amplitude scalar
with a linear (or linearized) solver using the equation:
V_ij,xy^data = A_x A_y * V_ij,xy^model
where {i,j} index antenna numbers and {x,y} index polarizations
of the i-th and j-th antennas respectively. When no cross-polarized
visibilities are involved, A^2 is solved for linearly for both real
and imaginary parts simultaneously as separate equations. Otherwise,
we have to use a linear-product solving algorithm, using abs_amp_logcal
as a starting point.
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
conv_crit : A convergence criterion below which to stop iterating LinProductSolver.
Convergence is measured as the L2-norm of the change in the solution of the
variables divided by the L2-norm of the solution itself.
Default: None (resolves to machine precision for inferred dtype).
Note: only used when data and model include cross-polarized visibilities.
maxiter : Integer maximum number of iterations to perform LinProductSolver.
Note: only used when data and model include cross-polarized visibilities.
verbose : print output, type=boolean, [default=True]
Output:
-------
if not return_gains:
fit : dictionary with 'A_{}' key for amplitude scalar for {} polarization,
which has the same shape as the ndarrays in the model
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for abs_amp_lincal", verbose=verbose)
# get keys from model and data dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# check to see whether any cross-polarizations are being used (this will require a different solver)
cross_pols_used = False
for k in keys:
ant0, ant1 = split_bl(k)
if ant0[1] != ant1[1]:
cross_pols_used = True
break
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(data[k], dtype=np.float)
# fill nans and infs, minimally duplicating data to save memory
data_here = {}
model_here = {}
for k in keys:
if np.any(~np.isfinite(data[k])):
data_here[k] = copy.deepcopy(data[k])
fill_dict_nans(data_here[k], wgts=wgts[k], nan_fill=0.0, inf_fill=0.0, array=True)
else:
data_here[k] = data[k]
if np.any(~np.isfinite(model[k])):
model_here[k] = copy.deepcopy(model[k])
fill_dict_nans(model_here[k], wgts=wgts[k], nan_fill=0.0, inf_fill=0.0, array=True)
else:
model_here[k] = model[k]
# setup linsolve equations, either for A (if cross_pols_used) or A^2
ls_data = {}
ls_wgts = {}
ls_consts = {}
for i, k in enumerate(keys):
pol0, pol1 = split_pol(k[-1])
if cross_pols_used:
re_eq_str = f'model_re_{i}*A_{pol0}*A_{pol1}'
im_eq_str = f'model_im_{i}*A_{pol0}*A_{pol1}'
else:
re_eq_str = f'model_re_{i}*Asq_{pol0}'
im_eq_str = f'model_im_{i}*Asq_{pol0}'
ls_data[re_eq_str] = np.real(data_here[k])
ls_wgts[re_eq_str] = wgts[k]
ls_consts[f'model_re_{i}'] = np.real(model_here[k])
ls_data[im_eq_str] = np.imag(data_here[k])
ls_wgts[im_eq_str] = wgts[k]
ls_consts[f'model_im_{i}'] = np.imag(model_here[k])
# setup linsolve and run
echo("...running linsolve", verbose=verbose)
if cross_pols_used:
# use abs_amp_logcal to get a starting point solution
sol0 = abs_amp_logcal(model, data, wgts=wgts)
sol0 = {k.replace('eta_', 'A_'): np.exp(sol) for k, sol in sol0.items()}
# now solve by linearizing
solver = linsolve.LinProductSolver(ls_data, sol0, wgts=ls_wgts, constants=ls_consts)
meta, fit = solver.solve_iteratively(conv_crit=conv_crit, maxiter=maxiter)
else:
# in this case, the equations are already linear in A^2
solver = linsolve.LinearSolver(ls_data, wgts=ls_wgts, constants=ls_consts)
fit = solver.solve()
fit = {k.replace('Asq', 'A'): np.sqrt(np.abs(sol)) for k, sol in fit.items()}
echo("...finished linsolve", verbose=verbose)
if not return_gains:
return fit
else:
return {ant: np.abs(fit[f'A_{ant[1]}']).astype(np.complex) for ant in gain_ants}
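# Analogous sketch for abs_amp_lincal (added for illustration, not part of the original module).
# With no cross-polarized keys the solve is linear in A^2, so data = 4 * model gives A = 2;
# the 'A_' key is again built with split_pol to mirror the solver's naming.
def _example_abs_amp_lincal():
    model = {(0, 1, 'nn'): np.ones((2, 3), dtype=np.complex128)}
    data = {k: 4.0 * v for k, v in model.items()}
    fit = abs_amp_lincal(model, data, verbose=False)
    assert np.allclose(fit['A_' + split_pol('nn')[0]], 2.0)
    return fit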
def _count_nDims(antpos, assume_2D=True):
'''Antenna position dimension counter helper function used in solvers that support higher-dim abscal.'''
nDims = len(list(antpos.values())[0])
for k in antpos.keys():
assert len(antpos[k]) == nDims, 'Not all antenna positions have the same dimensionality.'
if assume_2D:
assert len(antpos[k]) >= 2, 'Since assume_2D is True, all antenna positions must 2D or higher.'
return nDims
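# Tiny sketch (added for illustration): _count_nDims only checks that every antenna position
# vector shares the same length and returns that dimensionality.
def _example_count_nDims():
    antpos = {0: np.zeros(3), 1: np.array([14.6, 0.0, 0.0])}
    assert _count_nDims(antpos) == 3
    return _count_nDims(antpos)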
def TT_phs_logcal(model, data, antpos, wgts=None, refant=None, assume_2D=True,
zero_psi=True, four_pol=False, verbose=True, return_gains=False, gain_ants=[]):
"""
calculate overall gain phase and gain phase Tip-Tilt slopes (East-West and North-South)
with a linear solver applied to the logarithmically linearized equation:
angle(V_ij,xy^data / V_ij,xy^model) = angle(g_i_x * conj(g_j_y))
= psi_x - psi_y + Phi^ew_x*r_i^ew + Phi^ns_x*r_i^ns
- Phi^ew_y*r_j^ew - Phi^ns_y*r_j^ns
where psi is the overall gain phase across the array [radians] for x and y polarizations,
and PHI^ew, PHI^ns are the gain phase slopes across the east-west and north-south axes
of the array in units of [radians / meter], where x and y denote the pol of the i-th and j-th
antenna respectively. The phase slopes are polarization independent by default (1pol & 2pol cal),
but can be merged with the four_pol parameter (4pol cal). r_i is the antenna position vector
of the i^th antenna.
If assume_2D is not true, this solves for the tip-tilt degeneracies of antenna positions in an
arbitrary number of dimensions, the output of redcal.reds_to_antpos() for an array with extra
tip-tilt degeneracies. In that case, the fit parameters are Phi_0, Phi_1, Phi_2, etc.,
generalizing the equation above to use the n-dimensional dot product Phi . r.
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
refant : antenna number integer to use as a reference,
The antenna position coordinates are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
antpos : antenna position vectors, type=dictionary
keys are antenna integers, values are antenna position vectors
(preferably centered at center of array). If assume_2D is True, it is assumed that the
[0] index contains the east-west separation and [1] index the north-south separation
assume_2D : type=boolean, [default=True]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like Phi_0, Phi_1, Phi_2, etc. corresponding to the dimensions in antpos.
zero_psi : set psi to be identically zero in linsolve eqns, type=boolean, [default=True]
four_pol : type=boolean, even if multiple polarizations are present in data, make free
variables polarization un-aware: i.e. one solution across all polarizations.
This is the same assumption as 4-polarization calibration in omnical.
verbose : print output, type=boolean, [default=True]
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary with psi key for overall gain phase and Phi_ew and Phi_ns array containing
phase slopes across the EW and NS directions of the array. There is a set of each
of these variables per polarization. If assume_2D is False, then these will be the
more general Phi_0, Phi_1, Phi_2, etc. corresponding to the dimensions in antpos.
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for TT_phs_logcal", verbose=verbose)
# get keys from model dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# angle of phs ratio is ydata independent variable
# angle after divide
ydata = {k: np.angle(data[k] / model[k]) for k in keys}
# make unit weights if None
if wgts is None:
wgts = {k: np.ones_like(ydata[k], dtype=np.float) for k in keys}
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# center antenna positions about the reference antenna
if refant is None:
refant = keys[0][0]
assert refant in antnums, "reference antenna {} not found in antenna list".format(refant)
antpos = {k: antpos[k] - antpos[refant] for k in antpos.keys()}
# count dimensions of antenna positions, figure out how many to solve for
nDims = _count_nDims(antpos, assume_2D=assume_2D)
# setup linsolve equations
eqns = {}
for k in keys:
ap0, ap1 = split_pol(k[2])
eqns[k] = f'psi_{ap0}*a1 - psi_{ap1}*a2'
for d in range((nDims, 2)[assume_2D]):
if four_pol:
eqns[k] += f' + Phi_{d}*r_{d}_{k[0]} - Phi_{d}*r_{d}_{k[1]}'
else:
eqns[k] += f' + Phi_{d}_{ap0}*r_{d}_{k[0]} - Phi_{d}_{ap1}*r_{d}_{k[1]}'
# set design matrix entries
ls_design_matrix = {}
for a in antnums:
for d in range((nDims, 2)[assume_2D]):
ls_design_matrix[f'r_{d}_{a}'] = antpos[a][d]
if zero_psi:
ls_design_matrix.update({"a1": 0.0, "a2": 0.0})
else:
ls_design_matrix.update({"a1": 1.0, "a2": 1.0})
# setup linsolve dictionaries
ls_data = {eqns[k]: ydata[k] for k in keys}
ls_wgts = {eqns[k]: wgts[k] for k in keys}
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
if not return_gains:
# rename variables ew/ns instead of 0/1 to maintain backwards compatability
if assume_2D:
params = list(fit.keys())
for p in params:
if 'Phi_0' in p:
fit[p.replace('Phi_0', 'Phi_ew')] = fit[p]
del fit[p]
if 'Phi_1' in p:
fit[p.replace('Phi_1', 'Phi_ns')] = fit[p]
del fit[p]
return fit
else:
# compute gains, dotting each parameter into the corresponding coordinate in that dimension
gains = {}
for ant in gain_ants:
gains[ant] = np.exp(1.0j * fit['psi_{}'.format(ant[1])])
if four_pol:
Phis = [fit[f'Phi_{d}'] for d in range((nDims, 2)[assume_2D])]
else:
Phis = [fit[f'Phi_{d}_{ant[1]}'] for d in range((nDims, 2)[assume_2D])]
gains[ant] *= np.exp(1.0j * (np.einsum('i,ijk->jk', antpos[ant[0]][0:len(Phis)], Phis)))
return gains
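# Minimal sketch of TT_phs_logcal (added for illustration, not part of the original module):
# toy data built with a pure east-west phase gradient of 0.01 rad/m across a 3-antenna array
# should be recovered in the fitted Phi_ew parameter (the north-south slope comes out ~0).
def _example_TT_phs_logcal():
    antpos = {0: np.array([0.0, 0.0, 0.0]),
              1: np.array([15.0, 0.0, 0.0]),
              2: np.array([0.0, 15.0, 0.0])}
    slope_ew = 0.01  # radians / meter
    model = {(i, j, 'nn'): np.ones((2, 3), dtype=np.complex128)
             for i, j in [(0, 1), (0, 2), (1, 2)]}
    data = {(i, j, pol): model[(i, j, pol)] * np.exp(1.0j * slope_ew * (antpos[i][0] - antpos[j][0]))
            for (i, j, pol) in model.keys()}
    fit = TT_phs_logcal(model, data, antpos, verbose=False)
    assert np.allclose(fit['Phi_ew_' + split_pol('nn')[0]], slope_ew)
    return fit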
def amp_logcal(model, data, wgts=None, verbose=True):
"""
calculate per-antenna gain amplitude via the
logarithmically linearized equation
ln|V_ij,xy^data / V_ij,xy^model| = ln|g_i_x| + ln|g_j_y|
= eta_i_x + eta_j_y
where {x,y} represent the polarization of the i-th and j-th antenna
respectively.
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
Output:
-------
fit : dictionary containing eta_i = ln|g_i| for each antenna
"""
echo("...configuring linsolve data for amp_logcal", verbose=verbose)
# get keys from model dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# difference of log-amplitudes is ydata independent variable
ydata = odict([(k, np.log(np.abs(data[k] / model[k]))) for k in keys])
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(ydata[k], dtype=np.float)
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# setup linsolve equations
eqns = odict([(k, "eta_{}_{} + eta_{}_{}".format(k[0], split_pol(k[-1])[0],
k[1], split_pol(k[-1])[1])) for i, k in enumerate(keys)])
ls_design_matrix = odict()
# setup linsolve dictionaries
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], wgts[k]) for i, k in enumerate(keys)])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
return fit
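# Minimal sketch of amp_logcal (added for illustration, not part of the original module):
# per-antenna gain amplitudes {1.0, 2.0, 0.5} on a 3-antenna toy array are recovered as
# their natural logarithms.
def _example_amp_logcal():
    gain_amps = {0: 1.0, 1: 2.0, 2: 0.5}
    model = {(i, j, 'nn'): np.ones((2, 3), dtype=np.complex128)
             for i, j in [(0, 1), (0, 2), (1, 2)]}
    data = {(i, j, pol): model[(i, j, pol)] * gain_amps[i] * gain_amps[j]
            for (i, j, pol) in model.keys()}
    fit = amp_logcal(model, data, verbose=False)
    ap = split_pol('nn')[0]
    assert all(np.allclose(fit['eta_{}_{}'.format(a, ap)], np.log(g)) for a, g in gain_amps.items())
    return fit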
def phs_logcal(model, data, wgts=None, refant=None, verbose=True):
"""
calculate per-antenna gain phase via the
logarithmically linearized equation
angle(V_ij,xy^data / V_ij,xy^model) = angle(g_i_x) - angle(g_j_y)
= phi_i_x - phi_j_y
where {x,y} represent the pol of the i-th and j-th antenna respectively.
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
refant : integer antenna number of reference antenna, default=None
The refant phase will be set to identically zero in the linear equations.
By default this takes the first antenna in data.
Output:
-------
fit : dictionary containing phi_i = angle(g_i) for each antenna
"""
echo("...configuring linsolve data for phs_logcal", verbose=verbose)
# get keys from match between data and model dictionary
keys = sorted(set(model.keys()) & set(data.keys()))
# angle of visibility ratio is ydata independent variable
ydata = odict([(k, np.angle(data[k] / model[k])) for k in keys])
# make weights if None
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(ydata[k], dtype=np.float)
# fill nans and infs
fill_dict_nans(ydata, wgts=wgts, nan_fill=0.0, inf_fill=0.0)
# setup linsolve equations
eqns = odict([(k, "phi_{}_{} - phi_{}_{}".format(k[0], split_pol(k[2])[0],
k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
ls_design_matrix = odict()
# setup linsolve dictionaries
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], wgts[k]) for i, k in enumerate(keys)])
# get unique gain polarizations
gain_pols = np.unique(list(map(lambda k: list(split_pol(k[2])), keys)))
# set reference antenna phase to zero
if refant is None:
refant = keys[0][0]
assert np.array(list(map(lambda k: refant in k, keys))).any(), "refant {} not found in data and model".format(refant)
for p in gain_pols:
ls_data['phi_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
ls_wgts['phi_{}_{}'.format(refant, p)] = np.ones_like(list(wgts.values())[0])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
return fit
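# Minimal sketch of phs_logcal (added for illustration, not part of the original module):
# per-antenna phases on a 3-antenna toy array are recovered relative to the reference
# antenna (antenna 0 here).
def _example_phs_logcal():
    gain_phs = {0: 0.0, 1: 0.2, 2: -0.3}
    model = {(i, j, 'nn'): np.ones((2, 3), dtype=np.complex128)
             for i, j in [(0, 1), (0, 2), (1, 2)]}
    data = {(i, j, pol): model[(i, j, pol)] * np.exp(1.0j * (gain_phs[i] - gain_phs[j]))
            for (i, j, pol) in model.keys()}
    fit = phs_logcal(model, data, refant=0, verbose=False)
    ap = split_pol('nn')[0]
    assert all(np.allclose(fit['phi_{}_{}'.format(a, ap)], p) for a, p in gain_phs.items())
    return fit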
def delay_lincal(model, data, wgts=None, refant=None, df=9.765625e4, f0=0., solve_offsets=True, medfilt=True,
kernel=(1, 5), verbose=True, antpos=None, four_pol=False, edge_cut=0):
"""
Solve for per-antenna delays according to the equation
delay(V_ij,xy^data / V_ij,xy^model) = delay(g_i_x) - delay(g_j_y)
Can also solve for per-antenna phase offsets with the solve_offsets kwarg.
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data. These are only used to find delays from
integrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as reference
Set the reference antenna to have zero delay, such that its phase is set to identically
zero across all freqs. By default use the first key in data.
df : type=float, frequency spacing between channels in Hz
f0 : type=float, frequency of the first channel in the data (used for offsets)
medfilt : type=boolean, median filter visibility ratio before taking fft
kernel : type=tuple, dtype=int, kernel for multi-dimensional median filter
antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
four_pol : type=boolean, if True, fit multiple polarizations together
edge_cut : int, number of channels to exclude at each band edge in FFT window
Output:
-------
fit : dictionary containing delay (tau_i_x) for each antenna and optionally
offset (phi_i_x) for each antenna.
"""
echo("...configuring linsolve data for delay_lincal", verbose=verbose)
# get shared keys
keys = sorted(set(model.keys()) & set(data.keys()))
# make wgts
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(data[k], dtype=np.float)
# median filter and FFT to get delays
ratio_delays = []
ratio_offsets = []
ratio_wgts = []
for i, k in enumerate(keys):
ratio = data[k] / model[k]
# replace nans
nan_select = np.isnan(ratio)
ratio[nan_select] = 0.0
wgts[k][nan_select] = 0.0
# replace infs
inf_select = np.isinf(ratio)
ratio[inf_select] = 0.0
wgts[k][inf_select] = 0.0
# get delays
dly, offset = utils.fft_dly(ratio, df, f0=f0, wgts=wgts[k], medfilt=medfilt, kernel=kernel, edge_cut=edge_cut)
# set nans to zero
rwgts = np.nanmean(wgts[k], axis=1, keepdims=True)
isnan = np.isnan(dly)
dly[isnan] = 0.0
rwgts[isnan] = 0.0
offset[isnan] = 0.0
ratio_delays.append(dly)
ratio_offsets.append(offset)
ratio_wgts.append(rwgts)
ratio_delays = np.array(ratio_delays)
ratio_offsets = np.array(ratio_offsets)
ratio_wgts = np.array(ratio_wgts)
# form ydata
ydata = odict(zip(keys, ratio_delays))
# form wgts
ywgts = odict(zip(keys, ratio_wgts))
# setup linsolve equation dictionary
eqns = odict([(k, 'tau_{}_{} - tau_{}_{}'.format(k[0], split_pol(k[2])[0],
k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
# setup design matrix dictionary
ls_design_matrix = odict()
# setup linsolve data dictionary
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], ywgts[k]) for i, k in enumerate(keys)])
# get unique gain polarizations
gain_pols = np.unique(list(map(lambda k: [split_pol(k[2])[0], split_pol(k[2])[1]], keys)))
# set reference antenna phase to zero
if refant is None:
refant = keys[0][0]
assert np.array(list(map(lambda k: refant in k, keys))).any(), "refant {} not found in data and model".format(refant)
for p in gain_pols:
ls_data['tau_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
ls_wgts['tau_{}_{}'.format(refant, p)] = np.ones_like(list(ywgts.values())[0])
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
# setup linsolve parameters
ydata = odict(zip(keys, ratio_offsets))
eqns = odict([(k, 'phi_{}_{} - phi_{}_{}'.format(k[0], split_pol(k[2])[0],
k[1], split_pol(k[2])[1])) for i, k in enumerate(keys)])
ls_data = odict([(eqns[k], ydata[k]) for i, k in enumerate(keys)])
ls_wgts = odict([(eqns[k], ywgts[k]) for i, k in enumerate(keys)])
ls_design_matrix = odict()
for p in gain_pols:
ls_data['phi_{}_{}'.format(refant, p)] = np.zeros_like(list(ydata.values())[0])
ls_wgts['phi_{}_{}'.format(refant, p)] = np.ones_like(list(ywgts.values())[0])
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
offset_fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
fit.update(offset_fit)
return fit
def delay_slope_lincal(model, data, antpos, wgts=None, refant=None, df=9.765625e4, f0=0.0, medfilt=True,
kernel=(1, 5), assume_2D=True, four_pol=False, edge_cut=0, time_avg=False,
return_gains=False, gain_ants=[], verbose=True):
"""
Solve for an array-wide delay slope according to the equation
delay(V_ij,xy^data / V_ij,xy^model) = dot(T_x, r_i) - dot(T_y, r_j)
This does not solve for per-antenna delays, but rather a delay slope across the array.
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data. These are only used to find delays from
integrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as a reference,
The antenna position coordinates are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
df : type=float, frequency spacing between channels in Hz
f0 : type=float, frequency of 0th channel in Hz.
Optional, but used to get gains without a delay offset.
medfilt : type=boolean, median filter visibility ratio before taking fft
kernel : type=tuple, dtype=int, kernel for multi-dimensional median filter
assume_2D : type=boolean, [default=True]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like T_0, T_1, T_2, etc. corresponding to the dimensions in antpos.
four_pol : type=boolean, if True, fit multiple polarizations together
edge_cut : int, number of channels to exclude at each band edge of vis in FFT window
time_avg : boolean, if True, replace resultant antenna delay slope with the median across time
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary containing delay slope (T_x) for each pol [seconds / meter].
If assume_2D is False, then these will be the more general T_0, T_1, T_2, etc.
corresponding to the dimensions in antpos, instead of T_ew or T_ns.
else:
gains: dictionary with gain_ants as keys and gain waterfall arrays as values
"""
echo("...configuring linsolve data for delay_slope_lincal", verbose=verbose)
# get shared keys
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# make unit wgts if None
if wgts is None:
wgts = {k: np.ones_like(data[k], dtype=np.float) for k in keys}
# center antenna positions about the reference antenna
if refant is None:
refant = keys[0][0]
assert refant in antnums, "reference antenna {} not found in antenna list".format(refant)
antpos = {k: antpos[k] - antpos[refant] for k in antpos.keys()}
# count dimensions of antenna positions, figure out how many to solve for
nDims = _count_nDims(antpos, assume_2D=assume_2D)
# median filter and FFT to get delays
ydata = {}
ywgts = {}
for i, k in enumerate(keys):
ratio = data[k] / model[k]
ratio /= np.abs(ratio)
# replace nans and infs
wgts[k][~np.isfinite(ratio)] = 0.0
ratio[~np.isfinite(ratio)] = 0.0
# get delays
ydata[k], _ = utils.fft_dly(ratio, df, wgts=wgts[k], f0=f0, medfilt=medfilt, kernel=kernel, edge_cut=edge_cut)
# set nans to zero
ywgts[k] = np.nanmean(wgts[k], axis=1, keepdims=True)
isnan = np.isnan(ydata[k])
ydata[k][isnan] = 0.0
ywgts[k][isnan] = 0.0
# setup antenna position terms
r_ew = {a: f"r_ew_{a}" for a in antnums}
r_ns = {a: f"r_ns_{a}" for a in antnums}
# setup linsolve equations
eqns = {k: '' for k in keys}
for k in keys:
ap0, ap1 = split_pol(k[2])
for d in range((nDims, 2)[assume_2D]):
if len(eqns[k]) > 0:
eqns[k] += ' + '
if four_pol:
eqns[k] += f'T_{d}*r_{d}_{k[0]} - T_{d}*r_{d}_{k[1]}'
else:
eqns[k] += f'T_{d}_{ap0}*r_{d}_{k[0]} - T_{d}_{ap1}*r_{d}_{k[1]}'
# set design matrix entries
ls_design_matrix = {}
for a in antnums:
for d in range((nDims, 2)[assume_2D]):
ls_design_matrix[f'r_{d}_{a}'] = antpos[a][d]
# setup linsolve data dictionary
ls_data = {eqns[k]: ydata[k] for k in keys}
ls_wgts = {eqns[k]: ywgts[k] for k in keys}
# setup linsolve and run
sol = linsolve.LinearSolver(ls_data, wgts=ls_wgts, **ls_design_matrix)
echo("...running linsolve", verbose=verbose)
fit = sol.solve()
echo("...finished linsolve", verbose=verbose)
# time average
if time_avg:
Ntimes = list(fit.values())[0].shape[0]
for k in fit:
fit[k] = np.repeat(np.moveaxis(np.median(fit[k], axis=0)[np.newaxis], 0, 0), Ntimes, axis=0)
if not return_gains:
# rename variables ew/ns instead of 0/1 to maintain backwards compatability
if assume_2D:
params = list(fit.keys())
for p in params:
if 'T_0' in p:
fit[p.replace('T_0', 'T_ew')] = fit[p]
del fit[p]
if 'T_1' in p:
fit[p.replace('T_1', 'T_ns')] = fit[p]
del fit[p]
return fit
else:
gains = {}
for ant in gain_ants:
# construct delays from delay slopes
if four_pol:
Taus = [fit[f'T_{d}'] for d in range((nDims, 2)[assume_2D])]
else:
Taus = [fit[f'T_{d}_{ant[1]}'] for d in range((nDims, 2)[assume_2D])]
delays = np.einsum('ijk,i->j', Taus, antpos[ant[0]][0:len(Taus)])
# construct gains from freqs and delays
freqs = f0 + np.arange(list(data.values())[0].shape[1]) * df
gains[ant] = np.exp(2.0j * np.pi * np.outer(delays, freqs))
return gains
def dft_phase_slope_solver(xs, ys, data, flags=None):
'''Solve for spatial phase slopes across an array by looking for the peak in the DFT.
This is analogous to the method in utils.fft_dly(), except it's in 2D and does not
assume a regular grid for xs and ys.
Arguments:
xs: 1D array of x positions (e.g. of antennas or baselines)
ys: 1D array of y positions (must be same length as xs)
data: ndarray of complex numbers to fit with a phase slope. The first dimension must match
xs and ys, but subsequent dimensions will be preserved and solved independently.
Any np.nan in data is interpreted as a flag.
flags: optional array of flags of data not to include in the phase slope solver.
Returns:
slope_x, slope_y: phase slopes in units of radians/[xs] where the best fit phase slope plane
is np.exp(1.0j * (xs * slope_x + ys * slope_y)). Both have the same shape as
the data after collapsing along the first dimension.
'''
# use the minimum and maximum difference between positions to define the search range and sampling in Fourier space
deltas = [((xi - xj)**2 + (yi - yj)**2)**.5 for i, (xi, yi) in enumerate(zip(xs, ys))
for (xj, yj) in zip(xs[i + 1:], ys[i + 1:])]
search_slice = slice(-1.0 / np.min(deltas), 1.0 / np.min(deltas), 1.0 / np.max(deltas))
# define cost function
def dft_abs(k, x, y, z):
return -np.abs(np.dot(z, np.exp(-2.0j * np.pi * (x * k[0] + y * k[1]))))
# set up flags, treating nans as flags
if flags is None:
flags = np.zeros_like(data, dtype=bool)
flags = flags | np.isnan(data)
# loop over data, minimizing the cost function
dflat = data.reshape((len(xs), -1))
fflat = flags.reshape((len(xs), -1))
slope_x = np.zeros_like(dflat[0, :].real)
slope_y = np.zeros_like(dflat[0, :].real)
for i in range(dflat.shape[1]):
if not np.all(np.isnan(dflat[:, i])):
dft_peak = brute(dft_abs, (search_slice, search_slice),
(xs[~fflat[:, i]], ys[~fflat[:, i]],
dflat[:, i][~fflat[:, i]]), finish=minimize)
slope_x[i] = dft_peak[0]
slope_y[i] = dft_peak[1]
return 2 * np.pi * slope_x.reshape(data.shape[1:]), 2 * np.pi * slope_y.reshape(data.shape[1:])
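# A minimal sketch (hypothetical function, not part of this module) of the idea behind
# dft_phase_slope_solver: the magnitude of the non-uniform DFT above peaks at the true phase slope,
# so a search over candidate slopes (here a coarse brute-force scan) recovers (slope_x, slope_y).
def _demo_dft_phase_slope_peak():
    import numpy as np
    rng = np.random.default_rng(0)
    xs = rng.uniform(-50, 50, 30)                # assumed antenna/baseline x positions [m]
    ys = rng.uniform(-50, 50, 30)
    kx_true, ky_true = 0.01, -0.02               # injected slope [cycles / m]
    z = np.exp(2.0j * np.pi * (xs * kx_true + ys * ky_true))
    k_grid = np.linspace(-0.05, 0.05, 101)
    mag = np.abs([[np.dot(z, np.exp(-2.0j * np.pi * (xs * kx + ys * ky)))
                   for ky in k_grid] for kx in k_grid])
    i, j = np.unravel_index(np.argmax(mag), mag.shape)
    return k_grid[i], k_grid[j]                  # approximately (kx_true, ky_true)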
def ndim_fft_phase_slope_solver(data, bl_vecs, assume_2D=True, zero_pad=2, bl_error_tol=1.0):
'''Find phase slopes across the array in the data. Similar to utils.fft_dly,
but can grid arbitrary bl_vecs in N dimensions (for example, when using
generalized antenna positions from redcal.reds_to_antpos in arrays with
extra degeneracies).
Parameters:
-----------
data : dictionary or DataContainer mapping keys to (complex) ndarrays.
All polarizations are treated equally and solved for together.
bl_vecs : dictionary mapping keys in data to vectors in N dimensions
assume_2D : if True, assume N == 2 and only use the first two dimensions of bl_vecs.
zero_pad : float factor by which to expand the grid onto which the data is binned.
Increases resolution in Fourier space at the cost of runtime/memory.
Must be >= 1.
bl_error_tol : float used to define non-zero elements of baseline vectors.
This helps set the fundamental resolution of the grid.
Output:
-------
phase_slopes : list of length N dimensions. Each element is the same shape
as each entry in data. Contains the phase gradients in units
of 1 / [bl_vecs].
'''
nDim = _count_nDims(bl_vecs, assume_2D=assume_2D)
if assume_2D:
nDim = 2
keys = sorted(list(bl_vecs.keys()))
# Figure out a grid for the baseline vectors in each dimension
coords = []
all_bins = []
bl_vecs_array = np.array([bl_vecs[k] for k in keys])
assert zero_pad >= 1, f'zero_pad={zero_pad}, but it must be greater than or equal to 1.'
for d in range(nDim):
min_comp = np.min(bl_vecs_array[:, d])
max_comp = np.max(bl_vecs_array[:, d])
# pick minimum delta in this dimension inconsistent with 0 using bl_error_tol
dbl = np.min(np.abs(bl_vecs_array[:, d])[np.abs(bl_vecs_array[:, d]) >= bl_error_tol])
comp_range = max_comp - min_comp
bins = np.arange(min_comp - dbl - comp_range * (zero_pad - 1) / 2,
max_comp + 2 * dbl + comp_range * (zero_pad - 1) / 2, dbl)
all_bins.append(bins)
coords.append(np.digitize(bl_vecs_array[:, d], bins))
coords = np.array(coords).T
# create and fill grid with complex data
digitized = np.zeros(tuple([len(b) for b in all_bins]) + data[keys[0]].shape, dtype=complex)
for i, k in enumerate(keys):
digitized[tuple(coords[i])] = data[k]
digitized[~np.isfinite(digitized)] = 0
# FFT along first nDim dimensions
digitized_fft = np.fft.fftn(digitized, axes=tuple(range(nDim)))
# Condense the FFTed dimensions and find the max along them
new_shape = (np.prod(digitized_fft.shape[0:nDim]),) + data[keys[0]].shape
arg_maxes = digitized_fft.reshape(new_shape).argmax(0)
# Find the coordinates of the peaks in the FFT dimensions
peak_coords = np.unravel_index(arg_maxes, digitized_fft.shape[0:nDim])
# Convert coordinates to phase slopes using fft_freq
phase_slopes = []
for d in range(nDim):
fourier_modes = np.fft.fftfreq(len(all_bins[d]), np.median(np.diff(all_bins[d])))
phase_slopes.append(fourier_modes[peak_coords[d]] * 2 * np.pi)
return phase_slopes
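# A 1D sketch (hypothetical function, not part of this module) of the gridding + FFT idea used by
# ndim_fft_phase_slope_solver: bin phase-slope data onto a regular grid, FFT, and convert the peak
# bin to a slope with np.fft.fftfreq.
def _demo_fft_phase_slope_1d():
    import numpy as np
    dbl = 10.0                                   # grid spacing in baseline units [m]
    bls = np.arange(0.0, 100.0, dbl)             # already-gridded baseline coordinates
    slope = 0.03                                 # injected slope [cycles / m]
    z = np.exp(2.0j * np.pi * slope * bls)
    modes = np.fft.fftfreq(len(bls), d=dbl)      # Fourier modes [cycles / m]
    peak = modes[np.argmax(np.abs(np.fft.fft(z)))]
    return peak * 2 * np.pi                      # phase slope [radians / m], ~ 2*pi*0.03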
def global_phase_slope_logcal(model, data, antpos, reds=None, solver='linfit', wgts=None, refant=None,
assume_2D=True, verbose=True, tol=1.0, edge_cut=0, time_avg=False,
zero_pad=2, return_gains=False, gain_ants=[]):
"""
Solve for a frequency-independent spatial phase slope using the equation
median_over_freq(angle(V_ij,xy^data / V_ij,xy^model)) = dot(Phi_x, r_i) - dot(Phi_y, r_j)
Parameters:
-----------
model : visibility data of reference model, type=DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
data : visibility data of measurements, type=DataContainer
keys are antenna pair + pol tuples (must match model), values are
complex ndarray visibilities matching shape of model
antpos : type=dictionary, antpos dictionary. antenna num as key, position vector as value.
reds : list of list of redundant baselines. If left as None (default), will try to infer
reds from antpos, though if the antenna position dimensionality is > 3, this will fail.
solver : 'linfit' uses linsolve to fit phase slope across the array.
'dft' uses a spatial Fourier transform to find a phase slope, only works in 2D.
'ndim_fft' uses a gridded spatial Fourier transform instead, but works in ND.
wgts : weights of data, type=DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data. These are only used to find delays from
integrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as a reference,
The antenna position coordinates are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
assume_2D : type=boolean, [default=True]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like Phi_0, Phi_1, Phi_2, etc. corresponding to the dimensions in antpos.
verbose : print output, type=boolean, [default=True]
tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters)
edge_cut : int, number of channels to exclude at each band edge in phase slope solver
time_avg : boolean, if True, replace resultant antenna phase slopes with the median across time
zero_pad : float factor by which to expand the grid onto which the data is binned. Only used
for ndim_fft mode. Must be >= 1.
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary containing frequency-independent phase slope, e.g. Phi_ns_Jxx
for each position component and polarization in units of radians / [antpos].
If assume_2D is False, then these will be the more general Phi_0, Phi_1,
Phi_2, etc. corresponding to the dimensions in antpos.
else:
gains : dictionary with gain_ants as keys and gain waterfall arrays as values
"""
# check solver and edgecut
assert solver in PHASE_SLOPE_SOLVERS, f"Unrecognized solver {solver}"
echo(f"...configuring global_phase_slope_logcal for the {solver} algorithm", verbose=verbose)
assert 2 * edge_cut < list(data.values())[0].shape[1] - 1, "edge_cut cannot be >= Nfreqs/2 - 1"
# get keys from model and data dictionaries
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# make weights if None and make flags
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(data[k], dtype=np.float)
flags = DataContainer({k: ~wgts[k].astype(np.bool) for k in wgts})
# center antenna positions about the reference antenna
if refant is None:
refant = keys[0][0]
assert refant in antnums, "reference antenna {} not found in antenna list".format(refant)
antpos = odict(list(map(lambda k: (k, antpos[k] - antpos[refant]), antpos.keys())))
# count dimensions of antenna positions, figure out how many to solve for
nDims = _count_nDims(antpos, assume_2D=assume_2D)
# average data over baselines
if reds is None:
reds = redcal.get_pos_reds(antpos, bl_error_tol=tol)
ap = data.antpairs()
reds_here = []
for red in reds:
red_here = [bl[0:2] for bl in red if bl[0:2] in ap or bl[0:2][::-1] in ap] # if the reds have polarizations, ignore them
if len(red_here) > 0:
reds_here.append(red_here)
avg_data, avg_flags, _ = utils.red_average(data, reds=reds_here, flags=flags, inplace=False)
red_keys = list(avg_data.keys())
avg_wgts = DataContainer({k: (~avg_flags[k]).astype(np.float) for k in avg_flags})
avg_model, _, _ = utils.red_average(model, reds=reds_here, flags=flags, inplace=False)
ls_data, ls_wgts, bl_vecs, pols = {}, {}, {}, {}
for rk in red_keys:
# build equation string
eqn_str = ''
ap0, ap1 = split_pol(rk[2])
for d in range(nDims):
if len(eqn_str) > 0:
eqn_str += ' + '
eqn_str += f'{antpos[rk[0]][d]}*Phi_{d}_{ap0} - {antpos[rk[1]][d]}*Phi_{d}_{ap1}'
bl_vecs[eqn_str] = antpos[rk[0]] - antpos[rk[1]]
pols[eqn_str] = rk[2]
# calculate median of unflagged angle(data/model)
# ls_weights are sum of non-binary weights
dm_ratio = avg_data[rk] / avg_model[rk]
dm_ratio /= np.abs(dm_ratio) # This gives all channels roughly equal weight, moderating the effect of RFI (as in firstcal)
binary_flgs = np.isclose(avg_wgts[rk], 0.0) | np.isinf(dm_ratio) | np.isnan(dm_ratio)
avg_wgts[rk][binary_flgs] = 0.0
dm_ratio[binary_flgs] *= np.nan
if solver == 'linfit': # we want to fit the angles
ls_data[eqn_str] = np.nanmedian(np.angle(dm_ratio[:, edge_cut:(dm_ratio.shape[1] - edge_cut)]), axis=1, keepdims=True)
elif solver in ['dft', 'ndim_fft']: # we want the full complex number
ls_data[eqn_str] = np.nanmedian(dm_ratio[:, edge_cut:(dm_ratio.shape[1] - edge_cut)], axis=1, keepdims=True)
ls_wgts[eqn_str] = np.sum(avg_wgts[rk][:, edge_cut:(dm_ratio.shape[1] - edge_cut)], axis=1, keepdims=True)
# set unobserved data to 0 with 0 weight
ls_wgts[eqn_str][~np.isfinite(ls_data[eqn_str])] = 0
ls_data[eqn_str][~np.isfinite(ls_data[eqn_str])] = 0
if solver == 'linfit': # build linear system for phase slopes and solve with linsolve
# setup linsolve and run
solver = linsolve.LinearSolver(ls_data, wgts=ls_wgts)
echo("...running linsolve", verbose=verbose)
fit = solver.solve()
echo("...finished linsolve", verbose=verbose)
elif solver in ['dft', 'ndim_fft']: # look for a peak angle slope by FTing across the array
if not np.all([split_pol(pol)[0] == split_pol(pol)[1] for pol in data.pols()]):
raise NotImplementedError('DFT/FFT solving of global phase not implemented for abscal with cross-polarizations.')
for k in ls_data:
ls_data[k][ls_wgts[k] == 0] = np.nan
# solve one polarization at a time
fit = {}
for pol in data.pols():
eqkeys = [k for k in bl_vecs.keys() if pols[k] == pol]
# reformat data into arrays for dft_phase_slope_solver
if solver == 'dft':
assert assume_2D, 'dft solver only works when the array is 2D. Try using ndim_fft instead.'
blx = np.array([bl_vecs[k][0] for k in eqkeys])
bly = np.array([bl_vecs[k][1] for k in eqkeys])
data_array = np.array([ls_data[k] for k in eqkeys])
slope_x, slope_y = dft_phase_slope_solver(blx, bly, data_array)
fit['Phi_0_{}'.format(split_pol(pol)[0])] = slope_x
fit['Phi_1_{}'.format(split_pol(pol)[0])] = slope_y
# Perform ndim_fft solver
elif solver == 'ndim_fft':
slopes = ndim_fft_phase_slope_solver({k: ls_data[k] for k in eqkeys}, {k: bl_vecs[k] for k in eqkeys},
assume_2D=assume_2D, zero_pad=zero_pad, bl_error_tol=tol)
for d, slope in enumerate(slopes):
fit[f'Phi_{d}_{split_pol(pol)[0]}'] = slope
# time average
if time_avg:
Ntimes = list(fit.values())[0].shape[0]
for k in fit:
fit[k] = np.repeat(np.moveaxis(np.median(fit[k], axis=0)[np.newaxis], 0, 0), Ntimes, axis=0)
if not return_gains:
# rename variables to ew/ns instead of 0/1 to maintain backwards compatibility
if assume_2D:
params = list(fit.keys())
for p in params:
if 'Phi_0' in p:
fit[p.replace('Phi_0', 'Phi_ew')] = fit[p]
del fit[p]
if 'Phi_1' in p:
fit[p.replace('Phi_1', 'Phi_ns')] = fit[p]
del fit[p]
return fit
else:
# compute gains, dotting each slope into the corresponding coordinate in that dimension
gains = {}
for ant in gain_ants:
Phis = [fit[f'Phi_{d}_{ant[1]}'] for d in range((nDims, 2)[assume_2D])]
gains[ant] = np.exp(1.0j * np.einsum('i,ijk,k->jk', antpos[ant[0]][0:len(Phis)],
Phis, np.ones(data[keys[0]].shape[1])))
return gains
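# A minimal sketch (hypothetical function, not part of this module) of the gain construction in the
# return_gains branch above: the phase slope Phi dotted into an antenna position gives a single,
# frequency-independent phase per antenna, repeated across all channels.
def _demo_phase_slope_to_gain():
    import numpy as np
    Phi = np.array([0.002, -0.001])              # assumed phase slope [rad / m] along (ew, ns)
    r_ant = np.array([29.2, 14.6])               # antenna position [m] relative to the reference
    nfreqs = 1024
    return np.exp(1.0j * np.dot(Phi, r_ant)) * np.ones(nfreqs, dtype=complex)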
def merge_gains(gains, merge_shared=True):
"""
Merge a list of gain (or flag) dictionaries.
If gains has boolean ndarray keys, interpret as flags
and merge with a logical OR.
Parameters:
-----------
gains : type=list or tuple, series of gain dictionaries with (ant, pol) keys
and complex ndarrays as values (or boolean ndarrays if flags)
merge_shared : type=bool, If True merge only shared keys, eliminating the others.
Otherwise, merge all keys.
Output:
-------
merged_gains : type=dictionary, merged gain (or flag) dictionary with same key-value
structure as input dict.
"""
# get shared keys
if merge_shared:
keys = sorted(set(reduce(operator.and_, [set(g.keys()) for g in gains])))
else:
keys = sorted(set(reduce(operator.add, [list(g.keys()) for g in gains])))
# form merged_gains dict
merged_gains = odict()
# determine if gains or flags from first entry in gains
fedflags = False
if gains[0][list(gains[0].keys())[0]].dtype == np.bool_:
fedflags = True
# iterate over keys
for i, k in enumerate(keys):
if fedflags:
merged_gains[k] = reduce(operator.add, [g.get(k, True) for g in gains])
else:
merged_gains[k] = reduce(operator.mul, [g.get(k, 1.0) for g in gains])
return merged_gains
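# A minimal sketch (hypothetical function and keys, not part of this module) of what merge_gains does
# for complex gains: values at shared keys are multiplied together. For boolean flag dictionaries the
# same reduce pattern uses operator.add instead, which acts as the logical OR described above.
def _demo_merge_gains():
    import operator
    from functools import reduce
    import numpy as np
    g1 = {(0, 'Jnn'): np.full((2, 3), 2.0 + 0.0j)}
    g2 = {(0, 'Jnn'): np.full((2, 3), 0.5 + 0.0j)}
    return {k: reduce(operator.mul, [g[k] for g in (g1, g2)]) for k in g1}   # all ones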
def data_key_to_array_axis(data, key_index, array_index=-1, avg_dict=None):
"""
move an index of data.keys() into the data axes
Parameters:
-----------
data : type=DataContainer, complex visibility data with
antenna-pair + pol tuples for keys, in DataContainer dictionary format.
key_index : integer, index of keys to consolidate into data arrays
array_index : integer, which axes of data arrays to append to
avg_dict : DataContainer, a dictionary with same keys as data
that will have its data arrays averaged along key_index
Result:
-------
new_data : DataContainer, complex visibility data
with key_index of keys moved into the data arrays
new_avg_dict : copy of avg_dict. Only returned if avg_dict is not None.
popped_keys : unique list of keys moved into data array axis
"""
# instantiate new data object
new_data = odict()
new_avg = odict()
# get keys
keys = list(data.keys())
# sort keys across key_index
key_sort = np.argsort(np.array(keys, dtype=np.object)[:, key_index])
keys = list(map(lambda i: keys[i], key_sort))
popped_keys = np.unique(np.array(keys, dtype=np.object)[:, key_index])
# get new keys
new_keys = list(map(lambda k: k[:key_index] + k[key_index + 1:], keys))
new_unique_keys = []
# iterate over new_keys
for i, nk in enumerate(new_keys):
# check for unique keys
if nk in new_unique_keys:
continue
new_unique_keys.append(nk)
# get all instances of redundant keys
ravel = list(map(lambda k: k == nk, new_keys))
# iterate over redundant keys and consolidate into new arrays
arr = []
avg_arr = []
for j, b in enumerate(ravel):
if b:
arr.append(data[keys[j]])
if avg_dict is not None:
avg_arr.append(avg_dict[keys[j]])
# assign to new_data
new_data[nk] = np.moveaxis(arr, 0, array_index)
if avg_dict is not None:
new_avg[nk] = np.nanmean(avg_arr, axis=0)
if avg_dict is not None:
return new_data, new_avg, popped_keys
else:
return new_data, popped_keys
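# A minimal sketch (hypothetical function and keys, not part of this module) of the key-to-axis move
# performed by data_key_to_array_axis: here the polarization element of the keys is stacked into a
# new last axis of the arrays.
def _demo_stack_key_into_axis():
    import numpy as np
    data = {(0, 1, 'nn'): np.zeros((4, 8)), (0, 1, 'ee'): np.ones((4, 8))}
    pols = ['nn', 'ee']
    stacked = np.moveaxis([data[(0, 1, p)] for p in pols], 0, -1)
    return stacked.shape                         # (4, 8, 2)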
def array_axis_to_data_key(data, array_index, array_keys, key_index=-1, copy_dict=None):
"""
move an axis of the data arrays out of the arrays
and into a unique key index in data.keys()
Parameters:
-----------
data : DataContainer, complex visibility data with
antenna-pair (+ pol + other) tuples for keys
array_index : integer, which axes of data arrays
to extract from arrays and move into keys
array_keys : list, list of new keys from array elements. Must have length
equal to length of data_array along axis array_index
key_index : integer, index within the new set of keys to insert array_keys
copy_dict : DataContainer, a dictionary with same keys as data
that will have its data arrays copied along array_keys
Output:
-------
new_data : DataContainer, complex visibility data
with array_index of data arrays extracted and moved
into a unique set of keys
new_copy : DataContainer, copy of copy_dict
with array_index of data arrays copied to unique keys
"""
# instantiate new object
new_data = odict()
new_copy = odict()
# get keys
keys = sorted(data.keys())
new_keys = []
# iterate over keys
for i, k in enumerate(keys):
# iterate over new array keys
for j, ak in enumerate(array_keys):
new_key = list(k)
if key_index == -1:
new_key.insert(len(new_key), ak)
else:
new_key.insert(key_index, ak)
new_key = tuple(new_key)
new_data[new_key] = np.take(data[k], j, axis=array_index)
if copy_dict is not None:
new_copy[new_key] = copy.copy(copy_dict[k])
if copy_dict is not None:
return new_data, new_copy
else:
return new_data
def wiener(data, window=(5, 11), noise=None, medfilt=True, medfilt_kernel=(3, 9), array=False):
"""
Wiener filter complex visibility data. This might be used in constructing a
model reference. See scipy.signal.wiener for details on method.
Parameters:
-----------
data : type=DataContainer, a DataContainer dictionary holding complex visibility data,
unless array is True
window : type=tuple, wiener-filter window along each axis of data
noise : type=float, estimate of noise. if None will estimate itself
medfilt : type=bool, if True, median filter data before wiener filtering
medfilt_kernel : type=tuple, median filter kernel along each axis of data
array : type=boolean, if True, feeding a single ndarray, rather than a dictionary
Output: (new_data)
-------
new_data : type=DataContainer, DataContainer dictionary holding new visibility data
"""
# check if data is an array
if array:
data = {'arr': data}
new_data = odict()
for i, k in enumerate(list(data.keys())):
real = np.real(data[k])
imag = np.imag(data[k])
if medfilt:
real = signal.medfilt(real, kernel_size=medfilt_kernel)
imag = signal.medfilt(imag, kernel_size=medfilt_kernel)
new_data[k] = signal.wiener(real, mysize=window, noise=noise) + \
1j * signal.wiener(imag, mysize=window, noise=noise)
if array:
return new_data['arr']
else:
return DataContainer(new_data)
def interp2d_vis(model, model_lsts, model_freqs, data_lsts, data_freqs, flags=None,
kind='cubic', flag_extrapolate=True, medfilt_flagged=True, medfilt_window=(3, 7),
fill_value=None):
"""
Interpolate complex visibility model onto the time & frequency basis of
a data visibility. See below for notes on flag propagation if flags is provided.
Parameters:
-----------
model : type=DataContainer, holds complex visibility for model
keys are antenna-pair + pol tuples, values are 2d complex visibility
with shape (Ntimes, Nfreqs).
model_lsts : 1D array of the model time axis, dtype=float, shape=(Ntimes,)
model_freqs : 1D array of the model freq axis, dtype=float, shape=(Nfreqs,)
data_lsts : 1D array of the data time axis, dtype=float, shape=(Ntimes,)
data_freqs : 1D array of the data freq axis, dtype=float, shape=(Nfreqs,)
flags : type=DataContainer, dictionary containing model flags. Can also contain model wgts
as floats and will convert to booleans appropriately.
kind : type=str, kind of interpolation, options=['linear', 'cubic', 'quintic']
medfilt_flagged : type=bool, if True, before interpolation, replace flagged pixels with output from
a median filter centered on each flagged pixel.
medfilt_window : type=tuple, extent of window for median filter across the (time, freq) axes.
Even numbers are rounded down to odd numbers.
flag_extrapolate : type=bool, flag extrapolated data_lsts if True.
fill_value : type=float, if fill_value is None, extrapolated points are extrapolated
else they are filled with fill_value.
Output: (new_model, new_flags)
-------
new_model : interpolated model, type=DataContainer
new_flags : flags associated with interpolated model, type=DataContainer
Notes:
------
If the data has flagged pixels, it is recommended to turn medfilt_flagged to True. This runs a median
filter on the flagged pixels and replaces their values with the results, but they remain flagged.
This happens *before* interpolation. This means that interpolation near flagged pixels
isn't significantly biased by their presence.
In general, if flags are fed, flags are propagated if a flagged pixel is a nearest neighbor
of an interpolated pixel.
"""
# make flags
new_model = odict()
new_flags = odict()
# get nearest neighbor points
freq_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_freqs - x)), data_freqs)))
time_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_lsts - x)), data_lsts)))
freq_nn, time_nn = np.meshgrid(freq_nn, time_nn)
# get model indices meshgrid
mod_F, mod_L = np.meshgrid(np.arange(len(model_freqs)), np.arange(len(model_lsts)))
# raise warning on flags
if flags is not None and medfilt_flagged is False:
print("Warning: flags are fed, but medfilt_flagged=False. \n"
"This may cause weird behavior of interpolated points near flagged data.")
# ensure flags are booleans
if flags is not None:
if np.issubdtype(flags[list(flags.keys())[0]].dtype, np.floating):
flags = DataContainer(odict(list(map(lambda k: (k, ~flags[k].astype(np.bool)), flags.keys()))))
# loop over keys
for i, k in enumerate(list(model.keys())):
# get model array
m = model[k]
# get real and imag separately
real = np.real(m)
imag = np.imag(m)
# median filter flagged data if desired
if medfilt_flagged and flags is not None:
# get extent of window along freq and time
f_ext = int((medfilt_window[1] - 1) / 2.)
t_ext = int((medfilt_window[0] - 1) / 2.)
# set flagged data to nan
real[flags[k]] *= np.nan
imag[flags[k]] *= np.nan
# get flagged indices
f_indices = mod_F[flags[k]]
l_indices = mod_L[flags[k]]
# construct fill arrays
real_fill = np.empty(len(f_indices), np.float)
imag_fill = np.empty(len(f_indices), np.float)
# iterate over flagged data and replace w/ medfilt
for j, (find, tind) in enumerate(zip(f_indices, l_indices)):
tlow, thi = tind - t_ext, tind + t_ext + 1
flow, fhi = find - f_ext, find + f_ext + 1
ll = 0
while True:
# iterate until window has non-flagged data in it
# with a max of 10 iterations
if tlow < 0:
tlow = 0
if flow < 0:
flow = 0
r_med = np.nanmedian(real[tlow:thi, flow:fhi])
i_med = np.nanmedian(imag[tlow:thi, flow:fhi])
tlow -= 2
thi += 2
flow -= 2
fhi += 2
ll += 1
if not (np.isnan(r_med) or np.isnan(i_med)):
break
if ll > 10:
break
real_fill[j] = r_med
imag_fill[j] = i_med
# fill real and imag
real[l_indices, f_indices] = real_fill
imag[l_indices, f_indices] = imag_fill
# flag residual nans
resid_nans = np.isnan(real) + np.isnan(imag)
flags[k] += resid_nans
# replace residual nans
real[resid_nans] = 0.0
imag[resid_nans] = 0.0
# propagate flags to nearest neighbor
if flags is not None:
f = flags[k][time_nn, freq_nn]
# check f is boolean type
if np.issubdtype(f.dtype, np.floating):
f = ~(f.astype(np.bool))
else:
f = np.zeros_like(real, bool)
# interpolate
interp_real = interpolate.interp2d(model_freqs, model_lsts, real, kind=kind, copy=False, bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts)
interp_imag = interpolate.interp2d(model_freqs, model_lsts, imag, kind=kind, copy=False, bounds_error=False, fill_value=fill_value)(data_freqs, data_lsts)
# flag extrapolation if desired
if flag_extrapolate:
time_extrap = np.where((data_lsts > model_lsts.max() + 1e-6) | (data_lsts < model_lsts.min() - 1e-6))
freq_extrap = np.where((data_freqs > model_freqs.max() + 1e-6) | (data_freqs < model_freqs.min() - 1e-6))
f[time_extrap, :] = True
f[:, freq_extrap] = True
# rejoin
new_model[k] = interp_real + 1j * interp_imag
new_flags[k] = f
return DataContainer(new_model), DataContainer(new_flags)
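# A minimal sketch (hypothetical function, not part of this module) of the nearest-neighbor flag
# propagation used above: each data frequency is mapped to its nearest model channel, and a flag on
# that model channel is inherited by the interpolated data channel.
def _demo_nearest_neighbor_flag_propagation():
    import numpy as np
    model_freqs = np.linspace(100e6, 200e6, 11)
    data_freqs = np.linspace(100e6, 200e6, 21)
    freq_nn = np.array([np.argmin(np.abs(model_freqs - f)) for f in data_freqs])
    model_flags = np.zeros(len(model_freqs), dtype=bool)
    model_flags[5] = True                        # flag one model channel
    return model_flags[freq_nn]                  # flags propagated onto the data frequency grid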
def rephase_vis(model, model_lsts, data_lsts, bls, freqs, inplace=False, flags=None, max_dlst=0.005, latitude=-30.72152):
"""
Rephase model visibility data onto LST grid of data_lsts.
Parameters:
-----------
model : type=DataContainer, holds complex visibility for model
keys are antenna-pair + pol tuples, values are 2d complex visibility
with shape (Ntimes, Nfreqs)
model_lsts : 1D array of the LST grid in model [radians], dtype=float, shape=(Ntimes,)
data_lsts : 1D array of the LST grid in data [radians], dtype=float, shape=(Ntimes,)
bls : type=dictionary, ant-pair keys that hold baseline position vectors in the ENU frame in meters
freqs : type=float ndarray, holds frequency channels of model in Hz.
inplace : type=bool, if True edit data in memory, else make a copy and return
flags : type=DataContainer, holds model flags
max_dlst : type=float, maximum dlst [radians] to allow for rephasing, otherwise flag data.
latitude : type=float, latitude of array in degrees North
Return: (new_model, new_flags)
-------
new_model : DataContainer with rephased model
new_flags : DataContainer with new flags
"""
# unravel LST array if necessary
data_lsts[data_lsts < data_lsts[0]] += 2 * np.pi
# get nearest neighbor model points
lst_nn = np.array(list(map(lambda x: np.argmin(np.abs(model_lsts - x)), data_lsts)))
# get dlst array
dlst = data_lsts - model_lsts[lst_nn]
# flag dlst above threshold
flag_lst = np.zeros_like(dlst, np.bool)
flag_lst[np.abs(dlst) > max_dlst] = True
# make new_model and new_flags
if inplace:
new_model = model
else:
new_model = odict()
if inplace and flags is not None:
new_flags = flags
else:
new_flags = odict()
for k in model.keys():
m = model[k][lst_nn, :]
new_model[k] = m
if flags is None:
new_flags[k] = np.zeros_like(m, np.bool)
else:
new_flags[k] = flags[k][lst_nn, :]
new_flags[k][flag_lst, :] = True
# rephase
if inplace:
utils.lst_rephase(new_model, bls, freqs, dlst, lat=latitude, inplace=True)
return new_model, new_flags
else:
new_model = utils.lst_rephase(new_model, bls, freqs, dlst, lat=latitude, inplace=False)
return DataContainer(new_model), DataContainer(new_flags)
def fill_dict_nans(data, wgts=None, nan_fill=None, inf_fill=None, array=False):
"""
take a dictionary and re-fill nan and inf ndarray values.
Parameters:
-----------
data : type=DataContainer, visibility dictionary in AbsCal dictionary format
wgts : type=DataContainer, weights dictionary matching shape of data to also fill
nan_fill : if not None, fill nans with nan_fill
inf_fill : if not None, fill infs with inf_fill
array : type=boolean, if True, data is a single ndarray to perform operation on
"""
if array:
if nan_fill is not None:
nan_select = np.isnan(data)
data[nan_select] = nan_fill
if wgts is not None:
wgts[nan_select] = 0.0
if inf_fill is not None:
inf_select = np.isinf(data)
data[inf_select] = inf_fill
if wgts is not None:
wgts[inf_select] = 0.0
else:
for i, k in enumerate(data.keys()):
if nan_fill is not None:
# replace nan
nan_select = np.isnan(data[k])
data[k][nan_select] = nan_fill
if wgts is not None:
wgts[k][nan_select] = 0.0
if inf_fill is not None:
# replace infs
inf_select = np.isinf(data[k])
data[k][inf_select] = inf_fill
if wgts is not None:
wgts[k][inf_select] = 0.0
def flatten(nested_list):
""" flatten a nested list """
return [item for sublist in nested_list for item in sublist]
class Baseline(object):
"""
Baseline object for making antenna-independent, unique baseline labels
for baselines up to 1km in length to an absolute precision of 10 cm.
Only the __eq__ operator is overloaded.
"""
def __init__(self, bl, tol=2.0):
"""
bl : list containing [dx, dy, dz] float separation in meters
tol : tolerance for baseline length comparison in meters
"""
self.label = "{:06.1f}:{:06.1f}:{:06.1f}".format(float(bl[0]), float(bl[1]), float(bl[2]))
self.bl = np.array(bl, dtype=np.float)
self.tol = tol
def __repr__(self):
return self.label
@property
def unit(self):
return self.bl / np.linalg.norm(self.bl)
@property
def len(self):
return np.linalg.norm(self.bl)
def __eq__(self, B2):
tol = np.max([self.tol, B2.tol])
# check same length
if np.isclose(self.len, B2.len, atol=tol):
# check x, y, z
equiv = bool(reduce(operator.mul, list(map(lambda x: np.isclose(*x, atol=tol), zip(self.bl, B2.bl)))))
dot = np.dot(self.unit, B2.unit)
if equiv:
return True
# check conjugation
elif np.isclose(np.arccos(dot), np.pi, atol=tol / self.len) or (dot < -1.0):
return 'conjugated'
# else return False
else:
return False
else:
return False
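# A minimal sketch (hypothetical function, not part of this module) of the conjugation test in
# Baseline.__eq__: two baseline vectors that are antiparallel (unit-vector dot product ~ -1)
# describe the same physical separation with conjugated visibilities.
def _demo_baseline_conjugation_check():
    import numpy as np
    b1 = np.array([14.6, 0.0, 0.0])
    b2 = -b1
    unit1 = b1 / np.linalg.norm(b1)
    unit2 = b2 / np.linalg.norm(b2)
    return bool(np.isclose(np.dot(unit1, unit2), -1.0))   # True -> treated as 'conjugated'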
def match_red_baselines(model, model_antpos, data, data_antpos, tol=1.0, verbose=True):
"""
Match unique model baseline keys to unique data baseline keys based on positional redundancy.
Ideally, both model and data contain only unique baselines, in which case there is a
one-to-one mapping. If model contains extra redundant baselines, these are not propagated
to new_model. If data contains extra redundant baselines, the lowest ant1-ant2 pair is chosen
as the baseline key to insert into model.
Parameters:
-----------
model : type=DataContainer, model dictionary holding complex visibilities
must conform to DataContainer dictionary format.
model_antpos : type=dictionary, dictionary holding antenna positions for the model dictionary
keys are antenna integers, values are ndarrays of position vectors in meters
data : type=DataContainer, data dictionary holding complex visibilities.
must conform to DataContainer dictionary format.
data_antpos : type=dictionary, dictionary holding antenna positions for the data dictionary
same format as model_antpos
tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters)
Output: (new_model)
-------
new_model : type=DataContainer, dictionary holding complex visibilities from model that
had matching baselines to data
"""
# create baseline keys for model
model_keys = list(model.keys())
model_bls = np.array(list(map(lambda k: Baseline(model_antpos[k[1]] - model_antpos[k[0]], tol=tol), model_keys)))
# create baseline keys for data
data_keys = list(data.keys())
data_bls = np.array(list(map(lambda k: Baseline(data_antpos[k[1]] - data_antpos[k[0]], tol=tol), data_keys)))
# iterate over data baselines
new_model = odict()
for i, bl in enumerate(model_bls):
# compare bl to all data_bls
comparison = np.array(list(map(lambda mbl: bl == mbl, data_bls)), np.str)
# get matches
matches = np.where((comparison == 'True') | (comparison == 'conjugated'))[0]
# check for matches
if len(matches) == 0:
echo("found zero matches in data for model {}".format(model_keys[i]), verbose=verbose)
continue
else:
if len(matches) > 1:
echo("found more than 1 match in data to model {}: {}".format(model_keys[i], list(map(lambda j: data_keys[j], matches))), verbose=verbose)
# assign to new_data
if comparison[matches[0]] == 'True':
new_model[data_keys[matches[0]]] = model[model_keys[i]]
elif comparison[matches[0]] == 'conjugated':
new_model[data_keys[matches[0]]] = np.conj(model[model_keys[i]])
return DataContainer(new_model)
def avg_data_across_red_bls(data, antpos, wgts=None, broadcast_wgts=True, tol=1.0,
mirror_red_data=False, reds=None):
"""
Given complex visibility data spanning one or more redundant
baseline groups, average the redundant visibilities and return them.
Parameters:
-----------
data : type=DataContainer, data dictionary holding complex visibilities.
must conform to AbsCal dictionary format.
antpos : type=dictionary, antenna position dictionary
wgts : type=DataContainer, data weights as float
broadcast_wgts : type=boolean, if True, take geometric mean of input weights as output weights,
else use mean. If True, this has the effect of broadcasting a single flag from any particular
baseline to all baselines in a baseline group.
tol : type=float, redundant baseline tolerance threshold
mirror_red_data : type=boolean, if True, mirror average visibility across red bls
reds : list of list of redundant baselines with polarization strings.
If None, reds is produced from antpos.
Output: (red_data, red_wgts, red_keys)
-------
"""
warnings.warn("Warning: This function will be deprecated in the next hera_cal release.")
# get data keys
keys = list(data.keys())
# get data, wgts and ants
pols = np.unique(list(map(lambda k: k[2], data.keys())))
ants = np.unique(np.concatenate(keys))
if wgts is None:
wgts = DataContainer(odict(list(map(lambda k: (k, np.ones_like(data[k]).astype(np.float)), data.keys()))))
# get redundant baselines if not provided
if reds is None:
reds = redcal.get_reds(antpos, bl_error_tol=tol, pols=pols)
# strip reds of keys not in data
stripped_reds = []
for i, bl_group in enumerate(reds):
group = []
for k in bl_group:
if k in data:
group.append(k)
if len(group) > 0:
stripped_reds.append(group)
# make red_data dictionary
red_data = odict()
red_wgts = odict()
# iterate over reds
for i, bl_group in enumerate(stripped_reds):
# average redundant baseline group
d = np.nansum(list(map(lambda k: data[k] * wgts[k], bl_group)), axis=0)
d /= np.nansum(list(map(lambda k: wgts[k], bl_group)), axis=0)
# get wgts
if broadcast_wgts:
w = np.array(reduce(operator.mul, list(map(lambda k: wgts[k], bl_group))), np.float) ** (1. / len(bl_group))
else:
w = np.array(reduce(operator.add, list(map(lambda k: wgts[k], bl_group))), np.float) / len(bl_group)
# iterate over bl_group
for j, key in enumerate(sorted(bl_group)):
# assign to red_data and wgts
red_data[key] = d
red_wgts[key] = w
# break if no mirror
if mirror_red_data is False:
break
# get red_data keys
red_keys = list(red_data.keys())
return DataContainer(red_data), DataContainer(red_wgts), red_keys
def mirror_data_to_red_bls(data, antpos, tol=2.0, weights=False):
"""
Given unique baseline data (like omnical model visibilities),
copy the data over to all other baselines in the same redundant group.
If weights==True, treat data as a wgts dictionary and multiply values
by their redundant baseline weighting.
Parameters:
-----------
data : data DataContainer in hera_cal.DataContainer form
antpos : type=dictionary, antenna positions dictionary
keys are antenna integers, values are ndarray baseline vectors.
tol : type=float, redundant baseline distance tolerance in units of baseline vectors
weights : type=bool, if True, treat data as a wgts dictionary and multiply by redundant weighting.
Output: (red_data)
-------
red_data : type=DataContainer, data dictionary in AbsCal form, with unique baseline data
distributed to redundant baseline groups.
if weights == True:
red_data is a real-valued wgts dictionary with redundant baseline weighting multiplied in.
"""
# get data keys
keys = list(data.keys())
# get polarizations in data
pols = data.pols()
# get redundant baselines
reds = redcal.get_reds(antpos, bl_error_tol=tol, pols=pols)
# make red_data dictionary
red_data = odict()
# iterate over data keys
for i, k in enumerate(keys):
# find which bl_group this key belongs to
match = np.array(list(map(lambda r: k in r, reds)))
conj_match = np.array(list(map(lambda r: reverse_bl(k) in r, reds)))
# if no match, just copy data over to red_data
if True not in match and True not in conj_match:
red_data[k] = copy.copy(data[k])
else:
# iterate over matches
for j, (m, cm) in enumerate(zip(match, conj_match)):
if weights:
# if weight dictionary, add repeated baselines
if m:
if k not in red_data:
red_data[k] = copy.copy(data[k])
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j]) - 1
else:
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j])
elif cm:
if k not in red_data:
red_data[k] = copy.copy(data[k])
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j]) - 1
else:
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)] + len(reds[j])
else:
# if match, insert all bls in bl_group into red_data
if m:
for bl in reds[j]:
red_data[bl] = copy.copy(data[k])
elif cm:
for bl in reds[j]:
red_data[bl] = np.conj(data[k])
# re-sort, square if weights to match linsolve
if weights:
for i, k in enumerate(red_data):
red_data[k][red_data[k].astype(np.bool)] = red_data[k][red_data[k].astype(np.bool)]**(2.0)
else:
red_data = odict([(k, red_data[k]) for k in sorted(red_data)])
return DataContainer(red_data)
def match_times(datafile, modelfiles, filetype='uvh5', atol=1e-5):
"""
Match start and end LST of datafile to modelfiles. Each file in modelfiles needs
to have the same integration time.
Args:
datafile : type=str, path to data file
modelfiles : type=list of str, list of filepaths to model files ordered according to file start time
filetype : str, options=['uvh5', 'miriad']
Returns:
matched_modelfiles : type=list, list of modelfiles that overlap w/ datafile in LST
"""
# get lst arrays
data_dlst, data_dtime, data_lsts, data_times = io.get_file_times(datafile, filetype=filetype)
model_dlsts, model_dtimes, model_lsts, model_times = io.get_file_times(modelfiles, filetype=filetype)
# shift model files relative to first file & first index if needed
for ml in model_lsts:
if ml[0] < model_lsts[0][0]:
ml += 2 * np.pi
# get model start and stop, buffering by dlst / 2
model_starts = np.asarray([ml[0] - md / 2.0 for ml, md in zip(model_lsts, model_dlsts)])
model_ends = np.asarray([ml[-1] + md / 2.0 for ml, md in zip(model_lsts, model_dlsts)])
# shift data relative to model if needed
if data_lsts[-1] < model_starts[0]:
data_lsts += 2 * np.pi
# select model files
match = np.asarray(modelfiles)[(model_starts < data_lsts[-1] + atol)
& (model_ends > data_lsts[0] - atol)]
return match
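# A minimal sketch (hypothetical function and numbers, not part of this module) of the LST-overlap
# selection above: a model file is kept if its (buffered) LST range overlaps the data's LST range.
def _demo_lst_overlap():
    import numpy as np
    data_lsts = np.array([1.0, 1.1, 1.2])                 # [radians]
    model_starts = np.array([0.5, 1.15, 2.0])             # per-file start LSTs, already buffered
    model_ends = np.array([1.05, 1.9, 2.5])               # per-file end LSTs, already buffered
    atol = 1e-5
    return (model_starts < data_lsts[-1] + atol) & (model_ends > data_lsts[0] - atol)   # [True, True, False]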
def cut_bls(datacontainer, bls=None, min_bl_cut=None, max_bl_cut=None, inplace=False):
"""
Cut visibility data based on min and max baseline length.
Parameters
----------
datacontainer : DataContainer object to perform baseline cut on
bls : dictionary, holding baseline position vectors.
keys are antenna-pair tuples and values are baseline vectors in meters.
If bls is None, will look for antpos attr in datacontainer.
min_bl_cut : float, minimum baseline separation [meters] to keep in data
max_bl_cut : float, maximum baseline separation [meters] to keep in data
inplace : bool, if True edit data in input object, else make a copy.
Output
------
datacontainer : DataContainer object with bl cut enacted
"""
if not inplace:
datacontainer = copy.deepcopy(datacontainer)
if min_bl_cut is None:
min_bl_cut = 0.0
if max_bl_cut is None:
max_bl_cut = 1e10
if bls is None:
# look for antpos in dc
if not hasattr(datacontainer, 'antpos'):
raise ValueError("If bls is not fed, datacontainer must have antpos attribute.")
bls = odict()
ap = datacontainer.antpos
for bl in datacontainer.keys():
if bl[0] not in ap or bl[1] not in ap:
continue
bls[bl] = ap[bl[1]] - ap[bl[0]]
for k in list(datacontainer.keys()):
if k not in bls:
continue
bl_len = np.linalg.norm(bls[k])
if bl_len > max_bl_cut or bl_len < min_bl_cut:
del datacontainer[k]
assert len(datacontainer) > 0, "no baselines were kept after baseline cut..."
return datacontainer
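# A minimal sketch (hypothetical function and keys, not part of this module) of the baseline-length
# cut performed by cut_bls: only keys whose baseline vector norm falls inside [min_bl_cut, max_bl_cut]
# survive the cut.
def _demo_baseline_length_cut():
    import numpy as np
    bls = {(0, 1): np.array([14.6, 0.0, 0.0]), (0, 2): np.array([150.0, 0.0, 0.0])}
    min_bl_cut, max_bl_cut = 10.0, 100.0
    return [k for k, v in bls.items() if min_bl_cut <= np.linalg.norm(v) <= max_bl_cut]   # [(0, 1)]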
class AbsCal(object):
"""
AbsCal object used for phasing and scaling visibility data to an absolute reference model.
A few different calibration methods exist. These include:
1) per-antenna amplitude logarithmic calibration solves the equation:
ln[abs(V_ij^data / V_ij^model)] = eta_i + eta_j
2) per-antenna phase logarithmic calibration solves the equation:
angle(V_ij^data / V_ij^model) = phi_i - phi_j
3) delay linear calibration solves the equation:
delay(V_ij^data / V_ij^model) = delay(g_i) - delay(g_j)
= tau_i - tau_j
where tau is the delay that can be turned
into a complex gain via: g = exp(i * 2pi * tau * freqs).
4) delay slope linear calibration solves the equation:
delay(V_ij^data / V_ij^model) = dot(T_dly, B_ij)
where T_dly is a delay slope in [ns / meter]
and B_ij is the baseline vector between ant i and j.
5) frequency-independent phase slope calibration
median_over_freq(angle(V_ij^data / V_ij^model)) = dot(Phi, B_ji)
where Phi is a phase slope in [radians / meter]
and B_ij is the baseline vector between ant i and j.
6) Average amplitude linear calibration solves the equation:
log|V_ij^data / V_ij^model| = log|g_avg_i| + log|g_avg_j|
7) Tip-Tilt phase logarithmic calibration solves the equation
angle(V_ij^data / V_ij^model) = psi + dot(TT_Phi, B_ij)
where psi is an overall gain phase scalar,
TT_Phi is the gain phase slope vector [radians / meter]
and B_ij is the baseline vector between antenna i and j.
Methods (1), (2) and (3) can be thought of as general bandpass solvers, whereas
methods (4), (5), (6), and (7) are methods that would be used for data that has already
been redundantly calibrated.
Be warned that the linearizations of the phase solvers suffer from phase wrapping
pathologies, meaning that a delay calibration should generally precede a
phs_logcal or a TT_phs_logcal bandpass routine.
"""
def __init__(self, model, data, refant=None, wgts=None, antpos=None, freqs=None,
min_bl_cut=None, max_bl_cut=None, bl_taper_fwhm=None, verbose=True,
filetype='miriad', input_cal=None):
"""
AbsCal object used for phasing and scaling visibility data to an absolute reference model.
The format of model, data and wgts is in a dictionary format, with the convention that
keys contain antennas-pairs + polarization, Ex. (1, 2, 'nn'), and values contain 2D complex
ndarrays with [0] axis indexing time and [1] axis frequency.
Parameters:
-----------
model : Visibility data of reference model, type=dictionary or DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
Optionally, model can be a path to a pyuvdata-supported file, a
pyuvdata.UVData object or hera_cal.HERAData object,
or a list of either.
data : Visibility data, type=dictionary or DataContainer
keys are antenna-pair + polarization tuples, Ex. (1, 2, 'nn').
values are complex ndarray visibilities.
these must be 2D arrays, with [0] axis indexing time
and [1] axis indexing frequency.
Optionally, data can be a path to a pyuvdata-supported file, a
pyuvdata.UVData object or hera_cal.HERAData object,
or a list of either. In this case, antpos, freqs
and wgts are overwritten from arrays in data.
refant : antenna number integer for reference antenna
The reference antenna is used in the phase solvers, where an absolute phase is applied to all
antennas such that the refant's phase is set to identically zero.
wgts : weights of the data, type=dictionary or DataContainer, [default=None]
keys are antenna pair + pol tuples (must match model), values are real floats
matching shape of model and data
antpos : type=dictionary, dict of antenna position vectors in ENU (topo) frame in meters.
origin of coordinates does not matter, but it is preferably centered in the array.
keys are antenna integers and values are ndarray position vectors,
containing [East, North, Up] coordinates.
Can be generated from a pyuvdata.UVData instance via
----
#!/usr/bin/env python
uvd = pyuvdata.UVData()
uvd.read_miriad(<filename>)
antenna_pos, ants = uvd.get_ENU_antpos()
antpos = dict(zip(ants, antenna_pos))
----
This is needed only for Tip Tilt, phase slope, and delay slope calibration.
freqs : ndarray of frequency array, type=ndarray
1d array containing visibility frequencies in Hz.
Needed for delay calibration.
min_bl_cut : float, eliminate all visibilities with baseline separation lengths
smaller than min_bl_cut. This is assumed to be in ENU coordinates with units of meters.
max_bl_cut : float, eliminate all visibilities with baseline separation lengths
larger than max_bl_cut. This is assumed to be in ENU coordinates with units of meters.
bl_taper_fwhm : float, impose a gaussian taper on the data weights as a function of
bl separation length, with a specified fwhm [meters]
filetype : str, if data and/or model are fed as strings, this is their filetype
input_cal : filepath to calfits, UVCal or HERACal object with gain solutions to
apply to data on-the-fly via hera_cal.apply_cal.calibrate_in_place
"""
# set pols to None
pols = None
# load model if necessary
if isinstance(model, list) or isinstance(model, np.ndarray) or isinstance(model, str) or issubclass(model.__class__, UVData):
(model, model_flags, model_antpos, model_ants, model_freqs, model_lsts,
model_times, model_pols) = io.load_vis(model, pop_autos=True, return_meta=True, filetype=filetype)
# load data if necessary
if isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, str) or issubclass(data.__class__, UVData):
(data, flags, data_antpos, data_ants, data_freqs, data_lsts,
data_times, data_pols) = io.load_vis(data, pop_autos=True, return_meta=True, filetype=filetype)
pols = data_pols
freqs = data_freqs
antpos = data_antpos
# apply calibration
if input_cal is not None:
if 'flags' not in locals():
flags = None
uvc = io.to_HERACal(input_cal)
gains, cal_flags, quals, totquals = uvc.read()
apply_cal.calibrate_in_place(data, gains, data_flags=flags, cal_flags=cal_flags, gain_convention=uvc.gain_convention)
# get shared keys and pols
self.keys = sorted(set(model.keys()) & set(data.keys()))
assert len(self.keys) > 0, "no shared keys exist between model and data"
if pols is None:
pols = np.unique(list(map(lambda k: k[2], self.keys)))
self.pols = pols
self.Npols = len(self.pols)
self.gain_pols = np.unique(list(map(lambda p: list(split_pol(p)), self.pols)))
self.Ngain_pols = len(self.gain_pols)
# append attributes
self.model = DataContainer(dict([(k, model[k]) for k in self.keys]))
self.data = DataContainer(dict([(k, data[k]) for k in self.keys]))
# setup frequencies
self.freqs = freqs
if self.freqs is None:
self.Nfreqs = None
else:
self.Nfreqs = len(self.freqs)
# setup weights
if wgts is None:
# use data flags if present
if 'flags' in locals() and flags is not None:
wgts = DataContainer(dict([(k, (~flags[k]).astype(np.float)) for k in self.keys]))
else:
wgts = DataContainer(dict([(k, np.ones_like(data[k], dtype=np.float)) for k in self.keys]))
if 'model_flags' in locals():
for k in self.keys:
wgts[k] *= (~model_flags[k]).astype(np.float)
self.wgts = wgts
# setup ants
self.ants = np.unique(np.concatenate(list(map(lambda k: k[:2], self.keys))))
self.Nants = len(self.ants)
if refant is None:
refant = self.keys[0][0]
print("using {} for reference antenna".format(refant))
else:
assert refant in self.ants, "refant {} not found in self.ants".format(refant)
self.refant = refant
# setup antenna positions
self._set_antpos(antpos)
# setup gain solution keys
self._gain_keys = [[(a, p) for a in self.ants] for p in self.gain_pols]
# perform baseline cut
if min_bl_cut is not None or max_bl_cut is not None:
assert self.antpos is not None, "can't request a bl_cut if antpos is not fed"
_model = cut_bls(self.model, self.bls, min_bl_cut, max_bl_cut)
_data = cut_bls(self.data, self.bls, min_bl_cut, max_bl_cut)
_wgts = cut_bls(self.wgts, self.bls, min_bl_cut, max_bl_cut)
# re-init
self.__init__(_model, _data, refant=self.refant, wgts=_wgts, antpos=self.antpos, freqs=self.freqs, verbose=verbose)
# enact a baseline weighting taper
if bl_taper_fwhm is not None:
assert self.antpos is not None, "can't request a baseline taper if antpos is not fed"
# make gaussian taper func
def taper(ratio):
return np.exp(-0.5 * ratio**2)
# iterate over baselines
for k in self.wgts.keys():
self.wgts[k] *= taper(np.linalg.norm(self.bls[k]) / bl_taper_fwhm)
def _set_antpos(self, antpos):
'''Helper function for replacing self.antpos, self.bls, and self.antpos_arr without affecting tapering or baseline cuts.
Useful for replacing true antenna positions with idealized ones derived from the redundancies.'''
self.antpos = antpos
self.antpos_arr = None
self.bls = None
if self.antpos is not None:
# center antpos about reference antenna
self.antpos = odict([(k, antpos[k] - antpos[self.refant]) for k in self.ants])
self.bls = odict([(x, self.antpos[x[0]] - self.antpos[x[1]]) for x in self.keys])
self.antpos_arr = np.array(list(map(lambda x: self.antpos[x], self.ants)))
self.antpos_arr -= np.median(self.antpos_arr, axis=0)
def amp_logcal(self, verbose=True):
"""
Call abscal_funcs.amp_logcal() method. see its docstring for more details.
Parameters:
-----------
verbose : type=boolean, if True print feedback to stdout
Result:
-------
per-antenna amplitude and per-antenna amp gains
can be accessed via the getter functions
self.ant_eta
self.ant_eta_arr
self.ant_eta_gain
self.ant_eta_gain_arr
"""
# set data quantities
model = self.model
data = self.data
wgts = copy.copy(self.wgts)
# run linsolve
fit = amp_logcal(model, data, wgts=wgts, verbose=verbose)
# form result array
self._ant_eta = odict(list(map(lambda k: (k, copy.copy(fit["eta_{}_{}".format(k[0], k[1])])), flatten(self._gain_keys))))
self._ant_eta_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: self._ant_eta[k], pk)), self._gain_keys)), 0, -1)
def phs_logcal(self, avg=False, verbose=True):
"""
call abscal_funcs.phs_logcal() method. see its docstring for more details.
Parameters:
-----------
avg : type=boolean, if True, average solution across time and frequency
verbose : type=boolean, if True print feedback to stdout
Result:
-------
per-antenna phase and per-antenna phase gains
can be accessed via the methods
self.ant_phi
self.ant_phi_arr
self.ant_phi_gain
self.ant_phi_gain_arr
"""
# assign data
model = self.model
data = self.data
wgts = copy.deepcopy(self.wgts)
# run linsolve
fit = phs_logcal(model, data, wgts=wgts, refant=self.refant, verbose=verbose)
# form result array
self._ant_phi = odict(list(map(lambda k: (k, copy.copy(fit["phi_{}_{}".format(k[0], k[1])])), flatten(self._gain_keys))))
self._ant_phi_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: self._ant_phi[k], pk)), self._gain_keys)), 0, -1)
# take time and freq average
if avg:
self._ant_phi = odict(list(map(lambda k: (k, np.ones_like(self._ant_phi[k])
* np.angle(np.median(np.real(np.exp(1j * self._ant_phi[k])))
+ 1j * np.median(np.imag(np.exp(1j * self._ant_phi[k]))))), flatten(self._gain_keys))))
self._ant_phi_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: self._ant_phi[k], pk)), self._gain_keys)), 0, -1)
def delay_lincal(self, medfilt=True, kernel=(1, 11), verbose=True, time_avg=False, edge_cut=0):
"""
Solve for per-antenna delays by calling the abscal_funcs.delay_lincal method.
See abscal_funcs.delay_lincal for details.
Parameters:
-----------
medfilt : boolean, if True median filter data before fft
kernel : size of median filter across (time, freq) axes, type=(int, int)
time_avg : boolean, if True, replace resultant antenna delays with the median across time
edge_cut : int, number of channels to exclude at each band edge in FFT window
Result:
-------
per-antenna delays, per-antenna delay gains, per-antenna phase + phase gains
can be accessed via the methods
self.ant_dly
self.ant_dly_gain
self.ant_dly_arr
self.ant_dly_gain_arr
self.ant_dly_phi
self.ant_dly_phi_gain
self.ant_dly_phi_arr
self.ant_dly_phi_gain_arr
"""
# check for freq data
if self.freqs is None:
raise AttributeError("cannot delay_lincal without self.freqs array")
# assign data
model = self.model
data = self.data
wgts = self.wgts
# get freq channel width
df = np.median(np.diff(self.freqs))
# run delay_lincal
fit = delay_lincal(model, data, wgts=wgts, refant=self.refant, medfilt=medfilt, df=df,
f0=self.freqs[0], kernel=kernel, verbose=verbose, edge_cut=edge_cut)
# time average
if time_avg:
k = flatten(self._gain_keys)[0]
Ntimes = fit["tau_{}_{}".format(k[0], k[1])].shape[0]
for i, k in enumerate(flatten(self._gain_keys)):
tau_key = "tau_{}_{}".format(k[0], k[1])
tau_avg = np.moveaxis(np.median(fit[tau_key], axis=0)[np.newaxis], 0, 0)
fit[tau_key] = np.repeat(tau_avg, Ntimes, axis=0)
phi_key = "phi_{}_{}".format(k[0], k[1])
gain = np.exp(1j * fit[phi_key])
real_avg = np.median(np.real(gain), axis=0)
imag_avg = np.median(np.imag(gain), axis=0)
phi_avg = np.moveaxis(np.angle(real_avg + 1j * imag_avg)[np.newaxis], 0, 0)
fit[phi_key] = np.repeat(phi_avg, Ntimes, axis=0)
# form result
self._ant_dly = odict(list(map(lambda k: (k, copy.copy(fit["tau_{}_{}".format(k[0], k[1])])), flatten(self._gain_keys))))
self._ant_dly_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: self._ant_dly[k], pk)), self._gain_keys)), 0, -1)
self._ant_dly_phi = odict(list(map(lambda k: (k, copy.copy(fit["phi_{}_{}".format(k[0], k[1])])), flatten(self._gain_keys))))
self._ant_dly_phi_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: self._ant_dly_phi[k], pk)), self._gain_keys)), 0, -1)
def delay_slope_lincal(self, medfilt=True, kernel=(1, 15), verbose=True, time_avg=False,
four_pol=False, edge_cut=0):
"""
Solve for an array-wide delay slope (a subset of the omnical degeneracies) by calling
abscal_funcs.delay_slope_lincal method. See abscal_funcs.delay_slope_lincal for details.
Parameters:
-----------
medfilt : boolean, if True median filter data before fft
kernel : size of median filter across (time, freq) axes, type=(int, int)
verbose : type=boolean, if True print feedback to stdout
time_avg : boolean, if True, replace the resultant delay slope with the median across time
four_pol : boolean, if True, form a joint polarization solution
edge_cut : int, number of channels to exclude at each band edge in FFT window
Result:
-------
delay slopes and the corresponding per-antenna delay slope gains
can be accessed via the methods
self.dly_slope
self.dly_slope_gain
self.dly_slope_arr
self.dly_slope_gain_arr
"""
# check for freq data
if self.freqs is None:
raise AttributeError("cannot delay_slope_lincal without self.freqs array")
# assign data
model = self.model
data = self.data
wgts = self.wgts
antpos = self.antpos
# get freq channel width
df = np.median(np.diff(self.freqs))
# run delay_slope_lincal
fit = delay_slope_lincal(model, data, antpos, wgts=wgts, refant=self.refant, medfilt=medfilt, df=df,
time_avg=time_avg, kernel=kernel, verbose=verbose, four_pol=four_pol, edge_cut=edge_cut)
# separate pols if four_pol
if four_pol:
for i, gp in enumerate(self.gain_pols):
fit['T_ew_{}'.format(gp)] = fit["T_ew"]
fit['T_ns_{}'.format(gp)] = fit["T_ns"]
fit.pop('T_ew')
fit.pop('T_ns')
# form result
self._dly_slope = odict(list(map(lambda k: (k, copy.copy(np.array([fit["T_ew_{}".format(k[1])], fit["T_ns_{}".format(k[1])]]))), flatten(self._gain_keys))))
self._dly_slope_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: np.array([self._dly_slope[k][0], self._dly_slope[k][1]]), pk)), self._gain_keys)), 0, -1)
def global_phase_slope_logcal(self, solver='linfit', tol=1.0, edge_cut=0, verbose=True):
"""
Solve for a frequency-independent spatial phase slope (a subset of the omnical degeneracies) by calling
abscal_funcs.global_phase_slope_logcal method. See abscal_funcs.global_phase_slope_logcal for details.
Parameters:
-----------
solver : 'linfit' uses linsolve to fit phase slope across the array,
'dft' uses a spatial Fourier transform to find a phase slope
tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters)
edge_cut : int, number of channels to exclude at each band edge in phase slope solver
verbose : type=boolean, if True print feedback to stdout
Result:
-------
frequency-independent phase slopes and the corresponding per-antenna gains
can be accessed via the methods
self.phs_slope
self.phs_slope_gain
self.phs_slope_arr
self.phs_slope_gain_arr
"""
# assign data
model = self.model
data = self.data
wgts = self.wgts
antpos = self.antpos
# run global_phase_slope_logcal
fit = global_phase_slope_logcal(model, data, antpos, solver=solver, wgts=wgts,
refant=self.refant, verbose=verbose, tol=tol, edge_cut=edge_cut)
# form result
self._phs_slope = odict(list(map(lambda k: (k, copy.copy(np.array([fit["Phi_ew_{}".format(k[1])], fit["Phi_ns_{}".format(k[1])]]))), flatten(self._gain_keys))))
self._phs_slope_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: np.array([self._phs_slope[k][0], self._phs_slope[k][1]]), pk)), self._gain_keys)), 0, -1)
def abs_amp_logcal(self, verbose=True):
"""
call abscal_funcs.abs_amp_logcal() method. see its docstring for more details.
Parameters:
-----------
verbose : type=boolean, if True print feedback to stdout
Result:
-------
Absolute amplitude scalar can be accessed via methods
self.abs_eta
self.abs_eta_gain
self.abs_eta_arr
self.abs_eta_gain_arr
"""
# set data quantities
model = self.model
data = self.data
wgts = self.wgts
# run abs_amp_logcal
fit = abs_amp_logcal(model, data, wgts=wgts, verbose=verbose)
# form result
self._abs_eta = odict(list(map(lambda k: (k, copy.copy(fit["eta_{}".format(k[1])])), flatten(self._gain_keys))))
self._abs_eta_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: self._abs_eta[k], pk)), self._gain_keys)), 0, -1)
def TT_phs_logcal(self, verbose=True, zero_psi=True, four_pol=False):
"""
call abscal_funcs.TT_phs_logcal() method. see its docstring for more details.
Parameters:
-----------
zero_psi : type=boolean, set overall gain phase (psi) to identically zero in linsolve equations.
This is separate from the reference antenna's absolute phase being set to zero, as it can account
for absolute phase offsets between polarizations.
four_pol : type=boolean, even if multiple polarizations are present in data, make free
variables polarization un-aware: i.e. one solution across all polarizations.
This is the same assumption as 4-polarization calibration in omnical.
verbose : type=boolean, if True print feedback to stdout
Result:
-------
Tip-Tilt phase slope and overall phase fit can be accessed via methods
self.abs_psi
self.abs_psi_gain
self.TT_Phi
self.TT_Phi_gain
self.abs_psi_arr
self.abs_psi_gain_arr
self.TT_Phi_arr
self.TT_Phi_gain_arr
"""
# set data quantities
model = self.model
data = self.data
wgts = self.wgts
antpos = self.antpos
# run TT_phs_logcal
fit = TT_phs_logcal(model, data, antpos, wgts=wgts, refant=self.refant, verbose=verbose, zero_psi=zero_psi, four_pol=four_pol)
# manipulate if four_pol
if four_pol:
for i, gp in enumerate(self.gain_pols):
fit['Phi_ew_{}'.format(gp)] = fit["Phi_ew"]
fit['Phi_ns_{}'.format(gp)] = fit["Phi_ns"]
fit.pop('Phi_ew')
fit.pop('Phi_ns')
# form result
self._abs_psi = odict(list(map(lambda k: (k, copy.copy(fit["psi_{}".format(k[1])])), flatten(self._gain_keys))))
self._abs_psi_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: self._abs_psi[k], pk)), self._gain_keys)), 0, -1)
self._TT_Phi = odict(list(map(lambda k: (k, copy.copy(np.array([fit["Phi_ew_{}".format(k[1])], fit["Phi_ns_{}".format(k[1])]]))), flatten(self._gain_keys))))
self._TT_Phi_arr = np.moveaxis(list(map(lambda pk: list(map(lambda k: np.array([self._TT_Phi[k][0], self._TT_Phi[k][1]]), pk)), self._gain_keys)), 0, -1)
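# Hedged sketch of the tip-tilt phase model assumed above (comments only, not executed):
# each antenna's gain phase is modeled as psi + Phi_ew*x_a + Phi_ns*y_a, so for a
# hypothetical antenna `a` a gain can be reconstructed as
#     Phi = np.array([Phi_ew, Phi_ns])
#     g_a = np.exp(1j * (psi + np.einsum("i...,i->...", Phi, antpos[a][:2])))
# where antpos[a][:2] is the antenna's ENU (east, north) position. This mirrors the
# abs_psi_gain and TT_Phi_gain properties defined further below.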
# amp_logcal results
@property
def ant_eta(self):
""" return _ant_eta dict, containing per-antenna amplitude solution """
if hasattr(self, '_ant_eta'):
return copy.deepcopy(self._ant_eta)
else:
return None
@property
def ant_eta_gain(self):
""" form complex gain from _ant_eta dict """
if hasattr(self, '_ant_eta'):
ant_eta = self.ant_eta
return odict(list(map(lambda k: (k, np.exp(ant_eta[k]).astype(complex)), flatten(self._gain_keys))))
else:
return None
@property
def ant_eta_arr(self):
""" return _ant_eta in ndarray format """
if hasattr(self, '_ant_eta_arr'):
return copy.copy(self._ant_eta_arr)
else:
return None
@property
def ant_eta_gain_arr(self):
""" return _ant_eta_gain in ndarray format """
if hasattr(self, '_ant_eta_arr'):
return np.exp(self.ant_eta_arr).astype(complex)
else:
return None
# phs_logcal results
@property
def ant_phi(self):
""" return _ant_phi dict, containing per-antenna phase solution """
if hasattr(self, '_ant_phi'):
return copy.deepcopy(self._ant_phi)
else:
return None
@property
def ant_phi_gain(self):
""" form complex gain from _ant_phi dict """
if hasattr(self, '_ant_phi'):
ant_phi = self.ant_phi
return odict(list(map(lambda k: (k, np.exp(1j * ant_phi[k])), flatten(self._gain_keys))))
else:
return None
@property
def ant_phi_arr(self):
""" return _ant_phi in ndarray format """
if hasattr(self, '_ant_phi_arr'):
return copy.copy(self._ant_phi_arr)
else:
return None
@property
def ant_phi_gain_arr(self):
""" return _ant_phi_gain in ndarray format """
if hasattr(self, '_ant_phi_arr'):
return np.exp(1j * self.ant_phi_arr)
else:
return None
# delay_lincal results
@property
def ant_dly(self):
""" return _ant_dly dict, containing per-antenna delay solution """
if hasattr(self, '_ant_dly'):
return copy.deepcopy(self._ant_dly)
else:
return None
@property
def ant_dly_gain(self):
""" form complex gain from _ant_dly dict """
if hasattr(self, '_ant_dly'):
ant_dly = self.ant_dly
return odict(list(map(lambda k: (k, np.exp(2j * np.pi * self.freqs.reshape(1, -1) * ant_dly[k])), flatten(self._gain_keys))))
else:
return None
@property
def ant_dly_arr(self):
""" return _ant_dly in ndarray format """
if hasattr(self, '_ant_dly_arr'):
return copy.copy(self._ant_dly_arr)
else:
return None
@property
def ant_dly_gain_arr(self):
""" return ant_dly_gain in ndarray format """
if hasattr(self, '_ant_dly_arr'):
return np.exp(2j * np.pi * self.freqs.reshape(-1, 1) * self.ant_dly_arr)
else:
return None
@property
def ant_dly_phi(self):
""" return _ant_dly_phi dict, containing a single phase solution per antenna """
if hasattr(self, '_ant_dly_phi'):
return copy.deepcopy(self._ant_dly_phi)
else:
return None
@property
def ant_dly_phi_gain(self):
""" form complex gain from _ant_dly_phi dict """
if hasattr(self, '_ant_dly_phi'):
ant_dly_phi = self.ant_dly_phi
return odict(list(map(lambda k: (k, np.exp(1j * np.repeat(ant_dly_phi[k], self.Nfreqs, 1))), flatten(self._gain_keys))))
else:
return None
@property
def ant_dly_phi_arr(self):
""" return _ant_dly_phi in ndarray format """
if hasattr(self, '_ant_dly_phi_arr'):
return copy.copy(self._ant_dly_phi_arr)
else:
return None
@property
def ant_dly_phi_gain_arr(self):
""" return _ant_dly_phi_gain in ndarray format """
if hasattr(self, '_ant_dly_phi_arr'):
return np.exp(1j * np.repeat(self.ant_dly_phi_arr, self.Nfreqs, 2))
else:
return None
# delay_slope_lincal results
@property
def dly_slope(self):
""" return _dly_slope dict, containing the delay slope across the array """
if hasattr(self, '_dly_slope'):
return copy.deepcopy(self._dly_slope)
else:
return None
@property
def dly_slope_gain(self):
""" form a per-antenna complex gain from _dly_slope dict and the antpos dictionary attached to the class"""
if hasattr(self, '_dly_slope'):
# get dly_slope dictionary
dly_slope = self.dly_slope
# turn delay slope into per-antenna complex gains, while iterating over self._gain_keys
# einsum sums over antenna position
return odict(list(map(lambda k: (k, np.exp(2j * np.pi * self.freqs.reshape(1, -1) * np.einsum("i...,i->...", dly_slope[k], self.antpos[k[0]][:2]))),
flatten(self._gain_keys))))
else:
return None
def custom_dly_slope_gain(self, gain_keys, antpos):
"""
return dly_slope_gain with custom gain keys and antenna positions
gain_keys : type=list, list of unique (ant, pol). Ex. [(0, 'Jee'), (1, 'Jee'), (0, 'Jnn'), (1, 'Jnn')]
antpos : type=dictionary, contains antenna position vectors. keys are ant integer, values are ant position vectors
"""
if hasattr(self, '_dly_slope'):
# form dict of delay slopes for each polarization in self._gain_keys
# b/c they are identical for all antennas of the same polarization
dly_slope_dict = {ants[0][1]: self.dly_slope[ants[0]] for ants in self._gain_keys}
# turn delay slope into per-antenna complex gains, while iterating over input gain_keys
dly_slope_gain = odict()
for gk in gain_keys:
# einsum sums over antenna position
dly_slope_gain[gk] = np.exp(2j * np.pi * self.freqs.reshape(1, -1) * np.einsum("i...,i->...", dly_slope_dict[gk[1]], antpos[gk[0]][:2]))
return dly_slope_gain
else:
return None
@property
def dly_slope_arr(self):
""" return _dly_slope_arr array """
if hasattr(self, '_dly_slope_arr'):
return copy.copy(self._dly_slope_arr)
else:
return None
@property
def dly_slope_gain_arr(self):
""" form complex gain from _dly_slope_arr array """
if hasattr(self, '_dly_slope_arr'):
# einsum sums over antenna position
return np.exp(2j * np.pi * self.freqs.reshape(-1, 1) * np.einsum("hi...,hi->h...", self._dly_slope_arr, self.antpos_arr[:, :2]))
else:
return None
@property
def dly_slope_ant_dly_arr(self):
""" form antenna delays from _dly_slope_arr array """
if hasattr(self, '_dly_slope_arr'):
# einsum sums over antenna position
return np.einsum("hi...,hi->h...", self._dly_slope_arr, self.antpos_arr[:, :2])
else:
return None
# global_phase_slope_logcal results
@property
def phs_slope(self):
""" return _phs_slope dict, containing the frequency-indpendent phase slope across the array """
if hasattr(self, '_phs_slope'):
return copy.deepcopy(self._phs_slope)
else:
return None
@property
def phs_slope_gain(self):
""" form a per-antenna complex gain from _phs_slope dict and the antpos dictionary attached to the class"""
if hasattr(self, '_phs_slope'):
# get phs_slope dictionary
phs_slope = self.phs_slope
# turn phs slope into per-antenna complex gains, while iterating over self._gain_keys
# einsum sums over antenna position
return odict(list(map(lambda k: (k, np.exp(1.0j * np.ones_like(self.freqs).reshape(1, -1) * np.einsum("i...,i->...", phs_slope[k], self.antpos[k[0]][:2]))),
flatten(self._gain_keys))))
else:
return None
def custom_phs_slope_gain(self, gain_keys, antpos):
"""
return phs_slope_gain with custom gain keys and antenna positions
gain_keys : type=list, list of unique (ant, pol). Ex. [(0, 'Jee'), (1, 'Jee'), (0, 'Jnn'), (1, 'Jnn')]
antpos : type=dictionary, contains antenna position vectors. keys are ant integer, values are ant position vectors
"""
if hasattr(self, '_phs_slope'):
# form dict of phs slopes for each polarization in self._gain_keys
# b/c they are identical for all antennas of the same polarization
phs_slope_dict = {ants[0][1]: self.phs_slope[ants[0]] for ants in self._gain_keys}
# turn phs slope into per-antenna complex gains, while iterating over input gain_keys
phs_slope_gain = odict()
for gk in gain_keys:
# einsum sums over antenna position
phs_slope_gain[gk] = np.exp(1.0j * np.ones_like(self.freqs).reshape(1, -1) * np.einsum("i...,i->...", phs_slope_dict[gk[1]], antpos[gk[0]][:2]))
return phs_slope_gain
else:
return None
@property
def phs_slope_arr(self):
""" return _phs_slope_arr array """
if hasattr(self, '_phs_slope_arr'):
return copy.copy(self._phs_slope_arr)
else:
return None
@property
def phs_slope_gain_arr(self):
""" form complex gain from _phs_slope_arr array """
if hasattr(self, '_phs_slope_arr'):
# einsum sums over antenna position
return np.exp(1.0j * np.ones_like(self.freqs).reshape(-1, 1) * np.einsum("hi...,hi->h...", self._phs_slope_arr, self.antpos_arr[:, :2]))
else:
return None
@property
def phs_slope_ant_phs_arr(self):
""" form antenna delays from _phs_slope_arr array """
if hasattr(self, '_phs_slope_arr'):
# einsum sums over antenna position
return np.einsum("hi...,hi->h...", self._phs_slope_arr, self.antpos_arr[:, :2])
else:
return None
# abs_amp_logcal results
@property
def abs_eta(self):
"""return _abs_eta dict"""
if hasattr(self, '_abs_eta'):
return copy.deepcopy(self._abs_eta)
else:
return None
@property
def abs_eta_gain(self):
"""form complex gain from _abs_eta dict"""
if hasattr(self, '_abs_eta'):
abs_eta = self.abs_eta
return odict(list(map(lambda k: (k, np.exp(abs_eta[k]).astype(complex)), flatten(self._gain_keys))))
else:
return None
def custom_abs_eta_gain(self, gain_keys):
"""
return abs_eta_gain with custom gain keys
gain_keys : type=list, list of unique (ant, pol). Ex. [(0, 'Jee'), (1, 'Jee'), (0, 'Jnn'), (1, 'Jnn')]
"""
if hasattr(self, '_abs_eta'):
# form dict of abs eta for each polarization in self._gain_keys
# b/c they are identical for all antennas of the same polarization
abs_eta_dict = {ants[0][1]: self.abs_eta[ants[0]] for ants in self._gain_keys}
# turn abs eta into per-antenna complex gains, while iterating over input gain_keys
abs_eta_gain = odict()
for gk in gain_keys:
abs_eta_gain[gk] = np.exp(abs_eta_dict[gk[1]]).astype(complex)
return abs_eta_gain
else:
return None
@property
def abs_eta_arr(self):
"""return _abs_eta_arr array"""
if hasattr(self, '_abs_eta_arr'):
return copy.copy(self._abs_eta_arr)
else:
return None
@property
def abs_eta_gain_arr(self):
"""form complex gain from _abs_eta_arr array"""
if hasattr(self, '_abs_eta_arr'):
return np.exp(self._abs_eta_arr).astype(complex)
else:
return None
# TT_phs_logcal results
@property
def abs_psi(self):
"""return _abs_psi dict"""
if hasattr(self, '_abs_psi'):
return copy.deepcopy(self._abs_psi)
else:
return None
@property
def abs_psi_gain(self):
""" form complex gain from _abs_psi array """
if hasattr(self, '_abs_psi'):
abs_psi = self.abs_psi
return odict(list(map(lambda k: (k, np.exp(1j * abs_psi[k])), flatten(self._gain_keys))))
else:
return None
def custom_abs_psi_gain(self, gain_keys):
"""
return abs_psi_gain with custom gain keys
gain_keys : type=list, list of unique (ant, pol). Ex. [(0, 'Jee'), (1, 'Jee'), (0, 'Jnn'), (1, 'Jnn')]
"""
if hasattr(self, '_abs_psi'):
# form dict of abs psi for each polarization in self._gain_keys
# b/c they are identical for all antennas of the same polarization
abs_psi_dict = {ants[0][1]: self.abs_psi[ants[0]] for ants in self._gain_keys}
# turn abs psi into per-antenna complex gains, while iterating over input gain_keys
abs_psi_gain = odict()
for gk in gain_keys:
abs_psi_gain[gk] = np.exp(1j * abs_psi_dict[gk[1]])
return abs_psi_gain
else:
return None
@property
def abs_psi_arr(self):
"""return _abs_psi_arr array"""
if hasattr(self, '_abs_psi_arr'):
return copy.copy(self._abs_psi_arr)
else:
return None
@property
def abs_psi_gain_arr(self):
""" form complex gain from _abs_psi_arr array """
if hasattr(self, '_abs_psi_arr'):
return np.exp(1j * self._abs_psi_arr)
else:
return None
@property
def TT_Phi(self):
"""return _TT_Phi array"""
if hasattr(self, '_TT_Phi'):
return copy.deepcopy(self._TT_Phi)
else:
return None
@property
def TT_Phi_gain(self):
""" form complex gain from _TT_Phi array """
if hasattr(self, '_TT_Phi'):
TT_Phi = self.TT_Phi
# einsum sums over antenna position
return odict(list(map(lambda k: (k, np.exp(1j * np.einsum("i...,i->...", TT_Phi[k], self.antpos[k[0]][:2]))), flatten(self._gain_keys))))
else:
return None
def custom_TT_Phi_gain(self, gain_keys, antpos):
"""
return TT_Phi_gain with custom gain keys and antenna positions
gain_keys : type=list, list of unique (ant, pol). Ex. [(0, 'Jee'), (1, 'Jee'), (0, 'Jnn'), (1, 'Jnn')]
antpos : type=dictionary, contains antenna position vectors. keys are ant integer, values are ant positions
"""
if hasattr(self, '_TT_Phi'):
# form dict of TT_Phi for each polarization in self._gain_keys
# b/c they are identical for all antennas of the same polarization
TT_Phi_dict = {ants[0][1]: self.TT_Phi[ants[0]] for ants in self._gain_keys}
# turn TT_Phi into per-antenna complex gains, while iterating over input gain_keys
TT_Phi_gain = odict()
for gk in gain_keys:
# einsum sums over antenna position
TT_Phi_gain[gk] = np.exp(1j * np.einsum("i...,i->...", TT_Phi_dict[gk[1]], antpos[gk[0]][:2]))
return TT_Phi_gain
else:
return None
@property
def TT_Phi_arr(self):
"""return _TT_Phi_arr array"""
if hasattr(self, '_TT_Phi_arr'):
return copy.copy(self._TT_Phi_arr)
else:
return None
@property
def TT_Phi_gain_arr(self):
""" form complex gain from _TT_Phi_arr array """
if hasattr(self, '_TT_Phi_arr'):
# einsum sums over antenna position
return np.exp(1j * np.einsum("hi...,hi->h...", self._TT_Phi_arr, self.antpos_arr[:, :2]))
else:
return None
def get_all_times_and_lsts(hd, solar_horizon=90.0, unwrap=True):
'''Extract all times and lsts from a HERAData object
Arguments:
hd: HERAData object initialized with one or more uvh5 files' metadata
solar_horizon: Solar altitude threshold [degrees]. Times are not returned when the Sun is above this altitude.
unwrap: increase all LSTs smaller than the first one by 2pi to avoid phase wrapping
Returns:
all_times: list of times in JD in the file or files
all_lsts: LSTs (in radians) corresponding to all_times
'''
all_times = hd.times
all_lsts = hd.lsts
if len(hd.filepaths) > 1: # in this case, it's a dictionary
all_times = np.array([time for f in hd.filepaths for time in all_times[f]])
all_lsts = np.array([lst for f in hd.filepaths for lst in all_lsts[f]])[np.argsort(all_times)]
if unwrap: # avoid phase wraps
all_lsts[all_lsts < all_lsts[0]] += 2 * np.pi
# remove times when sun was too high
if solar_horizon < 90.0:
lat, lon, alt = hd.telescope_location_lat_lon_alt_degrees
solar_alts = utils.get_sun_alt(all_times, latitude=lat, longitude=lon)
solar_flagged = solar_alts > solar_horizon
return all_times[~solar_flagged], all_lsts[~solar_flagged]
else: # skip this step for speed
return all_times, all_lsts
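# Hedged usage sketch (not executed): assumes a HERAData object `hd` already initialized
# from one or more uvh5 files; the 81-degree solar horizon is illustrative only.
#     all_times, all_lsts = get_all_times_and_lsts(hd, solar_horizon=81.0)
#     assert len(all_times) == len(all_lsts)   # one unwrapped LST per surviving time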
def get_d2m_time_map(data_times, data_lsts, model_times, model_lsts, extrap_limit=.5):
'''Generate a dictionary that maps data times to model times via shared LSTs.
Arguments:
data_times: list of times in the data (in JD)
data_lsts: list of corresponding LSTs (in radians)
model_times: list of times in the model (in JD)
model_lsts: list of corresponding LSTs (in radians)
extrap_limit: float that sets the maximum distance away in LST, in units of the median Delta
in model_lsts, that a data time can be mapped to model time. If no model_lst is within
this distance, the data_time is mapped to None. If there is only one model lst, this
is ignored and the nearest time is always returned.
Returns:
d2m_time_map: dictionary uniquely mapping times in the data to times in the model
that are closest in LST. Data times map to None when the nearest model LST is too far,
as defined by the extrap_limit.
'''
# check that the input is sensible
if len(data_times) != len(data_lsts):
raise ValueError('data_times and data_lsts must have the same length.')
if len(model_times) != len(model_lsts):
raise ValueError('model_times and model_lsts must have the same length.')
# compute maximum acceptable distance on the unit circle
max_complex_dist = 2.0
if len(model_lsts) > 1:
max_complex_dist = np.median(np.abs(np.diff(np.exp(1j * model_lsts)))) * extrap_limit
# find indices of nearest model lst for a given data lst
d2m_ind_map = {}
for dind, dlst in enumerate(data_lsts):
lst_complex_distances = np.abs(np.exp(1j * model_lsts) - np.exp(1j * dlst))
# check to see that the nearest model_lst is close enough
if np.min(lst_complex_distances) <= max_complex_dist:
d2m_ind_map[dind] = np.argmin(lst_complex_distances)
else:
d2m_ind_map[dind] = None
# return map of data times to model times using those indices
return {data_times[dind]: model_times[mind] if mind is not None else None
for dind, mind in d2m_ind_map.items()}
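# Hedged example of the LST matching above with made-up numbers (doctest-style, not executed).
# The first two data times land within half the median model LST spacing of a model LST;
# the third is too far away and therefore maps to None.
#     >>> data_times, data_lsts = [2458838.1, 2458838.2, 2458838.3], [0.10, 0.20, 0.90]
#     >>> model_times, model_lsts = [2458850.1, 2458850.2], [0.11, 0.21]
#     >>> get_d2m_time_map(data_times, data_lsts, model_times, model_lsts)
#     {2458838.1: 2458850.1, 2458838.2: 2458850.2, 2458838.3: None}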
def abscal_step(gains_to_update, AC, AC_func, AC_kwargs, gain_funcs, gain_args_list, gain_flags,
gain_convention='divide', max_iter=1, phs_conv_crit=1e-6, verbose=True):
'''Generalized function for performing an abscal step (e.g. abs_amp_logcal or TT_phs_logcal).
NOTE: This function is no longer used and will likely be removed in a future version.
Arguments:
gains_to_update: the gains produced by abscal up until this step. Updated in place.
AC: AbsCal object containing data, model, and other metadata. AC.data is recalibrated
in place using the gains solved for during this step
AC_func: function (usually a class method of AC) to call to instantiate the new gains
which are then accessible as class properties of AC
AC_kwargs: dictionary of kwargs to pass into AC_func
gain_funcs: list of functions to call to return gains after AC_func has been called
gain_args_list: list of tuples of arguments to pass to the corresponding gain_funcs
gain_flags: per-antenna flags to apply to AC.Data when performing recalibration
gain_convention: 'divide' if raw data is calibrated by dividing it by the gains,
'multiply' otherwise.
max_iter: maximum number of times to run phase solvers iteratively to avoid the effect
of phase wraps in, e.g. phase_slope_cal or TT_phs_logcal
phs_conv_crit: convergence criterion for updates to iterative phase calibration that compares
the updates to all 1.0s.
verbose: If True, will print the progress of iterative convergence
'''
warnings.warn('abscal_step is no longer used by post_redcal_abscal and thus subject to future removal.', DeprecationWarning)
for i in range(max_iter):
AC_func(**AC_kwargs)
gains_here = merge_gains([gf(*gargs) for gf, gargs in zip(gain_funcs, gain_args_list)])
apply_cal.calibrate_in_place(AC.data, gains_here, AC.wgts, gain_flags,
gain_convention=gain_convention, flags_are_wgts=True)
for k in gains_to_update.keys():
gains_to_update[k] *= gains_here[k]
if max_iter > 1:
crit = np.median(np.linalg.norm([gains_here[k] - 1.0 for
k in gains_here.keys()], axis=(0, 1)))
echo(AC_func.__name__ + " convergence criterion: " + str(crit), verbose=verbose)
if crit < phs_conv_crit:
break
def match_baselines(data_bls, model_bls, data_antpos, model_antpos=None, pols=[], data_is_redsol=False,
model_is_redundant=False, tol=1.0, min_bl_cut=None, max_bl_cut=None, max_dims=2, verbose=False):
'''Figure out which baselines to use in the data and the model for abscal and their correspondence.
Arguments:
data_bls: list of baselines in data file in the form (0, 1, 'ee')
model_bls: list of baselines in model files in the form (0, 1, 'ee')
data_antpos: dictionary mapping antenna number to ENU position in meters for antennas in the data
model_antpos: same as data_antpos, but for the model. If None, assumed to match data_antpos
pols: list of polarizations to use. If empty, will use all polarizations in the data or model.
data_is_redsol: if True, the data file only contains one visibility per unique baseline
model_is_redundant: if True, the model file only contains one visibility per unique baseline
tol: float distance for baseline match tolerance in units of baseline vectors (e.g. meters)
min_bl_cut : float, eliminate all visibilities with baseline separation lengths
smaller than min_bl_cut. This is assumed to be in ENU coordinates with units of meters.
max_bl_cut : float, eliminate all visibilities with baseline separation lengths
larger than max_bl_cut. This is assumed to be in ENU coordinates with units of meters.
Returns:
data_bl_to_load: list of baseline tuples in the form (0, 1, 'ee') to load from the data file
model_bl_to_load: list of baseline tuples in the form (0, 1, 'ee') to load from the model file(s)
data_to_model_bl_map: dictionary mapping data baselines to the corresponding model baseline
'''
if data_is_redsol and not model_is_redundant:
raise NotImplementedError('If the data is just unique baselines, the model must also be just unique baselines.')
if model_antpos is None:
model_antpos = copy.deepcopy(data_antpos)
# Perform cut on baseline length and polarization
if len(pols) == 0:
pols = list(set([bl[2] for bl_list in [data_bls, model_bls] for bl in bl_list]))
data_bl_to_load = set(utils.filter_bls(data_bls, pols=pols, antpos=data_antpos, min_bl_cut=min_bl_cut, max_bl_cut=max_bl_cut))
model_bl_to_load = set(utils.filter_bls(model_bls, pols=pols, antpos=model_antpos, min_bl_cut=min_bl_cut, max_bl_cut=max_bl_cut))
# If we're working with full data sets, only pick out matching keys (or ones that work reversably)
if not data_is_redsol and not model_is_redundant:
data_bl_to_load = [bl for bl in data_bl_to_load if (bl in model_bl_to_load) or (reverse_bl(bl) in model_bl_to_load)]
model_bl_to_load = [bl for bl in model_bl_to_load if (bl in data_bl_to_load) or (reverse_bl(bl) in data_bl_to_load)]
data_to_model_bl_map = {bl: bl for bl in data_bl_to_load if bl in model_bl_to_load}
data_to_model_bl_map.update({bl: reverse_bl(bl) for bl in data_bl_to_load if reverse_bl(bl) in model_bl_to_load})
# Either the model is just unique baselines, or both the data and the model are just unique baselines
else:
# build reds using both sets of antpos to find matching baselines
# increase all antenna indices in the model by model_offset to distinguish them from data antennas
model_offset = np.max(list(data_antpos.keys())) + 1
joint_antpos = {**data_antpos, **{ant + model_offset: pos for ant, pos in model_antpos.items()}}
joint_reds = redcal.get_reds(joint_antpos, pols=pols, bl_error_tol=tol)
# filter out baselines not in data or model or between data and model
joint_reds = [[bl for bl in red if not ((bl[0] < model_offset) ^ (bl[1] < model_offset))] for red in joint_reds]
joint_reds = [[bl for bl in red if (bl in data_bl_to_load) or (reverse_bl(bl) in data_bl_to_load)
or ((bl[0] - model_offset, bl[1] - model_offset, bl[2]) in model_bl_to_load)
or reverse_bl((bl[0] - model_offset, bl[1] - model_offset, bl[2])) in model_bl_to_load] for red in joint_reds]
joint_reds = [red for red in joint_reds if len(red) > 0]
# map baselines in data to unique baselines in model
data_to_model_bl_map = {}
for red in joint_reds:
data_bl_candidates = [bl for bl in red if bl[0] < model_offset]
model_bl_candidates = [(bl[0] - model_offset, bl[1] - model_offset, bl[2]) for bl in red if bl[0] >= model_offset]
assert len(model_bl_candidates) <= 1, ('model_is_redundant is True, but the following model baselines are '
'redundant and in the model file: {}'.format(model_bl_candidates))
if len(model_bl_candidates) == 1:
for bl in red:
if bl[0] < model_offset:
if bl in data_bl_to_load:
data_to_model_bl_map[bl] = model_bl_candidates[0]
elif reverse_bl(bl) in data_bl_to_load:
data_to_model_bl_map[reverse_bl(bl)] = reverse_bl(model_bl_candidates[0])
else:
raise ValueError("Baseline {} looks like a data baseline, but isn't in data_bl_to_load.".format(bl))
assert ((len(data_bl_candidates) <= 1)
or (not data_is_redsol)), ('data_is_redsol is True, but the following data baselines are redundant in the '
'data file: {}'.format(data_bl_candidates))
# only load baselines in map
data_bl_to_load = [bl for bl in data_bl_to_load if bl in data_to_model_bl_map.keys()]
model_bl_to_load = [bl for bl in model_bl_to_load if (bl in data_to_model_bl_map.values())
or (reverse_bl(bl) in data_to_model_bl_map.values())]
echo("Selected {} data baselines and {} model baselines to load.".format(len(data_bl_to_load), len(model_bl_to_load)), verbose=verbose)
return list(data_bl_to_load), list(model_bl_to_load), data_to_model_bl_map
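# Hedged usage sketch (hypothetical baseline lists and positions, not executed): with a
# redundantly averaged model, every redundant data baseline maps to the single unique
# model baseline in its group.
#     data_bls = [(0, 1, 'ee'), (1, 2, 'ee')]
#     model_bls = [(0, 1, 'ee')]
#     antpos = {0: np.array([0., 0., 0.]), 1: np.array([14.6, 0., 0.]), 2: np.array([29.2, 0., 0.])}
#     dbls, mbls, d2m = match_baselines(data_bls, model_bls, antpos, model_is_redundant=True, tol=1.0)
#     # d2m == {(0, 1, 'ee'): (0, 1, 'ee'), (1, 2, 'ee'): (0, 1, 'ee')}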
def build_data_wgts(data_flags, data_nsamples, model_flags, autocorrs, auto_flags, times_by_bl=None,
df=None, data_is_redsol=False, gain_flags=None, tol=1.0, antpos=None):
'''Build linear weights for data in abscal (or calculating chisq) defined as
wgts = (noise variance * nsamples)^-1 * (0 if data or model is flagged).
Note: if there are discontinuities in the autocorrelations, the nsamples, etc., this may
introduce spectral structure into the calibration solution.
Arguments:
data_flags: DataContainer containing flags on data to be abscaled
data_nsamples: DataContainer containing the number of samples in each data point
model_flags: DataContainer with model flags. Assumed to have all the same keys as the data_flags.
autocorrs: DataContainer with autocorrelation visibilities
auto_flags: DataContainer containing flags for autocorrelation visibilities
times_by_bl: dictionary mapping antenna pairs like (0,1) to float Julian Date. Optional if
inferable from data_flags and all times have length > 1.
df: If None, inferred from data_flags.freqs
data_is_redsol: If True, data_file only contains unique visibilities for each baseline group.
In this case, gain_flags and tol are required and antpos is required if not derivable
from data_flags. In this case, the noise variance is inferred from autocorrelations from
all baselines in the represented unique baseline group.
gain_flags: Used to exclude ants from the noise variance calculation from the autocorrelations
Ignored if data_is_redsol is False.
tol: float distance for baseline match tolerance in units of baseline vectors (e.g. meters).
Ignored if data_is_redsol is False.
antpos: dictionary mapping antenna number to ENU position in meters for antennas in the data.
Ignored if data_is_redsol is False. If left as None, can be inferred from data_flags.data_antpos.
Returns:
wgts: DataContainer mapping each data_flags baseline to its weights
'''
# infer times and df if necessary
if times_by_bl is None:
times_by_bl = data_flags.times_by_bl
if df is None:
df = np.median(np.ediff1d(data_flags.freqs))
# if data_is_redsol, get reds, using data_flags.antpos if antpos is unspecified
if data_is_redsol:
if antpos is None:
antpos = data_flags.data_antpos
reds = redcal.get_reds(antpos, bl_error_tol=tol, pols=data_flags.pols())
reds = redcal.filter_reds(reds, ants=[split_bl(bl)[0] for bl in autocorrs])
# build weights dict using (noise variance * nsamples)^-1 * (0 if data or model is flagged)
wgts = {}
for bl in data_flags:
dt = (np.median(np.ediff1d(times_by_bl[bl[:2]])) * 86400.)
wgts[bl] = (data_nsamples[bl] * (~data_flags[bl]) * (~model_flags[bl])).astype(float)
if not np.all(wgts[bl] == 0.0):
# use autocorrelations to produce weights
if not data_is_redsol:
noise_var = predict_noise_variance_from_autos(bl, autocorrs, dt=dt, df=df)
# use autocorrelations from all unflagged antennas in unique baseline to produce weights
else:
try: # get redundant group that includes this baseline
red_here = [red for red in reds if (bl in red) or (reverse_bl(bl) in red)][0]
except IndexError: # this baseline has no unflagged redundancies
noise_var = np.inf
else:
noise_vars = []
for rbl in red_here:
noise_var_here = predict_noise_variance_from_autos(rbl, autocorrs, dt=dt, df=df)
for ant in split_bl(rbl):
noise_var_here[auto_flags[join_bl(ant, ant)]] = np.nan
noise_vars.append(noise_var_here)
# estimate noise variance per baseline, assuming inverse variance weighting, but excluding flagged autos
noise_var = np.nansum(np.array(noise_vars)**-1, axis=0)**-1 * np.sum(~np.isnan(noise_vars), axis=0)
wgts[bl] *= noise_var**-1
wgts[bl][~np.isfinite(wgts[bl])] = 0.0
return DataContainer(wgts)
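# Hedged illustration of the weighting formula used above (made-up numbers, not executed):
# for an unflagged visibility, wgt = nsamples / noise_var, where noise_var is predicted from
# the two autocorrelations as roughly |V_ii| * |V_jj| / (dt * df).
#     auto_i, auto_j = 300.0, 320.0     # hypothetical autocorrelation amplitudes
#     dt, df = 10.7, 122070.3           # integration time [s] and channel width [Hz]
#     noise_var = auto_i * auto_j / (dt * df)
#     wgt = 2 * (1.0 / noise_var)       # nsamples = 2, data and model unflagged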
def _get_idealized_antpos(cal_flags, antpos, pols, tol=1.0, keep_flagged_ants=True, data_wgts={}):
'''Figure out a set of idealized antenna positions that doesn't introduce additional
redcal degeneracies.
Arguments:
cal_flags: dictionary mapping keys like (1, 'Jnn') to flag waterfalls
antpos: dictionary mapping antenna numbers to numpy array positions
pols: list of polarizations like ['ee', 'nn']
tol: float distance for baseline match tolerance in units of baseline vectors (e.g. meters)
keep_flagged_ants: If True, flagged antennas that are off-grid (i.e. would introduce an
additional degeneracy) are placed at the origin. Otherwise, flagged antennas in cal_flags
are excluded from idealized_antpos.
data_wgts: DataContainer mapping baselines like (0, 1, 'ee') to weights. Used to check if
flagged antennas off the calibratable grid have no weight. Ignored if keep_flagged_ants
is False.
Returns:
idealized_antpos: dictionary mapping antenna numbers to antenna positions on an N-dimensional
grid where redundant real-world baselines (up to the tol) are perfectly redundant (up to
numerical precision). These baselines will be arbitrarily linearly transformed (stretched,
skewed, etc.) and antennas that introduce extra degeneracies will introduce extra dimensions.
See redcal.reds_to_antpos() for more detail.
'''
# build list of reds without flagged antennas
all_ants = list(cal_flags.keys())
unflagged_ants = [ant for ant in cal_flags if not np.all(cal_flags[ant])]
all_reds = redcal.get_reds(antpos, bl_error_tol=tol, pols=pols)
unflagged_reds = redcal.filter_reds(all_reds, ants=unflagged_ants)
# count the number of dimensions describing the redundancies of unflagged antennas
unflagged_idealized_antpos = redcal.reds_to_antpos(unflagged_reds, tol=redcal.IDEALIZED_BL_TOL)
unflagged_nDims = _count_nDims(unflagged_idealized_antpos, assume_2D=False)
# get the potentially calibratable ants, reds, and idealized_antpos. These are antennas that may
# be flagged, but they are still on the grid of unflagged antennas and can thus be updated
# without introducing additional degeneracies.
if keep_flagged_ants:
reds = redcal.filter_reds(all_reds, max_dims=unflagged_nDims)
else:
reds = unflagged_reds
calibratable_ants = set([ant for red in reds for bl in red for ant in split_bl(bl)])
idealized_antpos = redcal.reds_to_antpos(reds, tol=redcal.IDEALIZED_BL_TOL)
for ant in unflagged_ants:
if ant not in calibratable_ants:
raise ValueError(f'{ant} is not flagged in cal_flags, but it is not in the on-grid ants '
f'which are {sorted(list(calibratable_ants))}.')
if keep_flagged_ants:
# figure out which antennas have non-zero weight
ants_with_wgts = set([])
for bl in data_wgts:
if not np.all(data_wgts[bl] == 0.0):
for ant in split_bl(bl):
if ant not in all_ants:
raise ValueError(f'Antenna {ant} has non-zero weight in data_wgts but is not in cal_flags, '
f'which has keys {sorted(list(cal_flags.keys()))}.')
ants_with_wgts.add(ant)
# add off-grid antennas that have no weight at idealized position = 0
for ant in all_ants:
if ant not in calibratable_ants:
if ant in ants_with_wgts:
raise ValueError(f'Antenna {ant} appears in data with non-zero weight, but is not in the on-grid ants '
f'which are {sorted(list(calibratable_ants))}.')
idealized_antpos[ant[0]] = np.zeros(unflagged_nDims)
return idealized_antpos
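# Hedged sketch of what "idealized" positions look like (hypothetical values, comments only):
# for a perfectly redundant 3-element east-west line with one flagged, off-grid antenna kept
# at the origin, the result might resemble
#     {0: np.array([0.]), 1: np.array([1.]), 2: np.array([2.]), 11: np.array([0.])}
# i.e. abstract grid coordinates rather than meters, with extra dimensions appearing only if
# an antenna would otherwise introduce a new redcal degeneracy.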
def post_redcal_abscal(model, data, data_wgts, rc_flags, edge_cut=0, tol=1.0, kernel=(1, 15),
phs_max_iter=100, phs_conv_crit=1e-6, verbose=True, use_abs_amp_lincal=True):
'''Performs Abscal for data that has already been redundantly calibrated.
Arguments:
model: DataContainer containing externally calibrated visibilities, LST-matched to the data.
The model keys must match the data keys.
data: DataContainer containing redundantly but not absolutely calibrated visibilities. This gets modified.
data_wgts: DataContainer containing same keys as data, determines their relative weight in the abscal
linear equation solvers.
rc_flags: dictionary mapping keys like (1, 'Jnn') to flag waterfalls from redundant calibration.
edge_cut : integer number of channels to exclude at each band edge in delay and global phase solvers
tol: float distance for baseline match tolerance in units of baseline vectors (e.g. meters)
kernel: tuple of integers, size of medfilt kernel used in the first step of delay slope calibration.
phs_max_iter: maximum number of iterations of phase_slope_cal or TT_phs_cal allowed
phs_conv_crit: convergence criterion for updates to iterative phase calibration that compares
the updates to all 1.0s.
use_abs_amp_lincal: finish calibration with an unbiased amplitude lincal step. Default True.
Returns:
abscal_delta_gains: gain dictionary mapping keys like (1, 'Jnn') to waterfalls containing
the updates to the gains between redcal and abscal. Uses keys from rc_flags. Will try to
update flagged antennas if they fall on the grid and don't introduce additional degeneracies.
'''
# get ants, idealized_antpos, and reds
ants = sorted(list(rc_flags.keys()))
idealized_antpos = _get_idealized_antpos(rc_flags, data.antpos, data.pols(),
data_wgts=data_wgts, tol=tol, keep_flagged_ants=True)
reds = redcal.get_reds(idealized_antpos, pols=data.pols(), bl_error_tol=redcal.IDEALIZED_BL_TOL)
# Abscal Step 1: Per-Channel Logarithmic Absolute Amplitude Calibration
gains_here = abs_amp_logcal(model, data, wgts=data_wgts, verbose=verbose, return_gains=True, gain_ants=ants)
abscal_delta_gains = {ant: gains_here[ant] for ant in ants}
apply_cal.calibrate_in_place(data, gains_here)
# Abscal Step 2: Global Delay Slope Calibration
binary_wgts = DataContainer({bl: (data_wgts[bl] > 0).astype(float) for bl in data_wgts})
df = np.median(np.diff(data.freqs))
for time_avg in [True, False]: # first use the time-averaged solution to try to avoid false minima
gains_here = delay_slope_lincal(model, data, idealized_antpos, wgts=binary_wgts, df=df, f0=data.freqs[0], medfilt=True, kernel=kernel,
assume_2D=False, time_avg=time_avg, verbose=verbose, edge_cut=edge_cut, return_gains=True, gain_ants=ants)
abscal_delta_gains = {ant: abscal_delta_gains[ant] * gains_here[ant] for ant in ants}
apply_cal.calibrate_in_place(data, gains_here)
# Abscal Step 3: Global Phase Slope Calibration (first using ndim_fft, then using linfit)
for time_avg in [True, False]:
gains_here = global_phase_slope_logcal(model, data, idealized_antpos, reds=reds, solver='ndim_fft', wgts=binary_wgts, verbose=verbose, assume_2D=False,
tol=redcal.IDEALIZED_BL_TOL, edge_cut=edge_cut, time_avg=time_avg, return_gains=True, gain_ants=ants)
abscal_delta_gains = {ant: abscal_delta_gains[ant] * gains_here[ant] for ant in ants}
apply_cal.calibrate_in_place(data, gains_here)
for time_avg in [True, False]:
for i in range(phs_max_iter):
gains_here = global_phase_slope_logcal(model, data, idealized_antpos, reds=reds, solver='linfit', wgts=binary_wgts, verbose=verbose, assume_2D=False,
tol=redcal.IDEALIZED_BL_TOL, edge_cut=edge_cut, time_avg=time_avg, return_gains=True, gain_ants=ants)
abscal_delta_gains = {ant: abscal_delta_gains[ant] * gains_here[ant] for ant in ants}
apply_cal.calibrate_in_place(data, gains_here)
crit = np.median(np.linalg.norm([gains_here[k] - 1.0 for k in gains_here.keys()], axis=(0, 1)))
echo("global_phase_slope_logcal convergence criterion: " + str(crit), verbose=verbose)
if crit < phs_conv_crit:
break
# Abscal Step 4: Per-Channel Tip-Tilt Phase Calibration
angle_wgts = DataContainer({bl: 2 * np.abs(model[bl])**2 * data_wgts[bl] for bl in model})
# This is because, in the high SNR limit, if Var(model) = 0 and Var(data) = Var(noise),
# then Var(angle(data / model)) = Var(noise) / (2 |model|^2). Here data_wgts = Var(noise)^-1.
for i in range(phs_max_iter):
gains_here = TT_phs_logcal(model, data, idealized_antpos, wgts=angle_wgts, verbose=verbose, assume_2D=False, return_gains=True, gain_ants=ants)
abscal_delta_gains = {ant: abscal_delta_gains[ant] * gains_here[ant] for ant in ants}
apply_cal.calibrate_in_place(data, gains_here)
crit = np.median(np.linalg.norm([gains_here[k] - 1.0 for k in gains_here.keys()], axis=(0, 1)))
echo("TT_phs_logcal convergence criterion: " + str(crit), verbose=verbose)
if crit < phs_conv_crit:
break
# Abscal Step 5: Per-Channel Linear Absolute Amplitude Calibration
if use_abs_amp_lincal:
gains_here = abs_amp_lincal(model, data, wgts=data_wgts, verbose=verbose, return_gains=True, gain_ants=ants)
abscal_delta_gains = {ant: abscal_delta_gains[ant] * gains_here[ant] for ant in ants}
return abscal_delta_gains
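# Hedged usage sketch (not executed): assumes DataContainers `model`, `data`, `data_wgts` and a
# redcal flag dict `rc_flags` that already share keys, plus a hypothetical redcal gain dict `rc_gains`.
#     delta_gains = post_redcal_abscal(model, data, data_wgts, rc_flags, edge_cut=100)
#     abscal_gains = {ant: rc_gains[ant] * delta_gains[ant] for ant in delta_gains}
# Note that `data` is recalibrated in place; the returned dictionary contains only the
# multiplicative update relative to the redcal gains.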
def post_redcal_abscal_run(data_file, redcal_file, model_files, raw_auto_file=None, data_is_redsol=False, model_is_redundant=False, output_file=None,
nInt_to_load=None, data_solar_horizon=90, model_solar_horizon=90, extrap_limit=.5, min_bl_cut=1.0, max_bl_cut=None,
edge_cut=0, tol=1.0, phs_max_iter=100, phs_conv_crit=1e-6, refant=None, clobber=True, add_to_history='', verbose=True):
'''Perform abscal on entire data files, picking relevant model_files from a list and doing partial data loading.
Does not work on data (or models) with baseline-dependant averaging.
Arguments:
data_file: string path to raw uvh5 visibility file or omnical_visibility solution
(in the later case, one must also set data_is_redsol to True).
redcal_file: string path to redcal calfits file. This forms the basis of the resultant abscal calfits file.
If data_is_redsol is False, this will also be used to calibrate the data_file and raw_auto_file
model_files: list of string paths to externally calibrated data or a reference simulation.
Strings must be sortable to produce a chronological list in LST (wrapping over 2*pi is OK).
raw_auto_file: path to data file that contains raw autocorrelations for all antennas in redcal_file.
These are used for weighting and calculating chi^2. If data_is_redsol, this must be provided.
If this is None, data_file will be used.
data_is_redsol: If True, data_file only contains unique visibilities for each baseline group. This means it has been
redundantly calibrated by the gains in redcal_file already. If this is True, model_is_redundant must also be True
and raw_auto_file must be provided. If both this and model_is_redundant are False, then only exact baseline
matches are used in absolute calibration.
model_is_redundant: If True, then model_files only contain unique visibilities. In this case, data and model
antenna numbering do not need to agree, as redundant baselines will be found automatically.
output_file: string path to output abscal calfits file. If None, will be redcal_file.replace('.omni.', '.abs.')
nInt_to_load: number of integrations to load and calibrate simultaneously. Default None loads all integrations.
data_solar_horizon: Solar altitude threshold [degrees]. When the sun is too high in the data, flag the integration.
model_solar_horizon: Solar altitude threshold [degrees]. When the sun is too high in the model, flag the integration.
extrap_limit: float maximum LST difference (in units of delta LST of the model) allowed between matching data and model times
min_bl_cut: minimum baseline separation [meters] to keep in data when calibrating. None or 0 means no minimum,
which will include autocorrelations in the absolute calibration. Usually this is not desired, so the default is 1.0.
max_bl_cut: maximum baseline separation [meters] to keep in data when calibrating. None (default) means no maximum.
edge_cut: integer number of channels to exclude at each band edge in delay and global phase solvers
tol: baseline match tolerance in units of baseline vectors (e.g. meters)
phs_max_iter: integer maximum number of iterations of phase_slope_cal or TT_phs_cal allowed
phs_conv_crit: convergence criterion for updates to iterative phase calibration that compares them to all 1.0s.
refant: tuple of the form (0, 'Jnn') indicating the antenna defined to have 0 phase. If None, refant will be automatically chosen.
clobber: if True, overwrites existing abscal calfits file at the output path
add_to_history: string to add to history of output abscal file
Returns:
hc: HERACal object which was written to disk. Matches the input redcal_file with an updated history.
This HERACal object has been updated with the following properties accessible on hc.build_calcontainers():
* gains: abscal gains for times that could be calibrated, redcal gains otherwise (but flagged)
* flags: redcal flags, with additional flagging if the data is flagged (see flag_utils.synthesize_ant_flags) or
if the model is completely flagged for a given freq/channel when reduced to a single flagging waterfall
* quals: abscal chi^2 per antenna based on calibrated data minus model (Normalized by noise/nObs, but not with proper DoF)
* total_qual: abscal chi^2 based on calibrated data minus model (Normalized by noise/nObs, but not with proper DoF)
'''
# Raise error if output calfile already exists and clobber is False
if output_file is None:
output_file = redcal_file.replace('.omni.', '.abs.')
if os.path.exists(output_file) and not clobber:
raise IOError("{} exists, not overwriting.".format(output_file))
# Make raw_auto_file the data_file if None when appropriate, otherwise raise an error
if raw_auto_file is None:
if not data_is_redsol:
raw_auto_file = data_file
else:
raise ValueError('If the data is a redundant visibility solution, raw_auto_file must be specified.')
# Load redcal calibration
hc = io.HERACal(redcal_file)
rc_gains, rc_flags, rc_quals, rc_tot_qual = hc.read()
assert hc.gain_convention == 'divide', "The calibration gain convention in {} is not the HERA standard 'divide'.".format(redcal_file)
# Initialize full-size, totally-flagged abscal gain/flag/etc. dictionaries
abscal_gains = copy.deepcopy(rc_gains)
abscal_flags = {ant: np.ones_like(rf) for ant, rf in rc_flags.items()}
abscal_chisq_per_ant = {ant: np.zeros_like(rq) for ant, rq in rc_quals.items()} # this stays zero, as it's not particularly meaningful
abscal_chisq = {pol: np.zeros_like(rtq) for pol, rtq in rc_tot_qual.items()}
# match times to narrow down model_files
matched_model_files = sorted(set(match_times(data_file, model_files, filetype='uvh5')))
if len(matched_model_files) == 0:
echo("No model files overlap with data files in LST. Result will be fully flagged.", verbose=verbose)
else:
echo("The following model files overlap with data files in LST:\n" + "\n".join(matched_model_files), verbose=verbose)
hd = io.HERAData(data_file)
hdm = io.HERAData(matched_model_files)
if hc.gain_scale is not None and hc.gain_scale.lower() != "uncalib":
warnings.warn(f"Warning: Overwriting redcal gain_scale of {hc.gain_scale} with model gain_scale of {hdm.vis_units}", RuntimeWarning)
hc.gain_scale = hdm.vis_units # set vis_units of hera_cal based on model files.
hd_autos = io.HERAData(raw_auto_file)
assert hdm.x_orientation == hd.x_orientation, 'Data x_orientation, {}, does not match model x_orientation, {}'.format(hd.x_orientation, hdm.x_orientation)
assert hc.x_orientation == hd.x_orientation, 'Data x_orientation, {}, does not match redcal x_orientation, {}'.format(hd.x_orientation, hc.x_orientation)
pol_load_list = [pol for pol in hd.pols if split_pol(pol)[0] == split_pol(pol)[1]]
# get model bls and antpos to use later in baseline matching
model_bls = hdm.bls
model_antpos = hdm.data_antpos
if len(matched_model_files) > 1: # in this case, it's a dictionary
model_bls = list(set([bl for bls in list(hdm.bls.values()) for bl in bls]))
model_antpos = {ant: pos for antpos in hdm.data_antpos.values() for ant, pos in antpos.items()}
# match integrations in model to integrations in data
all_data_times, all_data_lsts = get_all_times_and_lsts(hd, solar_horizon=data_solar_horizon, unwrap=True)
all_model_times, all_model_lsts = get_all_times_and_lsts(hdm, solar_horizon=model_solar_horizon, unwrap=True)
d2m_time_map = get_d2m_time_map(all_data_times, all_data_lsts, all_model_times, all_model_lsts, extrap_limit=extrap_limit)
# group matched time indices for partial I/O
matched_tinds = [tind for tind, time in enumerate(hd.times) if time in d2m_time_map and d2m_time_map[time] is not None]
if len(matched_tinds) > 0:
tind_groups = np.array([matched_tinds]) # just load a single group
if nInt_to_load is not None: # split up the integrations to load nInt_to_load at a time
tind_groups = np.split(matched_tinds, np.arange(nInt_to_load, len(matched_tinds), nInt_to_load))
# loop over polarizations
for pol in pol_load_list:
echo('\n\nNow calibrating ' + pol + '-polarization...', verbose=verbose)
ants = [ant for ant in abscal_gains if join_pol(ant[1], ant[1]) == pol]
# figure out which baselines to load from the data and the model and their correspondence (if one or both is redundantly averaged)
(data_bl_to_load,
model_bl_to_load,
data_to_model_bl_map) = match_baselines(hd.bls, model_bls, hd.data_antpos, model_antpos=model_antpos, pols=[pol],
data_is_redsol=data_is_redsol, model_is_redundant=model_is_redundant,
tol=tol, min_bl_cut=min_bl_cut, max_bl_cut=max_bl_cut, verbose=verbose)
if (len(data_bl_to_load) == 0) or (len(model_bl_to_load) == 0):
echo("No baselines in the data match baselines in the model. Results for this polarization will be fully flagged.", verbose=verbose)
else:
# loop over groups of time indices
for tinds in tind_groups:
echo('\n Now calibrating times ' + str(hd.times[tinds[0]])
+ ' through ' + str(hd.times[tinds[-1]]) + '...', verbose=verbose)
# load data and apply calibration (unless data_is_redsol, so it's already redcal'ed)
data, flags, nsamples = hd.read(times=hd.times[tinds], bls=data_bl_to_load)
rc_gains_subset = {k: rc_gains[k][tinds, :] for k in ants}
rc_flags_subset = {k: rc_flags[k][tinds, :] for k in ants}
if not data_is_redsol: # data is raw, so redundantly calibrate it
calibrate_in_place(data, rc_gains_subset, data_flags=flags, cal_flags=rc_flags_subset)
if not np.all(list(flags.values())):
# load model and rephase
model_times_to_load = [d2m_time_map[time] for time in hd.times[tinds]]
model, model_flags, _ = io.partial_time_io(hdm, np.unique(model_times_to_load), bls=model_bl_to_load)
if not np.array_equal(model_times_to_load, model.times):
# if multiple data times map to a single model time, this expands the model to match the data in time
model.select_or_expand_times(model_times_to_load)
model_flags.select_or_expand_times(model_times_to_load)
model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
utils.lst_rephase(model, model_blvecs, model.freqs, data.lsts - model.lsts,
lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)
# Flag frequencies and times in the data that are entirely flagged in the model
model_flag_waterfall = np.all([f for f in model_flags.values()], axis=0)
for k in flags.keys():
flags[k] += model_flag_waterfall
# get the relative wgts for each piece of data
auto_bls = [join_bl(ant, ant) for ant in rc_gains if join_bl(ant, ant)[2] == pol]
autocorrs, auto_flags, _ = hd_autos.read(times=hd.times[tinds], bls=auto_bls)
calibrate_in_place(autocorrs, rc_gains_subset, data_flags=auto_flags, cal_flags=rc_flags_subset)
# use data_to_model_bl_map to rekey model. Does not copy to save memory.
model = DataContainer({bl: model[data_to_model_bl_map[bl]] for bl in data})
model_flags = DataContainer({bl: model_flags[data_to_model_bl_map[bl]] for bl in data})
# build data weights based on inverse noise variance and nsamples and flags
data_wgts = build_data_wgts(flags, nsamples, model_flags, autocorrs, auto_flags,
times_by_bl=hd.times_by_bl, df=np.median(np.ediff1d(data.freqs)),
data_is_redsol=data_is_redsol, gain_flags=rc_flags_subset, antpos=hd.data_antpos)
# run absolute calibration to get the gain updates
delta_gains = post_redcal_abscal(model, data, data_wgts, rc_flags_subset, edge_cut=edge_cut, tol=tol,
phs_max_iter=phs_max_iter, phs_conv_crit=phs_conv_crit, verbose=verbose)
# abscal autos, rebuild weights, and generate abscal Chi^2
calibrate_in_place(autocorrs, delta_gains)
chisq_wgts = build_data_wgts(flags, nsamples, model_flags, autocorrs, auto_flags,
times_by_bl=hd.times_by_bl, df=np.median(np.ediff1d(data.freqs)),
data_is_redsol=data_is_redsol, gain_flags=rc_flags_subset, antpos=hd.data_antpos)
total_qual, nObs, quals, nObs_per_ant = utils.chisq(data, model, chisq_wgts,
gain_flags=rc_flags_subset, split_by_antpol=True)
# update results
for ant in ants:
# new gains are the product of redcal gains and delta gains from abscal
abscal_gains[ant][tinds, :] = rc_gains_subset[ant] * delta_gains[ant]
# new flags are the OR of redcal flags and times/freqs totally flagged in the model
abscal_flags[ant][tinds, :] = rc_flags_subset[ant] + model_flag_waterfall
for antpol in total_qual.keys():
abscal_chisq[antpol][tinds, :] = total_qual[antpol] / nObs[antpol] # Note, not normalized for DoF
abscal_chisq[antpol][tinds, :][~np.isfinite(abscal_chisq[antpol][tinds, :])] = 0.
# impose a single reference antenna on the final antenna solution
if refant is None:
refant = pick_reference_antenna(abscal_gains, abscal_flags, hc.freqs, per_pol=True)
rephase_to_refant(abscal_gains, refant, flags=abscal_flags, propagate_refant_flags=True)
# flag any nans, infs, etc.
for ant in abscal_gains:
abscal_flags[ant][~np.isfinite(abscal_gains[ant])] = True
abscal_gains[ant][~np.isfinite(abscal_gains[ant])] = 1.0 + 0.0j
# Save results to disk
hc.update(gains=abscal_gains, flags=abscal_flags, quals=abscal_chisq_per_ant, total_qual=abscal_chisq)
hc.quality_array[np.isnan(hc.quality_array)] = 0
hc.total_quality_array[np.isnan(hc.total_quality_array)] = 0
hc.history += version.history_string(add_to_history)
hc.write_calfits(output_file, clobber=clobber)
return hc
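# Hedged Python-level usage sketch (file paths are hypothetical, not executed):
#     hc = post_redcal_abscal_run('zen.2458838.40712.uvh5',
#                                 'zen.2458838.40712.omni.calfits',
#                                 ['zen.2458838.40712.abs_model.uvh5'],
#                                 nInt_to_load=6, clobber=True)
#     gains, flags, quals, total_qual = hc.build_calcontainers()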
def post_redcal_abscal_argparser():
''' Argparser for commandline operation of hera_cal.abscal.post_redcal_abscal_run() '''
a = argparse.ArgumentParser(description="Command-line drive script for post-redcal absolute calibration using hera_cal.abscal module")
a.add_argument("data_file", type=str, help="string path to raw uvh5 visibility file or omnical_visibility solution")
a.add_argument("redcal_file", type=str, help="string path to calfits file that serves as the starting point of abscal")
a.add_argument("model_files", type=str, nargs='+', help="list of string paths to externally calibrated data or reference solution. Strings \
must be sortable to produce a chronological list in LST (wrapping over 2*pi is OK)")
a.add_argument("--raw_auto_file", default=None, type=str, help="path to data file that contains raw autocorrelations for all antennas in redcal_file. \
If not provided, data_file is used instead. Required if data_is_redsol is True.")
a.add_argument("--data_is_redsol", default=False, action="store_true", help="If True, data_file only contains unique, redcal'ed visibilities.")
a.add_argument("--model_is_redundant", default=False, action="store_true", help="If True, then model_files only containe unique visibilities.")
a.add_argument("--output_file", default=None, type=str, help="string path to output abscal calfits file. If None, will be redcal_file.replace('.omni.', '.abs.'")
a.add_argument("--nInt_to_load", default=None, type=int, help="number of integrations to load and calibrate simultaneously. Default None loads all integrations.")
a.add_argument("--data_solar_horizon", default=90.0, type=float, help="Solar altitude threshold [degrees]. When the sun is too high in the data, flag the integration.")
a.add_argument("--model_solar_horizon", default=90.0, type=float, help="Solar altitude threshold [degrees]. When the sun is too high in the model, flag the integration.")
a.add_argument("--min_bl_cut", default=1.0, type=float, help="minimum baseline separation [meters] to keep in data when calibrating. None or 0 means no mininum, which will \
include autocorrelations in the absolute calibration. Usually this is not desired, so the default is 1.0.")
a.add_argument("--max_bl_cut", default=None, type=float, help="maximum baseline separation [meters] to keep in data when calibrating. None (default) means no maximum.")
a.add_argument("--edge_cut", default=0, type=int, help="integer number of channels to exclude at each band edge in delay and global phase solvers")
a.add_argument("--tol", default=1.0, type=float, help="baseline match tolerance in units of baseline vectors (e.g. meters)")
a.add_argument("--phs_max_iter", default=100, type=int, help="integer maximum number of iterations of phase_slope_cal or TT_phs_cal allowed")
a.add_argument("--phs_conv_crit", default=1e-6, type=float, help="convergence criterion for updates to iterative phase calibration that compares them to all 1.0s.")
a.add_argument("--clobber", default=False, action="store_true", help="overwrites existing abscal calfits file at the output path")
a.add_argument("--verbose", default=False, action="store_true", help="print calibration progress updates")
args = a.parse_args()
return args
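# Hedged command-line sketch for the argparser above (the driver script name and file names
# are hypothetical; consult the installed scripts for the real entry point):
#     post_redcal_abscal_run.py zen.2458838.40712.uvh5 zen.2458838.40712.omni.calfits \
#         zen.2458838.40712.abs_model.uvh5 --nInt_to_load 6 --clobber --verbose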
| 45.884471 | 177 | 0.63396 | 24,611 | 170,782 | 4.242493 | 0.055747 | 0.002442 | 0.008466 | 0.006302 | 0.528608 | 0.474581 | 0.432508 | 0.403517 | 0.366155 | 0.345027 | 0 | 0.008873 | 0.271451 | 170,782 | 3,721 | 178 | 45.896802 | 0.830297 | 0.415372 | 0 | 0.352587 | 0 | 0.006017 | 0.076929 | 0.006008 | 0 | 0 | 0 | 0 | 0.012635 | 1 | 0.055355 | false | 0 | 0.013839 | 0.003008 | 0.152226 | 0.001805 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
accf879ecc878fbff08148fad3de44db7465965b | 8,484 | py | Python | ivory/core/base.py | daizutabi/ivory | d961e6c05ece112d99b8f8c2d6dad530f60b7303 | [
"MIT"
] | 1 | 2019-05-16T10:38:53.000Z | 2019-05-16T10:38:53.000Z | ivory/core/base.py | daizutabi/ivory | d961e6c05ece112d99b8f8c2d6dad530f60b7303 | [
"MIT"
] | null | null | null | ivory/core/base.py | daizutabi/ivory | d961e6c05ece112d99b8f8c2d6dad530f60b7303 | [
"MIT"
] | null | null | null | """This module provides base classes for Ivory."""
import copy
import inspect
from typing import Callable, Dict, Tuple
import ivory.core.collections
from ivory import utils
from ivory.core import default, instance
class Base(ivory.core.collections.Dict):
"""Base class for an entity class such as `Client`, `Experiment`, and `Run`.
Args:
params (dict, optional): Parameter dictionary to create this instance.
**instances: Member instances. Key is its name and value is the member instance.
Attributes:
params (dict, optional): Parameter dictionary that is used to create
this instance.
id (str): Instance ID given by
[MLFlow Tracking](https://www.mlflow.org/docs/latest/tracking.html).
name (str): Instance name.
source_name (str): Name of the YAML parameter file that is used to create
this instance.
"""
def __init__(self, params=None, **instances):
super().__init__()
self.params = params
self.id = self.name = self.source_name = ""
if "id" in instances:
self.id = instances.pop("id")
if "name" in instances:
self.name = instances.pop("name")
if "source_name" in instances:
self.source_name = instances.pop("source_name")
self.dict = instances
def __repr__(self):
args = []
if self.id:
args.append(f"id={self.id!r}")
if self.name:
args.append(f"name={self.name!r}")
args.append(f"num_instances={len(self)}")
args = ", ".join(args)
return f"{self.__class__.__name__}({args})"
class Creator(Base):
"""Creator class to create `Run` instances."""
@property
def experiment_id(self) -> str:
return self.params["experiment"]["id"]
@property
def experiment_name(self) -> str:
return self.params["experiment"]["name"]
def create_params(
self, args=None, name: str = "run", **kwargs
) -> Tuple[dict, dict]:
"""Returns a tuple of (parameter dictionary, update dictionary).
The parameter dictionary is deeply copied from the original one, then updated
according to the arguments. The update dictionary includes only the updated
parameters.
Args:
args (dict, optional): Update dictionary.
name: Run class name in lower case.
**kwargs: Additional update dictionary.
Examples:
Use `args` for parameters including dots:
params, update = experiment.create_params(
{'hidden_sizes.0': 100}, fold=3
)
The `params` is the full parameter dictionary, while the `update` is a
part of `params`, i.e., `update = {'hidden_sizes.0': 100, 'fold': 3}`.
"""
params = copy.deepcopy(self.params)
if name not in params:
params.update(default.get(name))
update, args = utils.params.create_update(params[name], args, **kwargs)
utils.params.update_dict(params[name], update)
return params, args
def create_run(self, args=None, name: str = "run", tags=None, **kwargs):
"""Creates a `Run` instance according to arguments.
Args:
args (dict, optional): Update dictionary.
name: Run class name in lower case.
tags (dict, optional): Tags dictionary.
**kwargs: Additional update dictionary.
Returns:
Run: Created `Run` instance. The parameter for this instance is the
returned dictionary from the
[`create_params()`](#ivory.core.base.Creator.create_params) function.
"""
params, args = self.create_params(args, name, **kwargs)
run = instance.create_base_instance(params, name, self.source_name)
if self.tracker:
from ivory.callbacks.pruning import Pruning
run.set_tracker(self.tracker, name)
run.tracking.log_params_artifact(run)
run.tracking.log_files_artifact(run)
args = {arg: utils.params.get_value(run.params[name], arg) for arg in args}
run.tracking.log_params(run.id, args)
if tags:
run.tracking.set_tags(run.id, tags)
run.set(pruning=Pruning())
return run
def create_instance(self, instance_name: str, args=None, name="run", **kwargs):
"""Creates an member instance of a `Run` according to arguments.
Args:
instance_name: Name of a member instance to create.
args (dict, optional): Update dictionary.
name: Run class name in lower case.
**kwargs: Additional update dictionary.
Returns:
Created instance. The parameter for this instance is the
returned dictionary from the
[`create_params()`](#ivory.core.base.Creator.create_params) function.
"""
params, _ = self.create_params(args, name, **kwargs)
return instance.create_instance(params[name], instance_name)
class Callback:
"""Callback class for the Ivory callback system."""
METHODS = [
"on_init_begin",
"on_init_end",
"on_fit_begin",
"on_epoch_begin",
"on_train_begin",
"on_train_end",
"on_val_begin",
"on_val_end",
"on_epoch_end",
"on_fit_end",
"on_test_begin",
"on_test_end",
]
ARGUMENTS = ["run"]
def __init__(self, caller: "CallbackCaller", methods: Dict[str, Callable]):
self.caller = caller
self.methods = methods
def __repr__(self):
class_name = self.__class__.__name__
callbacks = list(self.methods.keys())
return f"{class_name}({callbacks})"
def __call__(self):
caller = self.caller
for method in self.methods.values():
method(caller)
class CallbackCaller(Creator):
"""Callback caller class."""
def create_callbacks(self):
"""Creates callback functions and store them in a dictionary."""
for method in Callback.METHODS:
methods = {}
for key in self:
if hasattr(self[key], method):
callback = getattr(self[key], method)
if callable(callback):
parameters = inspect.signature(callback).parameters
if list(parameters.keys()) == Callback.ARGUMENTS:
methods[key] = callback
self[method] = Callback(self, methods)
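# Illustrative sketch (not part of the original module): create_callbacks() registers,
# for each member instance, any method named in Callback.METHODS that accepts a single
# `run` argument, so a member like the hypothetical Logger below is picked up and later
# invoked through the stored Callback object, e.g. caller["on_epoch_end"]().
#
#     class Logger:
#         def on_epoch_end(self, run):
#             print("epoch finished for", run.name)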
class Experiment(Creator):
"""Experimet class is one of the main classes of Ivory library.
Basically, one experiment is corresponding to one YAML parameter file that is held
in an `Experiment` instance as a parameter dictionary. This parameter dictionary
defines the default parameter values to create `Run` instances.
See Also:
The base class [`ivory.core.base.Creator`](#ivory.core.base.Creator)
defines some functions to create a `Run` instance or its member instance.
"""
def set_tracker(self, tracker):
"""Sets a `Tracker` instance for tracking.
Args:
tracker (Tracker): Tracker instance.
"""
if not self.id:
self.id = tracker.create_experiment(self.name)
self.params["experiment"]["id"] = self.id
self.set(tracker=tracker)
def create_task(self):
"""Creates a `Task` instance for multiple runs.
See Also:
For more details, see
[client.create_task()](/api/ivory.core.client#ivory.core.client.Client.create_task)
and [Multiple Runs](/tutorial/task) in the Tutorial.
"""
return self.create_run(name="task")
def create_study(self, args=None, **suggests):
"""Creates a `Study` instance for hyperparameter tuning.
See Also:
For more details, see
[client.create_study()](/api/ivory.core.client#ivory.core.client.Client.create_study)
and [Hyperparameter Tuning](/tutorial/tuning) in the Tutorial.
"""
study = self.create_run(name="study")
if isinstance(args, str) and args in study.objective:
study.objective.suggests = {args: study.objective.suggests[args]}
return study
if args or suggests:
study.objective.update(args, **suggests)
return study
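# Illustrative usage sketch (assumptions: `experiment` was built from a YAML parameter
# file and `tracker` is a configured Tracker instance; neither is defined in this module):
#
#     experiment.set_tracker(tracker)
#     run = experiment.create_run({"hidden_sizes.0": 100}, fold=3)
#     task = experiment.create_task()    # Run-like instance for multiple runs
#     study = experiment.create_study()  # Run-like instance for hyperparameter tuning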
| 34.91358 | 97 | 0.604078 | 996 | 8,484 | 5.03012 | 0.180723 | 0.01976 | 0.010379 | 0.015968 | 0.21018 | 0.169062 | 0.127146 | 0.127146 | 0.112774 | 0.075649 | 0 | 0.001662 | 0.290665 | 8,484 | 242 | 98 | 35.057851 | 0.830841 | 0.394625 | 0 | 0.052174 | 0 | 0 | 0.079965 | 0.018036 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121739 | false | 0 | 0.06087 | 0.017391 | 0.330435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
accfbe5ff79a258f7d20ab83d29c7174e720028b | 6,634 | py | Python | esphome/symlink_ops.py | pi4homez/esphome | 697e9b0c28bb690719fa1d16ca8198ce5fd1d2be | [
"MIT"
] | 5 | 2019-04-14T09:43:29.000Z | 2021-07-17T06:36:44.000Z | esphome/symlink_ops.py | pi4homez/esphome | 697e9b0c28bb690719fa1d16ca8198ce5fd1d2be | [
"MIT"
] | null | null | null | esphome/symlink_ops.py | pi4homez/esphome | 697e9b0c28bb690719fa1d16ca8198ce5fd1d2be | [
"MIT"
] | 4 | 2019-07-08T08:58:44.000Z | 2021-12-18T21:56:22.000Z | import os
if hasattr(os, 'symlink'):
def symlink(src, dst):
return os.symlink(src, dst)
def islink(path):
return os.path.islink(path)
def readlink(path):
return os.readlink(path)
def unlink(path):
return os.unlink(path)
else:
import ctypes
from ctypes import wintypes
# Code taken from
# https://stackoverflow.com/questions/27972776/having-trouble-implementing-a-readlink-function
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
FILE_READ_ATTRIBUTES = 0x0080
OPEN_EXISTING = 3
FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_ATTRIBUTE_REPARSE_POINT = 0x0400
IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
IO_REPARSE_TAG_SYMLINK = 0xA000000C
FSCTL_GET_REPARSE_POINT = 0x000900A8
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 0x4000
LPDWORD = ctypes.POINTER(wintypes.DWORD)
LPWIN32_FIND_DATA = ctypes.POINTER(wintypes.WIN32_FIND_DATAW)
INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value
def IsReparseTagNameSurrogate(tag):
return bool(tag & 0x20000000)
def _check_invalid_handle(result, func, args):
if result == INVALID_HANDLE_VALUE:
raise ctypes.WinError(ctypes.get_last_error())
return args
def _check_bool(result, func, args):
if not result:
raise ctypes.WinError(ctypes.get_last_error())
return args
kernel32.FindFirstFileW.errcheck = _check_invalid_handle
kernel32.FindFirstFileW.restype = wintypes.HANDLE
kernel32.FindFirstFileW.argtypes = (
wintypes.LPCWSTR, # _In_ lpFileName
LPWIN32_FIND_DATA) # _Out_ lpFindFileData
kernel32.FindClose.argtypes = (
wintypes.HANDLE,) # _Inout_ hFindFile
kernel32.CreateFileW.errcheck = _check_invalid_handle
kernel32.CreateFileW.restype = wintypes.HANDLE
kernel32.CreateFileW.argtypes = (
wintypes.LPCWSTR, # _In_ lpFileName
wintypes.DWORD, # _In_ dwDesiredAccess
wintypes.DWORD, # _In_ dwShareMode
wintypes.LPVOID, # _In_opt_ lpSecurityAttributes
wintypes.DWORD, # _In_ dwCreationDisposition
wintypes.DWORD, # _In_ dwFlagsAndAttributes
wintypes.HANDLE) # _In_opt_ hTemplateFile
kernel32.CloseHandle.argtypes = (
wintypes.HANDLE,) # _In_ hObject
kernel32.DeviceIoControl.errcheck = _check_bool
kernel32.DeviceIoControl.argtypes = (
wintypes.HANDLE, # _In_ hDevice
wintypes.DWORD, # _In_ dwIoControlCode
wintypes.LPVOID, # _In_opt_ lpInBuffer
wintypes.DWORD, # _In_ nInBufferSize
wintypes.LPVOID, # _Out_opt_ lpOutBuffer
wintypes.DWORD, # _In_ nOutBufferSize
LPDWORD, # _Out_opt_ lpBytesReturned
wintypes.LPVOID) # _Inout_opt_ lpOverlapped
class REPARSE_DATA_BUFFER(ctypes.Structure):
class ReparseData(ctypes.Union):
class LinkData(ctypes.Structure):
_fields_ = (('SubstituteNameOffset', wintypes.USHORT),
('SubstituteNameLength', wintypes.USHORT),
('PrintNameOffset', wintypes.USHORT),
('PrintNameLength', wintypes.USHORT))
@property
def PrintName(self):
dt = wintypes.WCHAR * (self.PrintNameLength // ctypes.sizeof(wintypes.WCHAR))
name = dt.from_address(ctypes.addressof(self.PathBuffer) +
self.PrintNameOffset).value
if name.startswith(r'\??'):
name = r'\\?' + name[3:] # NT => Windows
return name
class SymbolicLinkData(LinkData):
_fields_ = (('Flags', wintypes.ULONG), ('PathBuffer', wintypes.BYTE * 0))
class MountPointData(LinkData):
_fields_ = (('PathBuffer', wintypes.BYTE * 0),)
class GenericData(ctypes.Structure):
_fields_ = (('DataBuffer', wintypes.BYTE * 0),)
_fields_ = (('SymbolicLinkReparseBuffer', SymbolicLinkData),
('MountPointReparseBuffer', MountPointData),
('GenericReparseBuffer', GenericData))
_fields_ = (('ReparseTag', wintypes.ULONG),
('ReparseDataLength', wintypes.USHORT),
('Reserved', wintypes.USHORT),
('ReparseData', ReparseData))
_anonymous_ = ('ReparseData',)
def symlink(src, dst):
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
flags = 1 if os.path.isdir(src) else 0
if csl(dst, src, flags) == 0:
error = ctypes.WinError()
# pylint: disable=no-member
if error.winerror == 1314 and error.errno == 22:
from esphome.core import EsphomeError
raise EsphomeError("Cannot create symlink from '%s' to '%s'. Try running tool \
with elevated privileges" % (src, dst))
raise error
def islink(path):
if not os.path.isdir(path):
return False
data = wintypes.WIN32_FIND_DATAW()
kernel32.FindClose(kernel32.FindFirstFileW(path, ctypes.byref(data)))
if not data.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT:
return False
return IsReparseTagNameSurrogate(data.dwReserved0)
def readlink(path):
n = wintypes.DWORD()
buf = (wintypes.BYTE * MAXIMUM_REPARSE_DATA_BUFFER_SIZE)()
flags = FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS
handle = kernel32.CreateFileW(path, FILE_READ_ATTRIBUTES, 0, None,
OPEN_EXISTING, flags, None)
try:
kernel32.DeviceIoControl(handle, FSCTL_GET_REPARSE_POINT, None, 0,
buf, ctypes.sizeof(buf), ctypes.byref(n), None)
finally:
kernel32.CloseHandle(handle)
rb = REPARSE_DATA_BUFFER.from_buffer(buf)
tag = rb.ReparseTag
if tag == IO_REPARSE_TAG_SYMLINK:
return rb.SymbolicLinkReparseBuffer.PrintName
if tag == IO_REPARSE_TAG_MOUNT_POINT:
return rb.MountPointReparseBuffer.PrintName
if not IsReparseTagNameSurrogate(tag):
raise ValueError("not a link")
raise ValueError("unsupported reparse tag: %d" % tag)
def unlink(path):
return os.rmdir(path)
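# Illustrative usage (not part of the original module; assumes the esphome package is
# importable and the paths are placeholders). Both branches above expose the same four
# functions, so callers get a platform-independent API:
#
#     from esphome import symlink_ops
#     symlink_ops.symlink('/path/to/src', '/path/to/dst')
#     if symlink_ops.islink('/path/to/dst'):
#         print(symlink_ops.readlink('/path/to/dst'))
#         symlink_ops.unlink('/path/to/dst')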
| 40.206061 | 98 | 0.623756 | 646 | 6,634 | 6.165635 | 0.30805 | 0.029375 | 0.026362 | 0.008034 | 0.133819 | 0.031886 | 0.0236 | 0.0236 | 0.0236 | 0 | 0 | 0.029056 | 0.289267 | 6,634 | 164 | 99 | 40.45122 | 0.815695 | 0.090142 | 0 | 0.205882 | 0 | 0 | 0.04857 | 0.007984 | 0 | 0 | 0.012974 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.029412 | 0.044118 | 0.279412 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd0e6436f7f4d69da4f67f41e70059cca6fe4c7 | 1,601 | py | Python | config/log.py | zijiei/FingerScan | 9970d1e74dad50177342da33bf18f205ba1539fa | [
"MIT"
] | null | null | null | config/log.py | zijiei/FingerScan | 9970d1e74dad50177342da33bf18f205ba1539fa | [
"MIT"
] | null | null | null | config/log.py | zijiei/FingerScan | 9970d1e74dad50177342da33bf18f205ba1539fa | [
"MIT"
] | null | null | null | import sys
import pathlib
from loguru import logger
# Path settings
relative_directory = pathlib.Path(__file__).parent.parent # relative path of the OneForAll code
result_save_dir = relative_directory.joinpath('logs') # directory where results are saved
log_path = result_save_dir.joinpath('FingerScan.log') # OneForAll log file path
# Logging configuration
# Terminal log output format
stdout_fmt = '<cyan>{time:HH:mm:ss,SSS}</cyan> ' \
'[<level>{level: <5}</level>] ' \
'<blue>{module}</blue>:<cyan>{line}</cyan> - ' \
'<level>{message}</level>'
# Log file record format
logfile_fmt = '<light-green>{time:YYYY-MM-DD HH:mm:ss,SSS}</light-green> ' \
'[<level>{level: <5}</level>] ' \
'<cyan>{process.name}({process.id})</cyan>:' \
'<cyan>{thread.name: <18}({thread.id: <5})</cyan> | ' \
'<blue>{module}</blue>.<blue>{function}</blue>:' \
'<blue>{line}</blue> - <level>{message}</level>'
logger.remove()
logger.level(name='TRACE', color='<cyan><bold>', icon='✏️')
logger.level(name='DEBUG', color='<blue><bold>', icon='🐞 ')
logger.level(name='INFOR', no=20, color='<green><bold>', icon='ℹ️')
logger.level(name='QUITE', no=25, color='<green><bold>', icon='🤫 ')
logger.level(name='ALERT', no=30, color='<yellow><bold>', icon='⚠️')
logger.level(name='ERROR', color='<red><bold>', icon='❌️')
logger.level(name='FATAL', no=50, color='<RED><bold>', icon='☠️')
# To run OneForAll silently in the command terminal, set the level in the line below to QUITE
# The command terminal log level defaults to INFOR
logger.add(sys.stderr, level='INFOR', format=stdout_fmt, enqueue=True)
# The log file level defaults to DEBUG
logger.add(log_path, level='DEBUG', format=logfile_fmt, enqueue=True, encoding='utf-8')
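# Illustrative usage (not part of the original config; assumes config/log.py is importable
# as config.log): other modules import the configured logger and log through the custom
# level names registered above, e.g.
#
#     from config.log import logger
#     logger.log('INFOR', 'scan started')
#     logger.log('ALERT', 'unexpected response received')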
| 41.051282 | 87 | 0.626483 | 206 | 1,601 | 4.84466 | 0.427184 | 0.077154 | 0.10521 | 0.018036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010197 | 0.142411 | 1,601 | 38 | 88 | 42.131579 | 0.708667 | 0.090568 | 0 | 0.076923 | 0 | 0.038462 | 0.394464 | 0.183391 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd1527726bfc6a57f5668b7eca3aeb66629bb7c | 1,114 | py | Python | tests/model/test_work_requirement_summary.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | tests/model/test_work_requirement_summary.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | tests/model/test_work_requirement_summary.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timezone
from yellowdog_client.common.iso_datetime import iso_format
from yellowdog_client.model import WorkRequirementSummary
from yellowdog_client.model import WorkRequirementStatus
from .test_utils import should_serde
def test_serialize_populated():
obj_in_raw = WorkRequirementSummary()
obj_in_raw.id = "my_id"
obj_in_raw.namespace = "my_namespace"
obj_in_raw.name = "my_name"
obj_in_raw.tag = "my_tag"
obj_in_raw.status = WorkRequirementStatus.UNFULFILLED
obj_in_raw.completedTaskCount = 5
obj_in_raw.totalTaskCount = 10
obj_in_raw.createdTime = datetime(2014, 12, 31, 18, 30, 45, 123000, timezone.utc)
obj_in_raw.healthy = True
obj_in_dict = {
"id": "my_id",
"namespace": "my_namespace",
"name": "my_name",
"tag": "my_tag",
'priority': 0.0,
"status": "UNFULFILLED",
"completedTaskCount": 5,
"totalTaskCount": 10,
"createdTime": iso_format(datetime(2014, 12, 31, 18, 30, 45, 123456)),
"healthy": True
}
should_serde(obj_in_raw, obj_in_dict)
| 31.828571 | 85 | 0.691203 | 144 | 1,114 | 5.034722 | 0.333333 | 0.089655 | 0.121379 | 0.066207 | 0.143448 | 0.06069 | 0.06069 | 0 | 0 | 0 | 0 | 0.054237 | 0.205566 | 1,114 | 34 | 86 | 32.764706 | 0.764972 | 0 | 0 | 0 | 0 | 0 | 0.137343 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd1a30caa6315ebd6d3dd0ef22f6560d93a74a7 | 1,598 | py | Python | util/convert.py | Spacelog/earthlens | 1753da4e6194421ba152309de64d71fee5886edc | [
"CC-BY-3.0",
"CC0-1.0"
] | 4 | 2015-04-24T20:05:01.000Z | 2016-09-08T22:19:12.000Z | util/convert.py | Spacelog/earthlens | 1753da4e6194421ba152309de64d71fee5886edc | [
"CC-BY-3.0",
"CC0-1.0"
] | null | null | null | util/convert.py | Spacelog/earthlens | 1753da4e6194421ba152309de64d71fee5886edc | [
"CC-BY-3.0",
"CC0-1.0"
] | null | null | null | from __future__ import division, absolute_import, print_function, unicode_literals
import subprocess
import os
import os.path
import sys
SIZES = {'square': ['-resize', '720x720^', '+repage', '-gravity', 'Center', '-crop', '720x720+0+0'],
'large': ['-resize', '1800x1800'],
'original': []}
def get_pre_params(mission):
params = []
if mission in ('SL2', 'SL3', 'SL4'):
params += ['-fuzz', '20%', '-trim']
params.append('-normalize')
if mission in ('SL2', 'SL3', 'SL4'):
params += ['-color-matrix', '1.12 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0']
return params
def get_convert_command(in_file, size, mission, out_file):
params = get_pre_params(mission)
params += SIZES[size]
params += ['-unsharp', '0', '-quality', '90']
return ["convert", in_file] + params + [out_file]
def get_output_path(path, mission, size):
return os.path.join(path, mission, size.lower())
def process_file(input_file, output_path):
mission, roll, frame = os.path.basename(input_file).rsplit('.', 1)[0].split('-')
for size in SIZES.keys():
path = get_output_path(output_path, mission, size)
try:
os.makedirs(path)
except OSError:
pass
output_file = os.path.join(path, "%s-%s-%s.jpg" % (mission, roll, frame))
cmd = get_convert_command(input_file, size, mission, output_file)
ret = subprocess.call(cmd)
if ret != 0:
print("Failed!")
sys.exit(ret)
if __name__ == '__main__':
process_file(sys.argv[1], sys.argv[2])
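# Illustrative example (the file name and output path are placeholders): for an input
# "AS08-14-2383.jpg" and output path "out/", process_file() writes square, large and
# original variants under out/AS08/<size>/, running ImageMagick commands equivalent to:
#
#     convert AS08-14-2383.jpg -normalize -resize 720x720^ +repage -gravity Center \
#         -crop 720x720+0+0 -unsharp 0 -quality 90 out/AS08/square/AS08-14-2383.jpg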
| 34.73913 | 103 | 0.602628 | 226 | 1,598 | 4.084071 | 0.358407 | 0.052004 | 0.065005 | 0.073673 | 0.143012 | 0.088841 | 0.088841 | 0.032503 | 0.032503 | 0.032503 | 0 | 0.05712 | 0.222153 | 1,598 | 45 | 104 | 35.511111 | 0.685438 | 0 | 0 | 0.051282 | 0 | 0.025641 | 0.162703 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0.025641 | 0.128205 | 0.025641 | 0.307692 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd54c73287b58c57ebd75f555a973b6fc959442 | 6,285 | py | Python | bank_server/bank_server/admin_backend.py | maxCut/0xBU_SSS_ATM | 5274aa837a4f446dfc3f90ff86b35ad1c413483f | [
"Apache-2.0"
] | null | null | null | bank_server/bank_server/admin_backend.py | maxCut/0xBU_SSS_ATM | 5274aa837a4f446dfc3f90ff86b35ad1c413483f | [
"Apache-2.0"
] | null | null | null | bank_server/bank_server/admin_backend.py | maxCut/0xBU_SSS_ATM | 5274aa837a4f446dfc3f90ff86b35ad1c413483f | [
"Apache-2.0"
] | 1 | 2018-09-10T05:54:53.000Z | 2018-09-10T05:54:53.000Z | """ Admin Backend
This module implements the admin interface as defined by the rules and
requirements of the 2018 Collegiate eCTF.
The module exposes the following functions using an xmlrpcserver listening on
host 127.0.0.1 and port 1338
The following interface must be supported by the XMLRPC server
running in your Bank Server.
------------------------------------------------------------------------
function:
ready_for_atm - check if bank is ready for atms to connect
args:
None
returns:
bool: True for success, False otherwise.
------------------------------------------------------------------------
function:
create_account
args:
param1 (string - max length 1024): AccountName for created card
param2 (int): Starting account balance
returns:
xmlrpclib base64: Card provisioning material on Success.
bool: False otherwise.
------------------------------------------------------------------------
function:
update_balance
args:
param1 (string - max length 1024): AccountName of card
param2 (int): new account balance
returns:
bool: True for success, False otherwise.
------------------------------------------------------------------------
function:
check_balance
args:
param1 (string - max length 1024): AccountName of card
returns:
int: Account balance on Success.
bool: False otherwise.
------------------------------------------------------------------------
function:
create_atm
args:
None
returns:
xmlrpclib base64:: ATM provisioning material on Success.
bool: False otherwise.
------------------------------------------------------------------------
"""
import uuid
import logging
import xmlrpclib
import os
from SimpleXMLRPCServer import SimpleXMLRPCServer
from . import DB
class AdminBackend(object):
""" Implemenation of Admin Interface fulfilling competition requirements
The methods create_account, update_balance, check_balance, and create_atm
are exposed via an xmlrpc server in __init__. Introspection functions are
also exposed to ease service discovery on the client side.
"""
def __init__(self, config, db_mutex, ready_event):
""" __init__ reads config object and registers interface to xmlrpc
Args:
config (dict): dictionary with xmlrpc host and port information
as well as database filepath
db_mutex (object): mutex for accessing database
"""
super(AdminBackend, self).__init__()
self.admin_host = config['admin']['host']
self.admin_port = config['admin']['port']
self.db_path = config['database']['db_path']
self.db_mutex = db_mutex
self.ready_event = ready_event
self.db_obj = DB(db_path=self.db_path)
server = SimpleXMLRPCServer((self.admin_host, self.admin_port))
server.register_introspection_functions()
server.register_function(self.create_account)
server.register_function(self.update_balance)
server.register_function(self.check_balance)
server.register_function(self.create_atm)
server.register_function(self.ready_for_atm)
logging.info('admin interface listening on ' + self.admin_host + ':' + str(self.admin_port))
server.serve_forever()
def ready_for_atm(self):
return self.ready_event.isSet()
def create_account(self, account_name, amount):
"""Create account with account_name starting amount
Args:
account_name(string): name for account
amount(string): initial balance
Returns:
Returns randomly generated secret | card_id
False on Failure.
"""
try:
amount = int(amount)
except ValueError:
logging.info('amount must be a integer')
return False
card_id = str(uuid.uuid4())
if self.db_obj.admin_create_account(account_name, card_id, amount):
logging.info('admin create account success')
r = os.urandom(32)
rand_key = os.urandom(32)
return xmlrpclib.Binary(r + rand_key + card_id)
logging.info('admin create account failed')
return False
def update_balance(self, account_name, amount):
"""Update balance of account: account_name with amount
Args:
account_name(string): account_name of account
amount(string): new balance
Returns:
Returns True on Success. False on Failure.
"""
if self.db_obj.admin_set_balance(account_name, amount):
logging.info('admin update balance success')
return True
logging.info('admin update balance failure')
return False
def check_balance(self, account_name):
"""Check balance of account: account_name
Args:
account_name(string): account_name of account
Returns:
Returns balance (string) on Success. False on Failure.
"""
balance = self.db_obj.admin_get_balance(account_name)
if balance is not None:
logging.info('admin check_balance success')
return balance
logging.info('admin check_balance failure')
return False
# TODO: create provisioning data
def create_atm(self):
"""Create atm
Returns:
Returns hsm id | hsm key on Success
False on Failure.
"""
hsm_id = str(uuid.uuid4())
hsm_key = os.urandom(32)
rand_key = os.urandom(32)
if self.db_obj.admin_create_atm(hsm_id, hsm_key):
logging.info('admin create_atm success')
return xmlrpclib.Binary(hsm_key + rand_key + hsm_id)
logging.info('admin create_atm failure')
return False
| 31.903553 | 100 | 0.578202 | 676 | 6,285 | 5.214497 | 0.230769 | 0.043688 | 0.040851 | 0.036879 | 0.357163 | 0.20227 | 0.177589 | 0.139574 | 0.073191 | 0.073191 | 0 | 0.010096 | 0.290851 | 6,285 | 196 | 101 | 32.066327 | 0.780794 | 0.526651 | 0 | 0.112903 | 0 | 0 | 0.1114 | 0 | 0 | 0 | 0 | 0.005102 | 0 | 1 | 0.096774 | false | 0 | 0.096774 | 0.016129 | 0.370968 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd5f22526594e3d177c041ffc5c9d3b416938d1 | 2,276 | py | Python | sevdesk/client/models/discount_position_model.py | HpLightcorner/SevDesk-Python-Client | 303ca8dddd78da4291e7d23692ccfb147c7ba31a | [
"MIT"
] | null | null | null | sevdesk/client/models/discount_position_model.py | HpLightcorner/SevDesk-Python-Client | 303ca8dddd78da4291e7d23692ccfb147c7ba31a | [
"MIT"
] | null | null | null | sevdesk/client/models/discount_position_model.py | HpLightcorner/SevDesk-Python-Client | 303ca8dddd78da4291e7d23692ccfb147c7ba31a | [
"MIT"
] | null | null | null | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="DiscountPositionModel")
@attr.s(auto_attribs=True)
class DiscountPositionModel:
"""
Attributes:
id (Union[Unset, int]): The discount id
text (Union[Unset, None, str]):
percentage (Union[Unset, None, bool]):
value (Union[Unset, None, float]):
"""
id: Union[Unset, int] = UNSET
text: Union[Unset, None, str] = UNSET
percentage: Union[Unset, None, bool] = UNSET
value: Union[Unset, None, float] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
id = self.id
text = self.text
percentage = self.percentage
value = self.value
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if id is not UNSET:
field_dict["id"] = id
if text is not UNSET:
field_dict["text"] = text
if percentage is not UNSET:
field_dict["percentage"] = percentage
if value is not UNSET:
field_dict["value"] = value
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
id = d.pop("id", UNSET)
text = d.pop("text", UNSET)
percentage = d.pop("percentage", UNSET)
value = d.pop("value", UNSET)
discount_position_model = cls(
id=id,
text=text,
percentage=percentage,
value=value,
)
discount_position_model.additional_properties = d
return discount_position_model
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
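# Illustrative round trip (not part of the generated model):
#
#     model = DiscountPositionModel.from_dict({"id": 1, "percentage": True, "value": 10.0})
#     assert model.to_dict() == {"id": 1, "percentage": True, "value": 10.0}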
| 27.756098 | 77 | 0.60413 | 276 | 2,276 | 4.822464 | 0.231884 | 0.060105 | 0.06311 | 0.045079 | 0.211871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.281195 | 2,276 | 81 | 78 | 28.098765 | 0.81357 | 0.076011 | 0 | 0 | 0 | 0 | 0.030903 | 0.01014 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12963 | false | 0 | 0.055556 | 0.055556 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd7366563ae9695eda8b234ae1a43b39041bb72 | 496 | py | Python | ditto/signup/urls.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | null | null | null | ditto/signup/urls.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | 9 | 2015-11-10T15:17:22.000Z | 2015-11-12T11:07:02.000Z | ditto/signup/urls.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
"",
# default Member signup view
url(r"^signup/$", views.signup, name="account_signup"),
# Role specific signup view
url(r"^signup/(\w+)/$", views.signup, name="account_signup_role"),
url(r"^invites/$", views.invites, name="invites"),
url(r"^invites/add/$", views.add_invite, name="add_invite"),
url(r"^invites/revoke/$", views.revoke_invite, name="revoke_invite")
)
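# Illustrative example (assuming these patterns are included at the site root):
#     reverse("account_signup_role", args=["mentor"]) resolves to "/signup/mentor/"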
| 26.105263 | 72 | 0.659274 | 65 | 496 | 4.923077 | 0.353846 | 0.0625 | 0.103125 | 0.0875 | 0.325 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163306 | 496 | 18 | 73 | 27.555556 | 0.771084 | 0.104839 | 0 | 0 | 0 | 0 | 0.290249 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd82dd5ee3b5917f2191f8c81b51d94a945ac5b | 2,664 | py | Python | seafobj/backends/alioss.py | RedTailBullet/seafobj | bee2e460ac56b4415141258819f6f8832da199c3 | [
"Apache-2.0"
] | null | null | null | seafobj/backends/alioss.py | RedTailBullet/seafobj | bee2e460ac56b4415141258819f6f8832da199c3 | [
"Apache-2.0"
] | null | null | null | seafobj/backends/alioss.py | RedTailBullet/seafobj | bee2e460ac56b4415141258819f6f8832da199c3 | [
"Apache-2.0"
] | null | null | null | from .base import AbstractObjStore
from seafobj.exceptions import GetObjectError
import http.client
import logging
import oss2
# set log level to WARNING
# the api set_file_logger exists after oss2 2.6.0, which has a lot of 'INFO' log
try:
log_file_path = "log.log"
oss2.set_file_logger(log_file_path, 'oss2', logging.WARNING)
except:
pass
class OSSConf(object):
def __init__(self, key_id, key, bucket_name, host):
self.key_id = key_id
self.key = key
self.bucket_name = bucket_name
self.host = host
class SeafOSSClient(object):
'''Wraps an OSS connection and a bucket'''
def __init__(self, conf):
self.conf = conf
# Due to a bug in httplib we can't use https
self.auth = oss2.Auth(conf.key_id, conf.key)
self.service = oss2.Service(self.auth, conf.host)
self.bucket = oss2.Bucket(self.auth, conf.host, conf.bucket_name)
def read_object_content(self, obj_id):
res = self.bucket.get_object(obj_id)
return res.read()
class SeafObjStoreOSS(AbstractObjStore):
'''OSS backend for seafile objects'''
def __init__(self, compressed, oss_conf, crypto=None):
AbstractObjStore.__init__(self, compressed, crypto)
self.oss_client = SeafOSSClient(oss_conf)
def read_obj_raw(self, repo_id, version, obj_id):
real_obj_id = '%s/%s' % (repo_id, obj_id)
data = self.oss_client.read_object_content(real_obj_id)
return data
def get_name(self):
return 'OSS storage backend'
def list_objs(self, repo_id=None):
object_list = []
next_marker = ''
while True:
if repo_id != None:
Simp_obj_info = self.oss_client.bucket.list_objects(repo_id, '',next_marker)
else:
Simp_obj_info = self.oss_client.bucket.list_objects('', '', next_marker)
object_list = Simp_obj_info.object_list
for key in object_list:
token = key.key.split('/')
if len(token) == 2:
repo_id = token[0]
obj_id = token[1]
size = key.size
obj = [repo_id, obj_id, size]
yield obj
if Simp_obj_info.is_truncated == False:
break
else:
next_marker = Simp_obj_info.next_marker
def obj_exists(self, repo_id, obj_id):
key = '%s/%s' % (repo_id, obj_id)
return self.oss_client.bucket.object_exists(key)
def write_obj(self, data, repo_id, obj_id):
key = '%s/%s' % (repo_id, obj_id)
self.oss_client.bucket.put_object(key, data)
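# Illustrative usage (not part of the original module; credentials, repo_id and obj_id
# are placeholders):
#
#     conf = OSSConf('key-id', 'key-secret', 'bucket-name', 'oss-cn-hangzhou.aliyuncs.com')
#     store = SeafObjStoreOSS(compressed=True, oss_conf=conf)
#     data = store.read_obj_raw(repo_id, 1, obj_id)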
| 32.096386 | 92 | 0.611111 | 366 | 2,664 | 4.174863 | 0.284153 | 0.039267 | 0.051047 | 0.043194 | 0.097513 | 0.097513 | 0.089005 | 0.089005 | 0.089005 | 0.03534 | 0 | 0.007392 | 0.289039 | 2,664 | 82 | 93 | 32.487805 | 0.799366 | 0.080706 | 0 | 0.065574 | 0 | 0 | 0.018883 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147541 | false | 0.016393 | 0.065574 | 0.016393 | 0.327869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acd91e8148dcb1e64702755b93a778e15518124b | 9,147 | py | Python | data/cv_to_wav.py | ralfeger/language-identification | 80c79423389207f197911d7b0eb78143f25f44b6 | [
"BSD-2-Clause"
] | null | null | null | data/cv_to_wav.py | ralfeger/language-identification | 80c79423389207f197911d7b0eb78143f25f44b6 | [
"BSD-2-Clause"
] | null | null | null | data/cv_to_wav.py | ralfeger/language-identification | 80c79423389207f197911d7b0eb78143f25f44b6 | [
"BSD-2-Clause"
] | null | null | null | """
:author:
Paul Bethge (bethge@zkm.de)
2021
:License:
This package is published under Simplified BSD License.
"""
"""
This script extracts and converts audio samples from Common Voice.
"""
import os
import pydub
import argparse
from threading import Thread
import numpy as np
import scipy.io.wavfile as wav
import shutil
from yaml import load
from src.audio.chop_up import chop_up_audio
def sentence_is_too_short(sentence_len, language):
if language == "mandarin":
return sentence_len < 3
else:
return sentence_len < 6
def traverse_csv(language, input_dir, output_dir, max_chops,
desired_audio_length_s, sample_rate, sample_width,
allowed_downvotes, remove_raw, min_length_s, max_silence_s,
energy_threshold, use_validated_set):
"""
traverses the language specific file, extract and save important samples.
"""
lang = language["lang"]
lang_abb = language["dir"]
input_sub_dir = os.path.join(input_dir, lang_abb)
input_sub_dir_clips = os.path.join(input_sub_dir, "clips")
splits = ["train", "dev", "test"]
fast_forward = 0
for split_index, split in enumerate(splits):
output_dir_wav = os.path.join(output_dir, "wav", split, lang)
output_dir_raw = os.path.join(output_dir, "raw", split, lang)
# create subdirectories in the output directory
if not os.path.exists(output_dir_wav):
os.makedirs(output_dir_wav)
if not os.path.exists(output_dir_raw):
os.makedirs(output_dir_raw)
# keep track of files handled
processed_files = 0
produced_files = 0
to_produce = int(max_chops[split_index])
done = False
if use_validated_set:
input_clips_file = os.path.join(input_sub_dir, "validated.tsv")
if to_produce == -1:
print("when using validated.tsv, please set number of chops to a positive number")
exit()
else:
input_clips_file = os.path.join(input_sub_dir, split + ".tsv")
# open mozillas' dataset file
with open(input_clips_file) as f:
try:
# skip the first line
line = f.readline()
# when using the validated.tsv we have to start where we left off
if use_validated_set:
for skip in range(fast_forward):
f.readline()
while True:
# get a line
line = f.readline().split('\t')
# if the line is not empty
if line[0] != "":
# check if the sample contains more than X symbols
# and has not more than Y downvotes
sentence = line[2]
too_short = sentence_is_too_short(len(sentence), language["lang"])
messy = int(line[4]) > allowed_downvotes
if too_short or messy:
continue
# get mp3 filename
mp3_filename = line[1]
mp3_path = os.path.join(input_sub_dir_clips, mp3_filename)
wav_path_raw = os.path.join(output_dir_raw,
mp3_filename[:-4] + ".wav")
# convert mp3 to wav
audio = pydub.AudioSegment.from_mp3(mp3_path)
audio = pydub.effects.normalize(audio)
audio = audio.set_frame_rate(sample_rate)
audio = audio.set_channels(1)
audio = audio.set_sample_width(sample_width)
audio.export(wav_path_raw, format="wav")
processed_files += 1
# chop up the samples and write to file
rand_int = np.random.randint(low=0, high=2)
padding_choice = ["Data", "Silence"][rand_int]
chips = chop_up_audio (wav_path_raw, padding=padding_choice,
desired_length_s=desired_audio_length_s,
min_length_s=min_length_s, max_silence_s=max_silence_s,
threshold=energy_threshold)
for chip_name, chip_fs, chip_data in chips:
wav_path = os.path.join(output_dir_wav, chip_name + ".wav")
wav.write(wav_path, chip_fs, chip_data)
produced_files += 1
# remove the intermediate file
if remove_raw and os.path.exists(wav_path_raw):
os.remove(wav_path_raw)
# check if we are done yet
if to_produce != -1 and produced_files >= to_produce:
done = True
break
if done:
# when using the validated.tsv we have to make sure the same
# speakers won't appear in more than one set. Luckily, they
# are ordered by speaker hash id.
if use_validated_set:
last_speaker = speaker = line[0]
while speaker == last_speaker:
speaker = f.readline().split('\t')[0]
fast_forward += 1
break
else:
print("Nothing left!")
break
except Exception as e:
print("Error:", e)
print("Processed %d mp3 files for %s-%s" % (processed_files, lang, split))
print("Produced %d wav files for %s-%s" % (produced_files, lang, split))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', default=None,
help="path to the config yaml file. When given, arguments will be ignored")
parser.add_argument("--cv_input_dir", type=str, default=None,
help="directory containing all languages")
parser.add_argument("--cv_output_dir", type=str, default="../res",
help="directory to receive converted clips of all languages")
# Data
parser.add_argument("--max_chops", type=int, nargs=3, default=[-1, -1, -1],
help="amount of maximum wav chops to be produced per split. -1 means all.")
parser.add_argument("--use_validated_set", type=bool, default=False,
help="whether to use the train, test and dev sets or all validated data")
parser.add_argument("--allowed_downvotes", type=int, default=0,
help="amount of downvotes allowed")
# Audio file properties
parser.add_argument("--audio_length_s", type=int, default=5,
help="length of wav files being produced")
parser.add_argument("--min_length_s", type=float, default=2.5,
help="min length of an audio event")
parser.add_argument("--max_silence_s", type=float, default=1,
help="max length of silence in an audio event")
parser.add_argument("--energy_threshold", type=float, default=60,
help="minimum energy for a frame to be valid")
parser.add_argument("--sample_rate", type=int, default=16000,
help="sample rate of files being produced")
parser.add_argument('--sample_width', type=int, default=2, choices=(1, 2, 4),
help='number of bytes per sample')
# System
parser.add_argument("--parallelize", type=bool, default=True,
help="whether to use multiprocessing")
parser.add_argument("--remove_raw", type=bool, default=True,
help="whether to remove intermediate file")
args = parser.parse_args()
# overwrite arguments when config is given
if args.config_path:
config = load(open(args.config_path, "rb"))
if config is None:
print("Could not find config file")
exit(-1)
else:
args.cv_input_dir = config["cv_input_dir"]
args.cv_output_dir = config["cv_output_dir"]
args.max_chops = config["max_chops"]
args.allowed_downvotes = config["allowed_downvotes"]
args.audio_length_s = config["audio_length_s"]
args.max_silence_s = config["max_silence_s"]
args.min_length_s = config["min_length_s"]
args.energy_threshold = config["energy_threshold"]
args.sample_rate = config["sample_rate"]
args.sample_width = config["sample_width"]
args.parallelize = config["parallelize"]
args.remove_raw = config["remove_raw"]
args.use_validated_set = config["use_validated_set"]
language_table = config["language_table"]
# copy config to output dir
if not os.path.exists(args.cv_output_dir):
os.makedirs(args.cv_output_dir)
shutil.copy(args.config_path, args.cv_output_dir)
else:
language_table = [
{"lang": "english", "dir": "en"},
{"lang": "german", "dir": "de"},
{"lang": "french", "dir": "fr"},
{"lang": "spanish", "dir": "es"},
{"lang": "mandarin", "dir": "zh-CN"},
{"lang": "russian", "dir": "ru"},
# {"lang": "unknown", "dir": "ja"},
# {"lang": "unknown", "dir": "ar"},
# {"lang": "unknown", "dir": "ta"},
# {"lang": "unknown", "dir": "pt"},
# {"lang": "unknown", "dir": "tr"},
# {"lang": "unknown", "dir": "it"},
# {"lang": "unknown", "dir": "uk"},
# {"lang": "unknown", "dir": "el"},
# {"lang": "unknown", "dir": "id"},
# {"lang": "unknown", "dir": "fy-NL"},
]
# count the number of unknown languages
unknown = 0
for language in language_table:
if language["lang"] == "unknown":
unknown += 1
threads = []
for language in language_table:
max_chops = args.max_chops
if language["lang"] == "unknown":
max_chops = [chops // unknown if chops > 0 else chops for chops in max_chops]  # share the budget across "unknown" languages
# prepare arguments
function_args = (language, args.cv_input_dir, args.cv_output_dir, max_chops,
args.audio_length_s, args.sample_rate, args.sample_width,
args.allowed_downvotes, args.remove_raw, args.min_length_s,
args.max_silence_s, args.energy_threshold, args.use_validated_set)
# process current language for all splits
if args.parallelize:
threads.append(Thread(target=traverse_csv, args=function_args,daemon=True))
else:
traverse_csv(*function_args)
# wait for threads to end
if args.parallelize:
for t in threads:
t.start()
for t in threads:
t.join()
if args.remove_raw:
shutil.rmtree(os.path.join(args.cv_output_dir, "raw"))
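# Illustrative invocation (paths and counts are placeholders):
#
#     python cv_to_wav.py --cv_input_dir /data/cv-corpus --cv_output_dir ../res \
#         --max_chops 10000 1000 1000 --audio_length_s 5 --sample_rate 16000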
| 32.902878 | 86 | 0.674319 | 1,312 | 9,147 | 4.493902 | 0.230183 | 0.030529 | 0.040366 | 0.015265 | 0.145692 | 0.117877 | 0.068182 | 0.022727 | 0.011872 | 0 | 0 | 0.007791 | 0.200175 | 9,147 | 277 | 87 | 33.021661 | 0.798114 | 0.145731 | 0 | 0.10929 | 0 | 0 | 0.17833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010929 | false | 0 | 0.04918 | 0 | 0.071038 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acda0b765a740e29df4ffe4f18b5ae0a51533d86 | 2,628 | py | Python | samples/asynctests/test_loop_param_async.py | scbedd/azure-uamqp-python | f27e927bb36719b831d592def5cc852b45db56c8 | [
"MIT"
] | null | null | null | samples/asynctests/test_loop_param_async.py | scbedd/azure-uamqp-python | f27e927bb36719b831d592def5cc852b45db56c8 | [
"MIT"
] | 2 | 2019-03-22T19:08:34.000Z | 2019-05-17T23:42:59.000Z | samples/asynctests/test_loop_param_async.py | scbedd/azure-uamqp-python | f27e927bb36719b831d592def5cc852b45db56c8 | [
"MIT"
] | null | null | null | import sys
import pytest
import asyncio
from uamqp.async_ops.mgmt_operation_async import MgmtOperationAsync
from uamqp.async_ops.receiver_async import MessageReceiverAsync
from uamqp.authentication.cbs_auth_async import CBSAsyncAuthMixin
from uamqp.async_ops.sender_async import MessageSenderAsync
from uamqp.async_ops.client_async import (
AMQPClientAsync,
SendClientAsync,
ReceiveClientAsync,
ConnectionAsync,
)
@pytest.mark.asyncio
@pytest.mark.skipif(sys.version_info < (3, 10), reason="raise error if loop passed in >=3.10")
async def test_error_loop_arg_async():
with pytest.raises(ValueError) as e:
AMQPClientAsync("fake_addr", loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
client_async = AMQPClientAsync("sb://resourcename.servicebus.windows.net/")
assert len(client_async._internal_kwargs) == 0 # pylint:disable=protected-access
with pytest.raises(ValueError) as e:
SendClientAsync("fake_addr", loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
client_async = SendClientAsync("sb://resourcename.servicebus.windows.net/")
assert len(client_async._internal_kwargs) == 0 # pylint:disable=protected-access
with pytest.raises(ValueError) as e:
ReceiveClientAsync("fake_addr", loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
client_async = ReceiveClientAsync("sb://resourcename.servicebus.windows.net/")
assert len(client_async._internal_kwargs) == 0 # pylint:disable=protected-access
with pytest.raises(ValueError) as e:
ConnectionAsync("fake_addr", sasl='fake_sasl', loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
with pytest.raises(ValueError) as e:
MgmtOperationAsync("fake_addr", loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
with pytest.raises(ValueError) as e:
MessageReceiverAsync("fake_addr", "session", "target", "on_message_received", loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
with pytest.raises(ValueError) as e:
MessageSenderAsync("fake_addr", "source", "target", loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
async def auth_async_loop():
auth_async = CBSAsyncAuthMixin()
with pytest.raises(ValueError) as e:
await auth_async.create_authenticator_async("fake_conn", loop=asyncio.get_event_loop())
assert "no longer supports loop" in e
loop = asyncio.get_event_loop()
loop.run_until_complete(auth_async_loop())
| 43.8 | 116 | 0.73554 | 337 | 2,628 | 5.537092 | 0.237389 | 0.053055 | 0.067524 | 0.09164 | 0.550375 | 0.538049 | 0.506967 | 0.506967 | 0.506967 | 0.506967 | 0 | 0.00411 | 0.166667 | 2,628 | 59 | 117 | 44.542373 | 0.847945 | 0.036149 | 0 | 0.38 | 0 | 0 | 0.185053 | 0.048636 | 0 | 0 | 0 | 0 | 0.22 | 1 | 0 | false | 0.02 | 0.16 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ace72c7b0b393461b2fda6bee82b7824fc377c54 | 5,542 | py | Python | modin/test/exchange/dataframe_protocol/test_general.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/test/exchange/dataframe_protocol/test_general.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/test/exchange/dataframe_protocol/test_general.py | yizx-1017/modin | 2eee697135b30a9694c202456db0635c52c9e6c9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Dataframe exchange protocol tests that are common for every implementation."""
import pytest
import math
import ctypes
import modin.pandas as pd
@pytest.fixture
def df_from_dict():
def maker(dct, is_categorical=False):
df = pd.DataFrame(dct, dtype=("category" if is_categorical else None))
return df
return maker
@pytest.mark.parametrize(
"test_data",
[
{"a": ["foo", "bar"], "b": ["baz", "qux"]},
{"a": [1.5, 2.5, 3.5], "b": [9.2, 10.5, 11.8]},
{"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]},
],
ids=["str_data", "float_data", "int_data"],
)
def test_only_one_dtype(test_data, df_from_dict):
columns = list(test_data.keys())
df = df_from_dict(test_data)
dfX = df.__dataframe__()
column_size = len(test_data[columns[0]])
for column in columns:
assert dfX.get_column_by_name(column).null_count == 0
assert dfX.get_column_by_name(column).size == column_size
assert dfX.get_column_by_name(column).offset == 0
def test_float_int(df_from_dict):
df = df_from_dict(
{
"a": [1, 2, 3],
"b": [3, 4, 5],
"c": [1.5, 2.5, 3.5],
"d": [9, 10, 11],
"e": [True, False, True],
"f": ["a", "", "c"],
}
)
dfX = df.__dataframe__()
columns = {"a": 0, "b": 0, "c": 2, "d": 0, "e": 20, "f": 21}
for column, kind in columns.items():
colX = dfX.get_column_by_name(column)
assert colX.null_count == 0
assert colX.size == 3
assert colX.offset == 0
assert colX.dtype[0] == kind
assert dfX.get_column_by_name("c").dtype[1] == 64
def test_na_float(df_from_dict):
df = df_from_dict({"a": [1.0, math.nan, 2.0]})
dfX = df.__dataframe__()
colX = dfX.get_column_by_name("a")
assert colX.null_count == 1
def test_noncategorical(df_from_dict):
df = df_from_dict({"a": [1, 2, 3]})
dfX = df.__dataframe__()
colX = dfX.get_column_by_name("a")
with pytest.raises(RuntimeError):
colX.describe_categorical
def test_categorical(df_from_dict):
df = df_from_dict(
{"weekday": ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]},
is_categorical=True,
)
colX = df.__dataframe__().get_column_by_name("weekday")
is_ordered, is_dictionary, _ = colX.describe_categorical.values()
assert isinstance(is_ordered, bool)
assert isinstance(is_dictionary, bool)
def test_dataframe(df_from_dict):
df = df_from_dict(
{"x": [True, True, False], "y": [1, 2, 0], "z": [9.2, 10.5, 11.8]}
)
dfX = df.__dataframe__()
assert dfX.num_columns() == 3
assert dfX.num_rows() == 3
assert dfX.num_chunks() == 1
assert list(dfX.column_names()) == ["x", "y", "z"]
assert list(dfX.select_columns((0, 2)).column_names()) == list(
dfX.select_columns_by_name(("x", "z")).column_names()
)
@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)])
def test_df_get_chunks(size, n_chunks, df_from_dict):
df = df_from_dict({"x": list(range(size))})
dfX = df.__dataframe__()
chunks = list(dfX.get_chunks(n_chunks))
assert len(chunks) == n_chunks
assert sum(chunk.num_rows() for chunk in chunks) == size
@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)])
def test_column_get_chunks(size, n_chunks, df_from_dict):
df = df_from_dict({"x": list(range(size))})
dfX = df.__dataframe__()
chunks = list(dfX.get_column(0).get_chunks(n_chunks))
assert len(chunks) == n_chunks
assert sum(chunk.size for chunk in chunks) == size
def test_get_columns(df_from_dict):
df = df_from_dict({"a": [0, 1], "b": [2.5, 3.5]})
dfX = df.__dataframe__()
for colX in dfX.get_columns():
assert colX.size == 2
assert colX.num_chunks() == 1
assert dfX.get_column(0).dtype[0] == 0
assert dfX.get_column(1).dtype[0] == 2
def test_buffer(df_from_dict):
arr = [0, 1, -1]
df = df_from_dict({"a": arr})
dfX = df.__dataframe__()
colX = dfX.get_column(0)
bufX = colX.get_buffers()
dataBuf, dataDtype = bufX["data"]
assert dataBuf.bufsize > 0
assert dataBuf.ptr != 0
device, _ = dataBuf.__dlpack_device__()
assert dataDtype[0] == 0
if device == 1: # CPU-only as we're going to directly read memory here
bitwidth = dataDtype[1]
ctype = {
8: ctypes.c_int8,
16: ctypes.c_int16,
32: ctypes.c_int32,
64: ctypes.c_int64,
}[bitwidth]
for idx, truth in enumerate(arr):
val = ctype.from_address(dataBuf.ptr + idx * (bitwidth // 8)).value
assert val == truth, f"Buffer at index {idx} mismatch"
| 31.850575 | 87 | 0.621075 | 826 | 5,542 | 3.940678 | 0.25908 | 0.03871 | 0.064516 | 0.036866 | 0.264209 | 0.241782 | 0.217204 | 0.173579 | 0.159447 | 0.152074 | 0 | 0.03345 | 0.228618 | 5,542 | 173 | 88 | 32.034682 | 0.727953 | 0.160051 | 0 | 0.16129 | 0 | 0 | 0.041406 | 0 | 0 | 0 | 0 | 0 | 0.225806 | 1 | 0.096774 | false | 0 | 0.032258 | 0 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ace760596f541a5dea7640587f44a15d7febfbce | 8,341 | py | Python | pyy1/.pycharm_helpers/pydev/_pydevd_bundle/pydevd_trace_dispatch_regular.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/pydev/_pydevd_bundle/pydevd_trace_dispatch_regular.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | null | null | null | pyy1/.pycharm_helpers/pydev/_pydevd_bundle/pydevd_trace_dispatch_regular.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
] | 1 | 2019-02-06T14:50:03.000Z | 2019-02-06T14:50:03.000Z | import traceback
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import get_thread_id
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE
from _pydevd_bundle.pydevd_kill_all_pydevd_threads import kill_all_pydev_threads
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from _pydevd_bundle.pydevd_tracing import SetTrace
# IFDEF CYTHON
# # In Cython, PyDBAdditionalThreadInfo is bundled in the file.
# from cpython.object cimport PyObject
# from cpython.ref cimport Py_INCREF, Py_XDECREF
# ELSE
from _pydevd_bundle.pydevd_additional_thread_info import PyDBAdditionalThreadInfo
from _pydevd_bundle.pydevd_frame import PyDBFrame
# ENDIF
threadingCurrentThread = threading.currentThread
get_file_type = DONT_TRACE.get
# IFDEF CYTHON -- DONT EDIT THIS FILE (it is automatically generated)
# cdef dict global_cache_skips
# cdef dict global_cache_frame_skips
# ELSE
# ENDIF
# Cache where we should keep that we completely skipped entering some context.
# It needs to be invalidated when:
# - Breakpoints are changed
# It can be used when running regularly (without step over/step in/step return)
global_cache_skips = {}
global_cache_frame_skips = {}
def trace_dispatch(py_db, frame, event, arg):
t = threadingCurrentThread()
if getattr(t, 'pydev_do_not_trace', None):
return None
try:
additional_info = t.additional_info
if additional_info is None:
raise AttributeError()
except:
additional_info = t.additional_info = PyDBAdditionalThreadInfo()
thread_tracer = ThreadTracer((py_db, t, additional_info, global_cache_skips, global_cache_frame_skips))
# IFDEF CYTHON
# t._tracer = thread_tracer # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
# ELSE
# ENDIF
SetTrace(thread_tracer.__call__)
return thread_tracer.__call__(frame, event, arg)
# IFDEF CYTHON
# cdef class SafeCallWrapper:
# cdef method_object
# def __init__(self, method_object):
# self.method_object = method_object
# def __call__(self, *args):
# #Cannot use 'self' once inside the delegate call since we are borrowing the self reference f_trace field
# #in the frame, and that reference might get destroyed by set trace on frame and parents
# cdef PyObject* method_obj = <PyObject*> self.method_object
# Py_INCREF(<object>method_obj)
# ret = (<object>method_obj)(*args)
# Py_XDECREF (method_obj)
# return SafeCallWrapper(ret) if ret is not None else None
# cdef class ThreadTracer:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class ThreadTracer:
def __init__(self, args):
self._args = args
# ENDIF
def __call__(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
:param PyDB py_db:
This is the global debugger (this method should actually be added as a method to it).
'''
# IFDEF CYTHON
# cdef str filename;
# cdef str base;
# cdef int pydev_step_cmd;
# cdef tuple cache_key;
# cdef dict cache_skips;
# cdef bint is_stepping;
# cdef tuple abs_path_real_path_and_base;
# cdef PyDBAdditionalThreadInfo additional_info;
# ENDIF
# print('ENTER: trace_dispatch', frame.f_code.co_filename, frame.f_lineno, event, frame.f_code.co_name)
py_db, t, additional_info, cache_skips, frame_skips_cache = self._args
pydev_step_cmd = additional_info.pydev_step_cmd
is_stepping = pydev_step_cmd != -1
try:
if py_db._finish_debugging_session:
if not py_db._termination_event_set:
#that was not working very well because jython gave some socket errors
try:
if py_db.output_checker is None:
kill_all_pydev_threads()
except:
traceback.print_exc()
py_db._termination_event_set = True
return None
# if thread is not alive, cancel trace_dispatch processing
if not is_thread_alive(t):
py_db._process_thread_not_alive(get_thread_id(t))
return None # suspend tracing
try:
# Make fast path faster!
abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
if py_db.thread_analyser is not None:
py_db.thread_analyser.log_event(frame)
if py_db.asyncio_analyser is not None:
py_db.asyncio_analyser.log_event(frame)
filename = abs_path_real_path_and_base[1]
# Note: it's important that the context name is also given because we may hit something once
# in the global context and another in the local context.
cache_key = (frame.f_lineno, frame.f_code.co_name, filename)
if not is_stepping and cache_key in cache_skips:
# print('skipped: trace_dispatch (cache hit)', cache_key, frame.f_lineno, event, frame.f_code.co_name)
return None
file_type = get_file_type(abs_path_real_path_and_base[-1]) #we don't want to debug threading or anything related to pydevd
if file_type is not None:
if file_type == 1: # inlining LIB_FILE = 1
if py_db.not_in_scope(filename):
# print('skipped: trace_dispatch (not in scope)', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[cache_key] = 1
return None
else:
# print('skipped: trace_dispatch', abs_path_real_path_and_base[-1], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[cache_key] = 1
return None
if is_stepping:
if py_db.is_filter_enabled and py_db.is_ignored_by_filters(filename):
# ignore files matching stepping filters
return None
if py_db.is_filter_libraries and py_db.not_in_scope(filename):
# ignore library files while stepping
return None
# print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name, file_type)
if additional_info.is_tracing:
return None #we don't want to trace code invoked from pydevd_frame.trace_dispatch
# Just create PyDBFrame directly (removed support for Python versions < 2.5, which required keeping a weak
# reference to the frame).
ret = PyDBFrame((py_db, filename, additional_info, t, frame_skips_cache, (frame.f_code.co_name, frame.f_code.co_firstlineno, filename))).trace_dispatch(frame, event, arg)
if ret is None:
cache_skips[cache_key] = 1
return None
# IFDEF CYTHON
# return SafeCallWrapper(ret)
# ELSE
return ret
# ENDIF
except SystemExit:
return None
except Exception:
if py_db._finish_debugging_session:
return None # Don't log errors when we're shutting down.
# Log it
try:
if traceback is not None:
# This can actually happen during the interpreter shutdown in Python 2.7
traceback.print_exc()
except:
# Error logging? We're really in the interpreter shutdown...
# (https://github.com/fabioz/PyDev.Debugger/issues/8)
pass
return None
| 41.705 | 182 | 0.65208 | 1,095 | 8,341 | 4.663927 | 0.237443 | 0.015665 | 0.019581 | 0.023497 | 0.20325 | 0.140591 | 0.099863 | 0.070687 | 0.070687 | 0.044449 | 0 | 0.002527 | 0.288335 | 8,341 | 199 | 183 | 41.914573 | 0.857817 | 0.420933 | 0 | 0.318681 | 0 | 0 | 0.003853 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032967 | false | 0.010989 | 0.10989 | 0 | 0.318681 | 0.021978 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ace971367291d8e5e0522f781af9688ce461440b | 1,530 | py | Python | FaceDetection/ManualDetect.py | imkiller32/ImageProcessing-Finding-Particles- | 4ac4d801203737e27429d102421435ac874d533b | [
"MIT"
] | null | null | null | FaceDetection/ManualDetect.py | imkiller32/ImageProcessing-Finding-Particles- | 4ac4d801203737e27429d102421435ac874d533b | [
"MIT"
] | null | null | null | FaceDetection/ManualDetect.py | imkiller32/ImageProcessing-Finding-Particles- | 4ac4d801203737e27429d102421435ac874d533b | [
"MIT"
] | 1 | 2019-10-07T18:53:37.000Z | 2019-10-07T18:53:37.000Z | #This uses a video loaded from some directory ..You can specify your own path
#----------------------------------------#
#FACE DETECTION USING PYTHON3 AND OPENCV #
#--------AUTHOR- Ritesh Aggarwal---------#
#-----------Language->Python3------------#
#-----------Github:->imkiller32----------#
#---------Enjoy Your DETECTION-----------#
#importing useful library
import cv2
#import numpy as np
def main():
path = "C:\\Users\\imkiller\\AppData\\Local\\Programs\\Python\\Python36-32\\Lib\\site-packages\\cv2\\data\\"
ClassifierPath= path + "haarcascade_frontalface_default.xml"
facedetect=cv2.CascadeClassifier(ClassifierPath)
#resolution
w=800
h=600
#select a video path
cap=cv2.VideoCapture("E:\\FILES\\motivational\\ABC.mp4")
#setting width and height
cap.set(3,w)
cap.set(4,h)
while cap.isOpened():
ret,frame=cap.read()
if not ret: break #stop when the video ends or a frame cannot be read
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces = facedetect.detectMultiScale(gray,1.3,5)
for (x,y,w,h) in faces:
#debug
print('ok')
#Red color box over Face
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
cv2.imshow('DETECTION',frame)
if cv2.waitKey(1)==27: #exit on ESC
break
#releasing camera
cap.release()
#destroy window created
cv2.destroyAllWindows()
print('Bye...')
if __name__ == "__main__":
print('Starting software...')
main()
| 26.842105 | 113 | 0.555556 | 175 | 1,530 | 4.794286 | 0.697143 | 0.014303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034632 | 0.245098 | 1,530 | 56 | 114 | 27.321429 | 0.691775 | 0.318954 | 0 | 0 | 0 | 0.038462 | 0.215992 | 0.169263 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.038462 | 0 | 0.076923 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aceaec3ffa90f4f287b5276fec7f303eddd0bcbc | 7,378 | py | Python | enemy.py | jeremycryan/ScoreSpace8 | bc3418d5e3e132a7b4a177b2ebce4fc156a24f20 | [
"MIT"
] | 1 | 2020-05-05T07:38:03.000Z | 2020-05-05T07:38:03.000Z | enemy.py | jeremycryan/ScoreSpace8 | bc3418d5e3e132a7b4a177b2ebce4fc156a24f20 | [
"MIT"
] | null | null | null | enemy.py | jeremycryan/ScoreSpace8 | bc3418d5e3e132a7b4a177b2ebce4fc156a24f20 | [
"MIT"
] | null | null | null | import constants as c
import pygame
import math
from particle import Particle, Chunk, Fadeout
import os
import random
import time
lantern_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "lantern.png"))
lantern_touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "lantern_touched.png"))
big_lantern_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "big_lantern.png"))
big_lantern_touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "big_lantern_touched.png"))
perfect_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "perfect.png"))
perfect_surf_large = pygame.transform.scale(perfect_surf, (perfect_surf.get_width()*2, perfect_surf.get_height()*2))
good_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "good.png"))
okay_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "okay.png"))
nope_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "nope.png"))
class Enemy:
def __init__(self, game, radius = 30, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
self.game = game
self.radius = radius
self.x = x
self.y = y
self.angle = random.random() * 60 + 15
self.surf = lantern_surf
self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
self.touched_surf = lantern_touched_surf
self.touched_surf = pygame.transform.rotate(self.touched_surf, self.angle)
# self.draw_surf.set_colorkey(c.BLACK)
# self.touched_surf.set_colorkey(c.BLACK)
self.touched = False
self.launch_factor=1.0
self.glow = self.generate_glow()
self.age = random.random()
def generate_glow(self, radius=1.7):
glow_radius = int(radius * self.radius)
self.glow = pygame.Surface((glow_radius*2, glow_radius*2))
pygame.draw.circle(self.glow, c.WHITE, (glow_radius, glow_radius), glow_radius)
self.glow.set_alpha(20)
self.glow.set_colorkey(c.BLACK)
return self.glow
def update(self, dt, events):
if self.y < self.game.y_offset - self.radius*3:
self.remove()
self.age += dt
radius = 1.7 + 0.07*math.sin(self.age*25)
if self.y < self.game.y_offset + 1.5*c.WINDOW_HEIGHT:
self.glow = self.generate_glow(radius)
def draw(self, surface):
if self.y > self.game.y_offset + c.WINDOW_HEIGHT*2:
return
x, y = self.game.game_position_to_screen_position((self.x, self.y))
surface.blit(self.glow, (int(x - self.glow.get_width()//2), int(y - self.glow.get_height()//2)))
if not self.touched:
surface.blit(self.draw_surf,
(int(x - self.draw_surf.get_width()/2), int(y - self.draw_surf.get_height()/2)))
else:
surface.blit(self.touched_surf,
(int(x - self.draw_surf.get_width()/2), int(y - self.draw_surf.get_height()/2)))
def touch(self):
self.touched = True
def remove(self):
self.game.enemies.remove(self)
def destroy(self, cut_prop=0.5):
self.remove()
angle = self.game.player.get_angle()
cutoff = int(cut_prop*self.radius*2)
top_offset = self.radius - cutoff//2
bottom_offset = -cutoff//2
angle_rad = -angle/180 * math.pi
top_offset = (top_offset * math.sin(angle_rad), top_offset * math.cos(angle_rad))
bottom_offset = (bottom_offset * math.sin(angle_rad), bottom_offset * math.cos(angle_rad))
particle_surf = pygame.Surface((self.radius*2, cutoff))
particle_surf.blit(self.surf, (0, 0))
top_half = Particle(self.game,
particle_surf,
(self.x + top_offset[0], self.y + top_offset[1]),
rotation=120,
velocity=(-30, 500),
angle=angle)
self.game.particles.append(top_half)
particle_surf = pygame.Surface((self.radius*2, self.radius*2 - cutoff))
particle_surf.blit(self.surf, (0, -cutoff))
bottom_half = Particle(self.game,
particle_surf,
(self.x + bottom_offset[0], self.y + bottom_offset[1]),
rotation=-40,
velocity=(60, 150),
angle=angle)
self.game.particles.append(bottom_half)
self.game.particles.append(Fadeout(self.game, self.glow, (self.x, self.y)))
for i in range(30):
self.game.particles.append(Chunk(self.game, (self.x, self.y)))
if abs(cut_prop - 0.5) < 0.02:
self.glow.set_alpha(100)
surf = perfect_surf.copy().convert()
surf2 = perfect_surf_large.copy().convert()
surf2.set_colorkey((255, 0, 255))
surf2.set_alpha(90)
self.game.text_particles.append(Fadeout(self.game, surf2, (self.x, self.y), rate=200))
self.game.flare_up(60)
self.game.tear_sound()
elif abs(cut_prop - 0.5) < 0.25:
surf = good_surf.copy().convert()
self.game.bad_tear_sound()
else:
surf = okay_surf.copy().convert()
self.game.bad_tear_sound()
surf.set_colorkey((255, 0, 255))
surf.set_alpha(255)
self.game.text_particles.append(Fadeout(self.game, surf, (self.x, self.y), rate=400))
class BigEnemy(Enemy):
def __init__(self, game, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
self.game = game
self.radius = 40
self.x = x
self.y = y
self.angle = random.random() * 60 - 30
self.surf = big_lantern_surf
self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
self.touched_surf = big_lantern_touched_surf
self.touched_surf = pygame.transform.rotate(self.touched_surf, self.angle)
self.touched = False
self.launch_factor = 1.3
self.age = 0
self.glow = self.generate_glow()
class TutorialEnemy(BigEnemy):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def draw(self, surface):
super().draw(surface)
def destroy(self, cut_prop=0.5):
if abs(cut_prop - 0.5) < 0.02:
super().destroy(cut_prop=cut_prop)
else:
self.game.nope.play()
self.game.shake_effect(15)
surf = nope_surf.copy().convert()
surf.set_colorkey((255, 0, 255))
surf.set_alpha(255)
self.game.text_particles.append(Fadeout(self.game, surf, (self.x, self.y), rate=400))
self.since_hit = 0
class SmallEnemy(Enemy):
def __init__(self, game, x=c.WINDOW_WIDTH//2, y=c.WINDOW_HEIGHT//2):
self.game = game
self.radius = 35
self.x = x
self.y = y
self.angle = random.random() * 60 + 15
self.surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "small_lantern.png"))
self.draw_surf = pygame.transform.rotate(self.surf, self.angle)
self.touched_surf = pygame.image.load(os.path.join(c.ASSETS_PATH, "small_lantern_touched.png"))
self.touched_surf = pygame.transform.rotate(self.touched_surf, self.angle)
self.touched = False
self.launch_factor = 1.15
self.age = 0
self.glow = self.generate_glow() | 40.31694 | 116 | 0.604364 | 1,024 | 7,378 | 4.175781 | 0.140625 | 0.059869 | 0.038587 | 0.044434 | 0.598924 | 0.55449 | 0.526193 | 0.465622 | 0.399205 | 0.399205 | 0 | 0.02945 | 0.263622 | 7,378 | 183 | 117 | 40.31694 | 0.757592 | 0.010301 | 0 | 0.337662 | 0 | 0 | 0.019863 | 0.006575 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077922 | false | 0 | 0.045455 | 0 | 0.162338 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
aceb2f92ca07640272fc52b46d85a05db48cf38b | 455 | py | Python | run.py | diazjf/countdowner | 850cc800f7d945cb6308adafdbdf0e2e582d54d0 | [
"MIT"
] | null | null | null | run.py | diazjf/countdowner | 850cc800f7d945cb6308adafdbdf0e2e582d54d0 | [
"MIT"
] | null | null | null | run.py | diazjf/countdowner | 850cc800f7d945cb6308adafdbdf0e2e582d54d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import gui, timer
import threading, time
def countdown(view):
while True:
time_remaining = timer.getTimeRemaining()
view.changeLabel(time_remaining)
time.sleep(1)
def main():
g = gui.GUI()
# Update the UI in the background
thread1 = threading.Thread(target=countdown, args = [g])
thread1.setDaemon(True)
thread1.start()
g.mainloop()
if __name__ == '__main__':
main() | 19.782609 | 60 | 0.643956 | 55 | 455 | 5.145455 | 0.636364 | 0.091873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014409 | 0.237363 | 455 | 23 | 61 | 19.782609 | 0.801153 | 0.116484 | 0 | 0 | 0 | 0 | 0.01995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acf22d31d75b8095056cff14cf913de4d4d8590e | 592 | py | Python | pascals_triangle.py | canberkeh/Algorithms | 5d4ac443a76e492332ccefa69b71bea62fe83aa1 | [
"Unlicense"
] | null | null | null | pascals_triangle.py | canberkeh/Algorithms | 5d4ac443a76e492332ccefa69b71bea62fe83aa1 | [
"Unlicense"
] | null | null | null | pascals_triangle.py | canberkeh/Algorithms | 5d4ac443a76e492332ccefa69b71bea62fe83aa1 | [
"Unlicense"
] | null | null | null | def pascal(num):
if num > 1:
p = [[1], [1, 1]] # the first two rows are known, so write them directly
for i in range(2, num): # start from row 2, since the first two rows are already set
a = [1] # every row starts and ends with 1
for j in range(1, i): # the first and last entries are 1; the new values go in between
a.append(p[i-1][j-1] + p[i-1][j]) # each entry is the sum of the two adjacent entries in the previous row
a.append(1) # append the trailing 1
p.append(a)
return p
elif num == 1:return [[1]]
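# For reference, the call below is expected to print the first five rows:
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]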
print(pascal(5)) | 49.333333 | 104 | 0.565878 | 94 | 592 | 3.56383 | 0.5 | 0.023881 | 0.071642 | 0.101493 | 0.250746 | 0.250746 | 0.250746 | 0.250746 | 0 | 0 | 0 | 0.0425 | 0.324324 | 592 | 12 | 105 | 49.333333 | 0.795 | 0.440878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acf4309ff4ee23908a5d44126b9f642da83a9477 | 3,680 | py | Python | estimagic/tests/dashboard/test_monitoring_app.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | estimagic/tests/dashboard/test_monitoring_app.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | estimagic/tests/dashboard/test_monitoring_app.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | """Test the functions of the monitoring app."""
import webbrowser
from pathlib import Path
import pandas as pd
import pytest
from bokeh.document import Document
from bokeh.io import output_file
from bokeh.io import save
from bokeh.models import ColumnDataSource
import estimagic.dashboard.monitoring_app as monitoring
from estimagic.logging.create_database import load_database
@pytest.fixture()
def database():
database_name = "db1.db"
current_dir_path = Path(__file__).resolve().parent
database_path = current_dir_path / database_name
database = load_database(database_path)
return database
def test_monitoring_app():
"""Integration test that no Error is raised when calling the monitoring app."""
doc = Document()
database_name = "test_db"
current_dir_path = Path(__file__).resolve().parent
session_data = {"last_retrieved": 0, "database_path": current_dir_path / "db1.db"}
monitoring.monitoring_app(
doc=doc, database_name=database_name, session_data=session_data
)
def test_create_bokeh_data_sources(database):
tables = ["criterion_history", "params_history"]
criterion_history, params_history = monitoring._create_bokeh_data_sources(
database=database, tables=tables
)
assert criterion_history.data == {"iteration": [1], "value": [426.5586492569206]}
assert params_history.data == {
"iteration": [1],
"beta_pared": [0.47738201898674737],
"beta_public": [0.22650218067445926],
"beta_gpa": [-0.46745804687921866],
"cutoff_0": [0.0],
"cutoff_1": [2.0],
}
# skip test create_initial_convergence_plots
def test_plot_time_series_with_large_initial_values():
cds = ColumnDataSource({"y": [2e17, 1e16, 1e5], "x": [1, 2, 3]})
title = "Are large initial values shown?"
fig = monitoring._plot_time_series(data=cds, y_keys=["y"], x_name="x", title=title)
title = "Test _plot_time_series can handle large initial values."
output_file("time_series_initial_value.html", title=title)
path = save(obj=fig)
webbrowser.open_new_tab("file://" + path)
def test_map_groups_to_params_group_none():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params["group"] = None
params["name"] = ["a", "b", "c", "d"]
params.index = ["a", "b", "c", "d"]
expected = {}
res = monitoring._map_groups_to_params(params)
assert expected == res
def test_map_groups_to_params_group_not_none():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params["group"] = [None, "A", "B", "B"]
params.index = ["a", "b", "c", "d"]
params["name"] = ["a", "b", "c", "d"]
expected = {"A": ["b"], "B": ["c", "d"]}
res = monitoring._map_groups_to_params(params)
assert expected == res
def test_map_groups_to_params_group_int_index():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params.index = ["0", "1", "2", "3"]
params["name"] = ["0", "1", "2", "3"]
params["group"] = [None, "A", "B", "B"]
expected = {"A": ["1"], "B": ["2", "3"]}
res = monitoring._map_groups_to_params(params)
assert expected == res
def test_map_groups_to_params_group_multi_index():
params = pd.DataFrame()
params["value"] = [0, 1, 2, 3]
params["group"] = [None, "A", "B", "B"]
params["ind1"] = ["beta", "beta", "cutoff", "cutoff"]
params["ind2"] = ["edu", "exp", 1, 2]
params.set_index(["ind1", "ind2"], inplace=True)
params["name"] = ["beta_edu", "beta_exp", "cutoff_1", "cutoff_2"]
expected = {"A": ["beta_exp"], "B": ["cutoff_1", "cutoff_2"]}
res = monitoring._map_groups_to_params(params)
assert expected == res
| 33.454545 | 87 | 0.651359 | 492 | 3,680 | 4.605691 | 0.254065 | 0.007944 | 0.038835 | 0.060018 | 0.365843 | 0.304943 | 0.281995 | 0.269197 | 0.23654 | 0.23654 | 0 | 0.042943 | 0.183696 | 3,680 | 109 | 88 | 33.761468 | 0.711385 | 0.043207 | 0 | 0.294118 | 0 | 0 | 0.134188 | 0.008547 | 0 | 0 | 0 | 0 | 0.070588 | 1 | 0.094118 | false | 0 | 0.117647 | 0 | 0.223529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acf4baa42be0a8b370a59f50ec488aa38196a832 | 8,533 | py | Python | SOSAT/constraints/DITF_constraint.py | pnnl/SOSAT | 610f99e0bb80f2f5e7836e7e3b6b816e029838bb | [
"BSD-3-Clause"
] | null | null | null | SOSAT/constraints/DITF_constraint.py | pnnl/SOSAT | 610f99e0bb80f2f5e7836e7e3b6b816e029838bb | [
"BSD-3-Clause"
] | 1 | 2021-03-22T18:59:05.000Z | 2021-03-22T18:59:05.000Z | SOSAT/constraints/DITF_constraint.py | pnnl/SOSAT | 610f99e0bb80f2f5e7836e7e3b6b816e029838bb | [
"BSD-3-Clause"
] | null | null | null | from logging import log
import numpy as np
from numpy import ma
import pint
from .constraint_base import StressConstraint
units = pint.UnitRegistry()
class DITFConstraint(StressConstraint):
"""
A class used to constrain the stress state by the existence or non
existence of drilling-induced tensile fractures (DITF) at the
location being analyzed. Depending on the mud and formation
temperatures, mud weights, and rock strength and whether or
not significant mud losses were observed, if DITF's exist it
generally indicates that the maximum horizontal stress is much
larger than the minimum principal stress.
Attributes
----------
No public attributes
Parameters
----------
DITF_exists : bool
Indication of whether or not DITFs exist
mud_pressure_dist : subclass of `scipy.stats.rv_continuous`
The probability distribution for the maximum mud pressure
experienced by the relevant section of borehole from the time
that the well was drilled until the log used to identify the
presence or absence of DITFs was run; mud pressure should
be specified in the same pressure unit as is used for tensile strength and
Young's modulus, but this can be any unit as specified through
the optional `pressure_unit` parameter, which defaults to 'Pa';
conversion from mud weight must be performed by the user of
this class
mud_temperature_dist : subclass of `scipy.stats.rv_continuous`
The probability distribution for the minimum mud temperature;
the minimum value is of interest rather than the average value
since the formation of a DITF is governed by the minimum
value only
tensile_strength_dist : subclass of `scipy.stats.rv_continuous`
The probability distribution for the minimum tensile
strength in the zone being analyzed. DITFs will form at the
weakest portion of the well for a given stress state, so
whether they form or not is dependent on the minimum tensile
strength rather than an average representative value
formation_temperature : float
Formation temperature, which is taken as deterministic since
it is usually not highly uncertain
YM : float
Formation Young's Modulus, which is taken as deterministic
since the formation of DITF is only weakly dependent
on this parameter; should be specified in the same pressure
unit as is used for mud pressure and tensile strength, but
this can be any unit as specified through the optional
`pressure_unit` parameter, which defaults to 'Pa'
PR : float
Formation Poisson's Ratio, which is taken as deterministic
since the formation of DITFs is only weakly dependent
on this parameter
CTE : float
Formation coefficient of thermal expansion, which is taken
as deterministic since the formation of DITF is only
weakly dependent on this parameter
pressure_unit : str, optional
The unit used for mud pressure, tensile strength, and Young's modulus; should be a unit
recognized by `pint.UnitRegistry`; defaults to 'Pa'
temperature_unit : str, optional
The unit used to specify the mud temperature distribution and
the formation temperature; should be a unit recognized by
`pint.UnitRegistry`; defaults to degrees C ('degC')
Notes
-----
While this class allows users to use any probability distribution
that derives from the `scipy.stats.rv_continuous` class for the mud
temperature, pressure, and formation tensile strength, users are
cautioned against using any distribution that has finite
probability density for negative parameter values, since negative
strength values are not physically meaningful. Therefore, lognormal
distributions are more appropriate than a normal distribution, for
example.
"""
def __init__(self,
DITF_exists,
mud_pressure_dist,
mud_temperature_dist,
tensile_strength_dist,
formation_temperature,
YM,
PR,
CTE,
pressure_unit='Pa'):
"""
Constructor method
"""
self._DITF_exists = DITF_exists
self._mud_pressure_dist = mud_pressure_dist
self._mud_temperature_dist = mud_temperature_dist
self._tensile_strength_dist = tensile_strength_dist
self._formation_temperature = formation_temperature
self._YM = YM * units(pressure_unit)
self._PR = PR
self._CTE = CTE
self._pressure_unit = pressure_unit
def loglikelihood(self, ss):
"""
Computes the likelihood of each stress state given the presence
or absence of DITFs, formation and mud properties specified.
Parameters
----------
ss: `SOSAT.StressState` object
StressState object containing the stress states
over which the likelihood is to be evaluated
Returns
-------
Numpy MaskedArray
The returned object is a Numpy MaskedArray containing the
likelihood for each stress `ss`. The returned array is
masked identically to `ss.shmin_grid`
"""
# compute stress with balanced mud and no temperature difference
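# (this is the minimum effective tangential stress at the borehole wall from the
# Kirsch solution, evaluated at the azimuth of SHmax where a DITF would initiate)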
sig_nominal = 3.0 * ss.shmin_grid - ss.shmax_grid \
- 2.0 * ss.pore_pressure
# compute thermoelastic factor
TEF = self._CTE * self._YM / (1.0 - self._PR)
# since all temperature-based quantities in the class are
# assumed to be consistent, we do not include pint temperature
# units explicitly the way we do for pressure/stress. This means
# that TEF will only have pressure units. We convert it to
# ss.stress_units here to avoid repeated conversions inside the
# Monte Carlo loop
TEF = TEF.to(ss.stress_unit).magnitude
# use a Monte Carlo sampling scheme to evaluate the probability
# of a DITF forming
NDITF = ma.zeros(np.shape(ss.shmin_grid), dtype=np.int32)
PDITF_new = ma.zeros(np.shape(ss.shmin_grid), dtype=np.float64)
Ntotal = 0
converged = False
iter = 0
while not converged:
# perform 500 iterations at a time and then see if the
# probability has changed meaningfully
for i in range(0, 500):
mud_pressure_i = self._mud_pressure_dist.rvs() \
* units(self._pressure_unit)
# convert to the stress unit of ss
mud_pressure_i = mud_pressure_i \
.to(ss.stress_unit).magnitude
# no unit conversion is needed since all members of
# this class should have consistent temperature units
mud_temperature_i = self._mud_temperature_dist.rvs()
TS_i = self._tensile_strength_dist.rvs() \
* units(self._pressure_unit)
# convert to stress unit of ss
TS_i = TS_i.to(ss.stress_unit).magnitude
deltaP = mud_pressure_i - ss.pore_pressure
deltaT = mud_temperature_i - self._formation_temperature
DITF = sig_nominal - deltaP - TEF * deltaT + TS_i
NDITF[DITF < 0.0] += 1
iter += 1
Ntotal += 500
if iter > 2:
PDITF_old = PDITF_new
PDITF_new = NDITF / Ntotal
err = ma.MaskedArray.max(PDITF_new - PDITF_old)
if err < 0.01:
converged = True
print("DITF Monte Carlo iteration converged after ",
iter,
" iterations")
# return the most updated estimate for the likelihood of
# DITF formation at each stress state
if self._DITF_exists:
with np.errstate(divide='ignore'):
loglikelihood = np.log(PDITF_new)
return loglikelihood
else:
# we should change this to do the calculation using
# log probabilities and np.log1p to improve numerical
# precision when PDITF_new is close to 1.0
with np.errstate(divide='ignore'):
loglikelihood = np.log1p(- PDITF_new)
return loglikelihood
| 42.665 | 72 | 0.639752 | 1,061 | 8,533 | 5.035815 | 0.290292 | 0.024705 | 0.014037 | 0.01647 | 0.243496 | 0.230956 | 0.207 | 0.179487 | 0.165637 | 0.14505 | 0 | 0.005801 | 0.313137 | 8,533 | 199 | 73 | 42.879397 | 0.905818 | 0.57846 | 0 | 0.086957 | 0 | 0 | 0.021635 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.072464 | 0 | 0.144928 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acf71bef67e422b4b23484d576fe9789969c135a | 1,968 | py | Python | regtests/bench/fannkuch.py | secureosv/pythia | 459f9e2bc0bb2da57e9fa8326697d9ef3386883a | [
"BSD-3-Clause"
] | 17 | 2015-12-13T23:11:31.000Z | 2020-07-19T00:40:18.000Z | regtests/bench/fannkuch.py | secureosv/pythia | 459f9e2bc0bb2da57e9fa8326697d9ef3386883a | [
"BSD-3-Clause"
] | 8 | 2016-02-22T19:42:56.000Z | 2016-07-13T10:58:04.000Z | regtests/bench/fannkuch.py | secureosv/pythia | 459f9e2bc0bb2da57e9fa8326697d9ef3386883a | [
"BSD-3-Clause"
] | 3 | 2016-04-11T20:34:31.000Z | 2021-03-12T10:33:02.000Z | # The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# contributed by Sokolov Yura
# modified by Tupteq
# modified by hartsantler 2014
from time import clock
from runtime import *
DEFAULT_ARG = 9
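# For reference, the published Benchmarks Game maximum for n = 9 is 30 flips.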
def main():
times = []
for i in range(4):
t0 = clock()
res = fannkuch(DEFAULT_ARG)
#print( 'fannkuch flips:', res)
tk = clock()
times.append(tk - t0)
avg = sum(times) / len(times)
print(avg)
def fannkuch(n):
count = list(range(1, n+1))
perm1 = list(range(n))
perm = list(range(n))
max_flips = 0
m = n-1
r = n
check = 0
#print('--------')
#print perm1
#print('________')
while True:
if check < 30:
check += 1
while r != 1:
count[r-1] = r
r -= 1
if perm1[0] != 0 and perm1[m] != m:
#print '>perm 1:', perm1
perm = perm1[:]
#print '>perm:', perm
flips_count = 0
k = perm[0]
#while k: ## TODO fix for dart
while k != 0:
#print 'flip', k
#perm[:k+1] = perm[k::-1]
assert k < n
assert k < len(perm)
tmp = perm[k::-1]
assert len(tmp) <= len(perm)
#print 'tmp:', tmp
#raise RuntimeError('x')
## slice assignment in python
## allows for the end slice index
## to be greater than the length
#assert k+1 < len(perm) ## not always true!
perm[:k+1] = tmp
assert len(perm) < n+1
#print 'k+1:', k+1
#print 'len perm:', len(perm)
#print 'len tmp:', len(tmp)
assert k+1 <= len(perm)
flips_count += 1
k = perm[0]
#print 'k=', k
if flips_count > 1:
#print 'breaking...'
break
if flips_count > max_flips:
max_flips = flips_count
do_return = True
while r != n:
item = perm1.pop(0)
## python allows for the insertion index
## to be greater than the length of the array.
#assert r < len(perm1) ## not always true!
perm1.insert(r, item)
count[r] -= 1
if count[r] > 0:
do_return = False
break
r += 1
if do_return:
return max_flips
main() | 18.055046 | 49 | 0.57876 | 306 | 1,968 | 3.650327 | 0.30719 | 0.014324 | 0.021486 | 0.021486 | 0.078782 | 0.051925 | 0.051925 | 0 | 0 | 0 | 0 | 0.034868 | 0.271341 | 1,968 | 109 | 50 | 18.055046 | 0.744073 | 0.369411 | 0 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009174 | 0.089286 | 1 | 0.035714 | false | 0 | 0.035714 | 0 | 0.089286 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acf80f1fb25644f88ec626aba4649014ffc1feb3 | 2,114 | py | Python | lzc/wavelets.py | joker-xii/plant-potential | 4a3e5f2b4755456f058dfc4c235231a14ffbc169 | [
"MIT"
] | null | null | null | lzc/wavelets.py | joker-xii/plant-potential | 4a3e5f2b4755456f058dfc4c235231a14ffbc169 | [
"MIT"
] | null | null | null | lzc/wavelets.py | joker-xii/plant-potential | 4a3e5f2b4755456f058dfc4c235231a14ffbc169 | [
"MIT"
] | null | null | null | import pywt
import pandas as pd
import math
import numpy as np
import matplotlib.pyplot as plt
from lzc.config import *
def read_data(raw, length=SPLIT_SIZE, max_len=MAX_LENGTH):
raw_data = pd.read_csv(raw).iloc[:, 0].values
raw_data = raw_data[:max_len]
sure_value = math.floor(len(raw_data) / length) * length
# print("sure of", sure_value, len(raw_data))
# crop data
raw_data = raw_data[:sure_value]
# split data to length
dds = np.array_split(raw_data, (len(raw_data) / length))
return dds, raw_data
def plot(y,title =""):
plt.title(title)
x = np.linspace(0, len(y) - 1, len(y))
plt.plot(x, y)
plt.show()
def get_transformed(data, func):
retCA = []
retCD = []
for i in data:
# print(len(i), "Fuck!")
cA, cD = pywt.dwt(i, func)
cA = np.pad(cA, (0, len(i) - len(cA)), mode='constant')
cD = np.pad(cD, (0, len(i) - len(cD)), mode='constant')
retCA = retCA + cA.tolist()
retCD = retCD + cD.tolist()
return retCA, retCD
def plot_each(data, func):
(cA, cD) = pywt.dwt(data[0], func)
plot(cA,'cA of DWTUnit('+func+")")
plot(cD,'cD of DWTUnit('+func+")")
def to_wavs(fname, max_len=MAX_LENGTH, attr='csv'):
datas, rd = read_data(fname + "." + attr, max_len=max_len)
df = pd.DataFrame()
df["basic"] = rd
for i in WAVELETS:
print(i)
ca, cd = get_transformed(datas, i)
df[i + "_cA"] = ca
df[i + "_cD"] = cd
df.to_csv(fname + "_dwt300.csv", float_format='%.3f')
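# The CSV written above holds the raw signal in a 'basic' column plus one
# '<wavelet>_cA' and one '<wavelet>_cD' column for every wavelet name listed in
# WAVELETS (defined in lzc.config), with coefficients zero-padded per chunk so
# all columns have the same length.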
def show_wav(fname, max_len = MAX_LENGTH, attr='csv'):
datas, rd = read_data(fname + "." + attr, max_len=max_len)
plot(datas[0],'input')
for i in WAVELETS:
plot_each(datas,i)
if __name__ == '__main__':
# to_wavs("olddata/m0", max_len=OLD_DATA_LEN, attr='txt')
# to_wavs("olddata/m1", max_len=OLD_DATA_LEN, attr='txt')
# to_wavs("olddata/m2", max_len=OLD_DATA_LEN, attr='txt')
# to_wavs('0m')
# to_wavs('1m')
# to_wavs('2m')
# print(len(pywt.wavelist(kind='discrete')))
# for i in pywt.wavelist(kind='discrete'):
# print(i)
show_wav('1m')
| 28.186667 | 63 | 0.60123 | 334 | 2,114 | 3.610778 | 0.275449 | 0.054726 | 0.037313 | 0.037313 | 0.188226 | 0.188226 | 0.188226 | 0.188226 | 0.188226 | 0.164179 | 0 | 0.011023 | 0.227531 | 2,114 | 74 | 64 | 28.567568 | 0.727495 | 0.191107 | 0 | 0.083333 | 0 | 0 | 0.056014 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.291667 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acf9f4aa12fe31bd7225f696824684dfd9cbfba0 | 1,646 | py | Python | training/diagnostic.py | kerryvernebegeman/Kerry-Verne-Begeman | eb6ee851003d435c5658f9cc0a41d72ea8addceb | [
"MIT"
] | null | null | null | training/diagnostic.py | kerryvernebegeman/Kerry-Verne-Begeman | eb6ee851003d435c5658f9cc0a41d72ea8addceb | [
"MIT"
] | null | null | null | training/diagnostic.py | kerryvernebegeman/Kerry-Verne-Begeman | eb6ee851003d435c5658f9cc0a41d72ea8addceb | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
from training import dataset
from training import misc
from metrics import metric_base
def create_initial_pkl(
G_args = {}, # Options for generator network.
D_args = {}, # Options for discriminator network.
tf_config = {}, # Options for tflib.init_tf().
config_id = "config-f", # config-f is the only one tested ...
num_channels = 3, # number of channels, e.g. 3 for RGB
resolution_h = 1024, # height dimension of real/fake images
resolution_w = 1024, # width dimension of real/fake images
label_size = 0, # number of labels for a conditional model
):
# Initialize dnnlib and TensorFlow.
tflib.init_tf(tf_config)
resolution = resolution_h # training_set.shape[1]
# Construct or load networks.
with tf.device('/gpu:0'):
print('Constructing networks...')
G = tflib.Network('G', num_channels=num_channels, resolution=resolution, label_size=label_size, **G_args)
D = tflib.Network('D', num_channels=num_channels, resolution=resolution, label_size=label_size, **D_args)
Gs = G.clone('Gs')
# Print layers and generate initial image snapshot.
G.print_layers(); D.print_layers()
pkl = 'network-initial-%s-%sx%s-%s.pkl' % (config_id, resolution_w, resolution_h, label_size)
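# With the default arguments this produces 'network-initial-config-f-1024x1024-0.pkl'.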
misc.save_pkl((G, D, Gs), pkl)
print("Saving",pkl)
| 41.15 | 113 | 0.631835 | 212 | 1,646 | 4.75 | 0.400943 | 0.053625 | 0.03575 | 0.041708 | 0.188679 | 0.188679 | 0.188679 | 0.119166 | 0.119166 | 0.119166 | 0 | 0.01086 | 0.272783 | 1,646 | 39 | 114 | 42.205128 | 0.830409 | 0.252126 | 0 | 0 | 0 | 0 | 0.06486 | 0.025452 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.3 | 0 | 0.333333 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acf9ff45411cc08d0856bdcb3002fa4bb2aca971 | 12,004 | py | Python | vimms/Controller/misc.py | hechth/vimms | ce5922578cf225d46cb285da8e7af97b5321f5aa | [
"MIT"
] | 6 | 2021-04-12T14:03:55.000Z | 2022-03-08T19:40:36.000Z | vimms/Controller/misc.py | hechth/vimms | ce5922578cf225d46cb285da8e7af97b5321f5aa | [
"MIT"
] | 43 | 2021-04-19T09:46:22.000Z | 2022-03-29T15:13:29.000Z | vimms/Controller/misc.py | hechth/vimms | ce5922578cf225d46cb285da8e7af97b5321f5aa | [
"MIT"
] | 1 | 2021-12-07T08:17:01.000Z | 2021-12-07T08:17:01.000Z | import math
import copy
import itertools
import subprocess
from pathlib import Path
import numpy as np
from loguru import logger
from vimms.Controller.base import Controller
from vimms.Common import *
class FixedScansController(Controller):
"""
A controller which takes a schedule of scans and converts them into tasks in the queue
"""
def __init__(self, schedule=None, params=None):
"""
Creates a FixedScansController that accepts a list of schedule of scan parameters
:param schedule: a list of ScanParameter objects
:param params: mass spec advanced parameters, if any
"""
super().__init__(params=params)
self.tasks = None
self.initial_task = None
if schedule is not None and len(schedule) > 0:
# if schedule is provided, set it
self.set_tasks(schedule)
def get_initial_tasks(self):
"""
Returns all the remaining scan parameter objects to be pushed to the mass spec queue
:return: all the remaining tasks
"""
assert self.tasks is not None # the remaining scan parameters in the schedule must have been set
return self.tasks
def get_initial_scan_params(self):
"""
Returns the initial scan parameter object to send when acquisition starts
:return: the initial task
"""
assert self.initial_task is not None # the first scan parameters in the schedule must have been set
return self.initial_task
def set_tasks(self, schedule):
"""
Set the fixed schedule of tasks in this controller
:param schedule: a list of scan parameter objects
:return: None
"""
assert isinstance(schedule, list)
self.initial_task = schedule[0] # used for sending the first scan
self.tasks = schedule[1:] # used for sending all the other scans
def handle_scan(self, scan, current_size, pending_size):
# simply record every scan that we've received, but return no new tasks
logger.debug('Time %f Received %s' % (scan.rt, scan))
self.scans[scan.ms_level].append(scan)
return []
def update_state_after_scan(self, last_scan):
pass
class MS2PlannerController(FixedScansController):
@staticmethod
def mzmine2ms2planner(inpath, outpath):
'''Transform mzmine2 box file to ms2planner default format.'''
records = []
with open(inpath, "r") as f:
fields = {}
for i, name in enumerate(f.readline().split(",")):
if(not name in fields): fields[name] = list()
fields[name].append(i)
mz = fields["row m/z"][0]
rt = fields["row retention time"][0]
charges = next(idxes for fd, idxes in fields.items() if fd.strip().endswith("Peak charge"))
intensities = next(idxes for fd, idxes in fields.items() if fd.strip().endswith("Peak height"))
for ln in f:
sp = ln.split(",")
for charge, intensity in zip(charges, intensities):
records.append([
sp[mz],
str(float(sp[rt]) * 60),
sp[charge],
"1",
sp[intensity]
])
out_headers = ["Mass [m/z]", "retention_time", "charge", "Blank", "Sample"]
with open(outpath, "w+") as f:
f.write(",".join(out_headers) + "\n")
for r in records: f.write(",".join(r) + "\n")
@staticmethod
def minimise_single(x, target):
if(target < 0): return 0
c = int(target // x)
return min(c, c+1, key=lambda c: abs(target - c * x))
@staticmethod
def minimise_distance(target, *args):
'''Solve argmin(a1, a2 ... an)(a1x1 + ... + anxn - t) for non-negative integer a1...an and non-negative reals x1...xn, t using backtracking search.
i.e. Schedule tasks of different fixed lengths s.t. the last task ends as close to the target time as possible.
'''
best_coefficients = (float("inf"), [])
stack = [MS2PlannerController.minimise_single(args[0], target)] if len(args) > 0 else []
while(stack != []):
remainder = target - sum(s * a for s, a in zip(stack, args))
for i in range(len(stack), len(args)):
c = MS2PlannerController.minimise_single(args[i], remainder)
stack.append(c)
remainder -= c * args[i]
dist = abs(remainder)
if(not math.isclose(dist, best_coefficients[0]) and dist < best_coefficients[0]): best_coefficients = (dist, copy.copy(stack))
#if(dist < best_coefficients[0]): best_coefficients = (dist, copy.copy(stack))
#if(dist < best_coefficients[0]):
# if(math.isclose(dist, best_coefficients[0])): print(f"IS CLOSE, DIST: {dist}, CHAMP DIST: {best_coefficients[0]}, STACK: {stack}, CHAMPION: {best_coefficients[1]}")
# best_coefficients = (dist, copy.copy(stack))
stack.pop()
while(stack != [] and stack[-1] <= 0): stack.pop()
if(stack != []): stack[-1] -= 1
return best_coefficients[1]
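# For example, minimise_distance(1.0, 0.3, 0.4) returns [2, 1]: two 0.3 s filler
# scans plus one 0.4 s scan fill a 1.0 s gap exactly (illustrative durations only).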
@staticmethod
def parse_ms2planner(fpath):
schedules = []
fields = ["mz_centre", "mz_isolation", "duration", "rt_start", "rt_end", "intensity", "apex_rt", "charge"]
with open(fpath, "r") as f:
for path in f:
schedules.append([])
for scan in path.strip().split("\t")[1:]:
schedules[-1].append(dict(zip(fields, map(float, scan.split(" ")))))
return schedules
@staticmethod
def sched_dict2params(schedule, scan_duration_dict):
'''Scan_duration_dict matches the format of MS scan_duration_dict with _fixed_ scan lengths.'''
time, new_sched = 0, []
srted = sorted(schedule, key=lambda s: s["rt_start"])
print("Schedule times: {}".format([s["rt_start"] for s in srted]))
print(f"NUM SCANS IN SCHEDULE FILE: {len(schedule)}")
#new_sched.append(get_default_scan_params())
#scan_duration_dict = {1: 0.2, 2: 0.2}
id_count = INITIAL_SCAN_ID
for ms2 in srted:
filler = MS2PlannerController.minimise_distance(ms2["rt_start"] - time, scan_duration_dict[1], scan_duration_dict[2])
print(f"filler_scans: {filler}")
for i in range(filler[0]):
sp = get_default_scan_params()
new_sched.append(sp)
id_count += 1
for i in range(filler[1]):
#print(f"sid: {id_count}")
new_sched.append(get_dda_scan_param(0, 0.0, id_count, ms2["mz_isolation"] * 2, 0.0, 0.0))
id_count += 1
new_sched.append(get_dda_scan_param(ms2["mz_centre"], 0.0, id_count, ms2["mz_isolation"] * 2, 0.0, 0.0))
id_count += 1
times = [time, scan_duration_dict[1] * filler[0], scan_duration_dict[2] * filler[1]]
time += sum(c * scan_duration_dict[i+1] for i, c in enumerate(filler)) + scan_duration_dict[2]
print(f"Start time: {times[0]}, MS1 duration: {times[1]}, MS2 duration: {times[2]}, End time: {time}")
print(f"schedule_length: {len(new_sched)}")
print(f"Durations: {scan_duration_dict}")
return new_sched
@staticmethod
def from_fullscan(ms2planner_dir,
fullscan_file,
fullscan_mzmine_table,
out_file,
intensity_threshold,
intensity_ratio,
num_injections,
intensity_accu,
restriction,
isolation,
delay,
min_rt,
max_rt,
scan_duration_dict,
params=None,
cluster_method="kNN",
userpython="python"):
converted = os.path.join(os.path.dirname(out_file), "mzmine2ms2planner.txt")
MS2PlannerController.mzmine2ms2planner(fullscan_mzmine_table, converted)
subprocess.run(
[
userpython,
os.path.join(ms2planner_dir, "path_finder.py"),
"curve",
converted,
#os.path.join(ms2planner_dir, "test", "Blank_to_Sample_mrgd.csv"),
out_file,
str(intensity_threshold),
str(intensity_ratio),
str(num_injections),
"-infile_raw", str(fullscan_file),
"-intensity_accu", str(intensity_accu),
"-restriction", str(restriction[0]), str(restriction[1]),
"-isolation", str(isolation),
"-delay", str(delay),
"-min_scan", str(min_rt),
"-max_scan", str(max_rt),
"-cluster", str(cluster_method)
]
)
schedules = [MS2PlannerController.sched_dict2params(sch, scan_duration_dict) for sch in MS2PlannerController.parse_ms2planner(out_file)]
with open(os.path.join(os.path.dirname(out_file), "scan_params.txt"), "w+") as f:
for i, schedule in enumerate(schedules):
f.write(f"SCHEDULE {i}\n\n")
f.write("".join(f"SCAN {j}: {scan}\n\n" for j, scan in enumerate(schedule)))
return [MS2PlannerController(schedule=schedule, params=params) for schedule in schedules]
class MatchingController(FixedScansController):
@staticmethod
def from_matching(matching, isolation_width, params=None):
return [MatchingController(schedule=schedule, params=params) for schedule in matching.make_schedules(isolation_width)]
class MultiIsolationController(Controller):
def __init__(self, N, isolation_width=DEFAULT_ISOLATION_WIDTH, params=None):
super().__init__(params=params)
assert N > 1
self.N = N
self.isolation_width = isolation_width
self.mz_tol = 10
self.rt_tol = 15
def _make_scan_order(self, N):
# makes a list of tuples, each saying which precursor idx in the sorted
# list should be in which MS2 scan
initial_idx = range(N)
scan_order = []
for L in range(1, len(initial_idx) + 1):
for subset in itertools.combinations(initial_idx, L):
scan_order.append(subset)
return scan_order
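# e.g. for N=3 this yields every non-empty subset of the top three precursor
# indices, in increasing size: (0,), (1,), (2,), (0, 1), (0, 2), (1, 2), (0, 1, 2)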
def _process_scan(self, scan):
# if there's a previous ms1 scan to process
new_tasks = []
fragmented_count = 0
if self.scan_to_process is not None:
mzs = self.scan_to_process.mzs
intensities = self.scan_to_process.intensities
rt = self.scan_to_process.rt
idx = np.argsort(intensities)[::-1]
precursor_scan_id = self.scan_to_process.scan_id
scan_order = self._make_scan_order(min(self.N, len(mzs)))
for subset in scan_order:
mz = []
intensity = []
for s in subset:
mz.append(mzs[idx[s]])
intensity.append(intensities[idx[s]])
dda_scan_params = self.get_ms2_scan_params(mz, intensity, precursor_scan_id, self.isolation_width,
self.mz_tol, self.rt_tol)
new_tasks.append(dda_scan_params)
self.current_task_id += 1
ms1_scan_params = self.get_ms1_scan_params()
self.current_task_id += 1
self.next_processed_scan_id = self.current_task_id
new_tasks.append(ms1_scan_params)
return new_tasks
def update_state_after_scan(self, scan):
pass
| 43.02509 | 181 | 0.572809 | 1,435 | 12,004 | 4.622997 | 0.211847 | 0.023515 | 0.031354 | 0.018993 | 0.162345 | 0.130841 | 0.101146 | 0.071601 | 0.062557 | 0.062557 | 0 | 0.014722 | 0.320976 | 12,004 | 278 | 182 | 43.179856 | 0.799166 | 0.161363 | 0 | 0.086957 | 0 | 0.004831 | 0.06785 | 0.00213 | 0 | 0 | 0 | 0 | 0.019324 | 1 | 0.082126 | false | 0.009662 | 0.043478 | 0.004831 | 0.198068 | 0.028986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
acfe5dc7af8834b4d1adabbd93a270ec23ea7675 | 845 | py | Python | docs/conf.py | ghuntley/rules_haskell | adc3503387fbb54173dc4b4f21ae0aefe33759a4 | [
"Apache-2.0"
] | 222 | 2017-11-06T09:01:12.000Z | 2022-03-28T08:24:22.000Z | docs/conf.py | ghuntley/rules_haskell | adc3503387fbb54173dc4b4f21ae0aefe33759a4 | [
"Apache-2.0"
] | 1,168 | 2017-11-19T07:43:13.000Z | 2022-03-31T12:40:39.000Z | docs/conf.py | ghuntley/rules_haskell | adc3503387fbb54173dc4b4f21ae0aefe33759a4 | [
"Apache-2.0"
] | 94 | 2017-11-17T22:46:37.000Z | 2022-03-15T00:16:56.000Z | project = 'rules_haskell'
copyright = '2018, The rules_haskell authors'
source_suffix = '.rst'
extensions = [
'sphinx.ext.graphviz',
'sphinx.ext.todo',
]
master_doc = 'index'
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_theme_options = {
'show_powered_by': False,
'github_user': 'tweag',
'github_repo': 'rules_haskell',
'github_banner': True,
'github_type': "star",
'show_related': False,
'note_bg': '#FFF59C',
}
html_show_sphinx = False
todo_include_todos = True
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass).
latex_documents = [
(master_doc, 'rules_haskell.tex', 'rules\\_haskell Documentation',
'Tweag I/O', 'manual'),
]
| 20.119048 | 70 | 0.68284 | 103 | 845 | 5.330097 | 0.708738 | 0.10929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008596 | 0.173965 | 845 | 41 | 71 | 20.609756 | 0.777937 | 0.145562 | 0 | 0 | 0 | 0 | 0.411683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a0028c34a0e33cdbd3e5a8d6d5c0ae1cbaa0c93 | 2,557 | py | Python | craigslistings/config.py | fgregg/listings_scraper | bd90299537c9e34d5bd22d310780f269872f1789 | [
"MIT"
] | null | null | null | craigslistings/config.py | fgregg/listings_scraper | bd90299537c9e34d5bd22d310780f269872f1789 | [
"MIT"
] | 4 | 2016-05-13T23:01:25.000Z | 2016-05-13T23:01:52.000Z | craigslistings/config.py | fgregg/listings_scraper | bd90299537c9e34d5bd22d310780f269872f1789 | [
"MIT"
] | null | null | null | max_packet_size = 1048576 # Set in my.conf
byte_encoding = 5 # UTF-8 Uses up to four bytes
string_chunk = int(max_packet_size/byte_encoding)
cities = {"newyork" : ('New York', 'NY'),
"losangeles" : ('Los Angeles', 'CA'),
"chicago" : ('Chicago', 'IL'),
"houston" : ('Houston', 'TX'),
"philadelphia" : ('Philadelphia', 'PA'),
"phoenix" : ('Phoenix', 'AZ'),
"sanantonio" : ('San Antonio', 'TX'),
"sandiego" : ('San Diego', 'CA'),
"dallas" : ('Dallas', 'TX'),
"jacksonville" : ('Jacksonville', 'FL'),
"indianapolis" : ('Indianapolis', 'IN'),
"sanfrancisco" : ('San Francisco', 'CA'),
"austin" : ('Austin', 'TX'),
"columbus" : ('Columbus', 'OH'),
"charlotte" : ('Charlotte', 'NC'),
"detroit" : ('Detroit', 'MI'),
"elpaso" : ('El Paso', 'TX'),
"memphis" : ('Memphis', 'TN'),
"baltimore" : ('Baltimore', 'MD'),
"boston" : ('Boston', 'MA'),
"seattle" : ('Seattle', 'WA'),
"dc" : ('Washington', 'DC'),
"nashville" : ('Nashville', 'TN'),
"denver" : ('Denver', 'CO'),
"louisville" : ('Louisville', 'KY'),
"milwaukee" : ('Milwaukee', 'WI'),
"portland" : ('Portland', 'OR'),
"lasvegas" : ('Las Vegas', 'NV'),
"oklahomacity" : ('Oklahoma City', 'OK'),
"albuquerque" : ('Albuquerque', 'NM'),
"tucson" : ('Tucson', 'AZ'),
"fresno": ('Fresno', 'CA'),
"sacramento" : ('Sacramento', 'CA'),
"kansascity" : ('Kansas City', 'MO'),
"atlanta" : ('Atlanta', 'GA'),
"cosprings" : ('Colorado Springs', 'CO'),
"omaha" : ('Omaha', 'NE'),
"raleigh" : ('Raleigh', 'NC'),
"miami" : ('Miami', 'FL'),
"cleveland" : ('Cleveland', 'OH'),
"tulsa" : ('Tulsa', 'OK'),
"minneapolis" : ('Minneapolis', 'MN'),
"wichita" : ('Wichita', 'KS'),
"knoxville" : ('Knoxville', 'TN'),
"asheville" : ('Asheville', 'NC')
}
std_feeds = [["sublet", "http://%s.craigslist.org/sub/index.rss"],
["room", "http://%s.craigslist.org/roo/index.rss"],
["apartment" , "http://%s.craigslist.org/apa/index.rss"]
]
ny_feeds = [["sublet", "http://%s.craigslist.org/sub/index.rss"],
["room", "http://%s.craigslist.org/roo/index.rss"],
["apartment" , "http://%s.craigslist.org/abo/index.rss"]
]
| 41.918033 | 69 | 0.461478 | 224 | 2,557 | 5.227679 | 0.5625 | 0.025619 | 0.076857 | 0.092229 | 0.170794 | 0.170794 | 0.170794 | 0.170794 | 0.170794 | 0.170794 | 0 | 0.00502 | 0.298788 | 2,557 | 60 | 70 | 42.616667 | 0.648076 | 0.016426 | 0 | 0.035088 | 0 | 0 | 0.441879 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a011810b1aa15948bd07ab362906562c0540151 | 662 | py | Python | appfl/protos/utils.py | markxiao/APPFL | 2940f01695b84d8239368e5d1fc3133c7f7a05ae | [
"MIT"
] | null | null | null | appfl/protos/utils.py | markxiao/APPFL | 2940f01695b84d8239368e5d1fc3133c7f7a05ae | [
"MIT"
] | null | null | null | appfl/protos/utils.py | markxiao/APPFL | 2940f01695b84d8239368e5d1fc3133c7f7a05ae | [
"MIT"
] | null | null | null | import numpy as np
from .federated_learning_pb2 import DataBuffer
from .federated_learning_pb2 import TensorRecord
def construct_tensor_record(name, nparray):
return TensorRecord(name=name, data_shape=list(nparray.shape), data_bytes=nparray.tobytes(order='C'))
def proto_to_databuffer(proto, max_size=(2*1024*1024)):
data_bytes = proto.SerializeToString()
data_bytes_size = len(data_bytes)
message_size = data_bytes_size if max_size > data_bytes_size else max_size
for i in range(0,data_bytes_size,message_size):
chunk = data_bytes[i:i+message_size]
msg = DataBuffer(size=message_size, data_bytes=chunk)
yield msg
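# Typical use (a sketch; `stub.SendTensor` is an illustrative streaming-RPC name,
# not necessarily part of this package): stream a large TensorRecord to the
# server in chunks of at most max_size bytes.
#
# record = construct_tensor_record("layer1.weight", weight_array)
# for data_buffer in proto_to_databuffer(record):
# stub.SendTensor(data_buffer)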
| 36.777778 | 105 | 0.767372 | 98 | 662 | 4.887755 | 0.438776 | 0.169102 | 0.10856 | 0.100209 | 0.125261 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021277 | 0.148036 | 662 | 17 | 106 | 38.941176 | 0.828014 | 0 | 0 | 0 | 0 | 0 | 0.001511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0.076923 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a016af0909efbd7f966d50e6e0e6974238ed8fa | 7,843 | py | Python | h/models/document/_document.py | BearerPipelineTest/h | 6b8b6600f5995463ca60ded9e4c82053d606f4de | [
"BSD-2-Clause"
] | 2,103 | 2015-01-07T12:47:49.000Z | 2022-03-29T02:38:25.000Z | h/models/document/_document.py | BearerPipelineTest/h | 6b8b6600f5995463ca60ded9e4c82053d606f4de | [
"BSD-2-Clause"
] | 4,322 | 2015-01-04T17:18:01.000Z | 2022-03-31T17:06:02.000Z | h/models/document/_document.py | admariner/h | 25ef1b8d94889df86ace5a084f1aa0effd9f4e25 | [
"BSD-2-Clause"
] | 389 | 2015-01-24T04:10:02.000Z | 2022-03-28T08:00:16.000Z | import logging
from datetime import datetime
from urllib.parse import urlparse
import sqlalchemy as sa
from h.db import Base, mixins
from h.models import Annotation
from h.models.document._exceptions import ConcurrentUpdateError
from h.models.document._meta import create_or_update_document_meta
from h.models.document._uri import DocumentURI, create_or_update_document_uri
from h.util.uri import normalize as uri_normalize
log = logging.getLogger(__name__)
class Document(Base, mixins.Timestamps):
__tablename__ = "document"
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
#: The denormalized value of the first DocumentMeta record with type title.
title = sa.Column("title", sa.UnicodeText())
#: The denormalized value of the "best" http(s) DocumentURI for this Document.
web_uri = sa.Column("web_uri", sa.UnicodeText())
# FIXME: This relationship should be named `uris` again after the
# dependency on the annotator-store is removed, as it clashes with
# making the Postgres and Elasticsearch interface of a Document
# object behave the same way.
document_uris = sa.orm.relationship(
"DocumentURI", backref="document", order_by="DocumentURI.updated.desc()"
)
meta = sa.orm.relationship(
"DocumentMeta", backref="document", order_by="DocumentMeta.updated.desc()"
)
def __repr__(self):
return f"<Document {self.id}>"
def update_web_uri(self):
"""
Update the value of the self.web_uri field.
Set self.web_uri to the "best" http(s) URL from self.document_uris.
Set self.web_uri to None if there's no http(s) DocumentURIs.
"""
def first_http_url(type_=None):
"""
Return this document's first http(s) URL of the given type.
Return None if this document doesn't have any http(s) URLs of the
given type.
If no type is given just return this document's first http(s)
URL, or None.
"""
for document_uri in self.document_uris:
uri = document_uri.uri
if type_ is not None and document_uri.type != type_:
continue
if urlparse(uri).scheme not in ["http", "https"]:
continue
return document_uri.uri
self.web_uri = (
first_http_url(type_="self-claim")
or first_http_url(type_="rel-canonical")
or first_http_url()
)
@classmethod
def find_by_uris(cls, session, uris):
"""Find documents by a list of uris."""
query_uris = [uri_normalize(u) for u in uris]
matching_claims = (
session.query(DocumentURI)
.filter(
DocumentURI.uri_normalized.in_(query_uris) # pylint: disable=no-member
)
.distinct(DocumentURI.document_id)
.subquery()
)
return session.query(Document).join(matching_claims)
@classmethod
def find_or_create_by_uris( # pylint: disable=too-many-arguments
cls, session, claimant_uri, uris, created=None, updated=None
):
"""
Find or create documents from a claimant uri and a list of uris.
It tries to find a document based on the claimant and the set of uris.
If none can be found it will return a new document with the claimant
uri as its only document uri as a self-claim. It is the caller's
responsibility to create any other document uris.
"""
finduris = [claimant_uri] + uris
documents = cls.find_by_uris(session, finduris)
if not documents.count():
doc = Document(created=created, updated=updated)
DocumentURI(
document=doc,
claimant=claimant_uri,
uri=claimant_uri,
type="self-claim",
created=created,
updated=updated,
)
session.add(doc)
try:
session.flush()
except sa.exc.IntegrityError as err:
raise ConcurrentUpdateError("concurrent document creation") from err
return documents
def merge_documents(session, documents, updated=None):
"""
Take a list of documents and merge them together. It returns the new master document.
The support for setting a specific value for the `updated` should only
be used during the Postgres migration. It should be removed afterwards.
"""
if updated is None:
updated = datetime.utcnow()
master = documents[0]
duplicates = documents[1:]
duplicate_ids = [doc.id for doc in duplicates]
log.info("Merging %s documents", len(duplicate_ids) + 1)
for doc in duplicates:
for _ in range(len(doc.document_uris)):
uri = doc.document_uris.pop()
uri.document = master
uri.updated = updated
for _ in range(len(doc.meta)):
meta = doc.meta.pop()
meta.document = master
meta.updated = updated
try: # pylint:disable=too-many-try-statements
session.flush()
session.query(Annotation).filter(
Annotation.document_id.in_(duplicate_ids)
).update({Annotation.document_id: master.id}, synchronize_session="fetch")
session.query(Document).filter(Document.id.in_(duplicate_ids)).delete(
synchronize_session="fetch"
)
except sa.exc.IntegrityError as err:
raise ConcurrentUpdateError("concurrent document merges") from err
return master
def update_document_metadata( # pylint: disable=too-many-arguments
session,
target_uri,
document_meta_dicts,
document_uri_dicts,
created=None,
updated=None,
):
"""
Create and update document metadata from the given annotation.
Document, DocumentURI and DocumentMeta objects will be created, updated
and deleted in the database as required by the given annotation and
document meta and uri dicts.
:param target_uri: the target_uri of the annotation from which the document metadata comes
:param document_meta_dicts: the document metadata dicts that were derived
by validation from the "document" dict that the client posted
:type document_meta_dicts: list of dicts
:param document_uri_dicts: the document URI dicts that were derived by
validation from the "document" dict that the client posted
:type document_uri_dicts: list of dicts
:param created: Date and time value for the new document records
:param updated: Date and time value for the new document records
:returns: the matched or created document
:rtype: h.models.Document
"""
if created is None:
created = datetime.utcnow()
if updated is None:
updated = datetime.utcnow()
documents = Document.find_or_create_by_uris(
session,
target_uri,
[u["uri"] for u in document_uri_dicts],
created=created,
updated=updated,
)
if documents.count() > 1:
document = merge_documents(session, documents, updated=updated)
else:
document = documents.first()
document.updated = updated
for document_uri_dict in document_uri_dicts:
create_or_update_document_uri(
session=session,
document=document,
created=created,
updated=updated,
**document_uri_dict,
)
document.update_web_uri()
for document_meta_dict in document_meta_dicts:
create_or_update_document_meta(
session=session,
document=document,
created=created,
updated=updated,
**document_meta_dict,
)
return document
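# A minimal usage sketch (assumptions: `session` is an open SQLAlchemy session and
# both dict lists have already been produced by the "document" validation step;
# the URI and the dict keys below are illustrative only):
#
#     document = update_document_metadata(
#         session,
#         target_uri="https://example.com/article",
#         document_meta_dicts=[{"type": "title", "value": ["Example article"]}],
#         document_uri_dicts=[{"claimant": "https://example.com/article",
#                              "uri": "https://example.com/article",
#                              "type": "self-claim", "content_type": ""}],
#     )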
| 32.8159 | 99 | 0.643759 | 969 | 7,843 | 5.069143 | 0.223942 | 0.035831 | 0.019544 | 0.028502 | 0.239821 | 0.134365 | 0.134365 | 0.119707 | 0.106678 | 0.063518 | 0 | 0.000709 | 0.281015 | 7,843 | 238 | 100 | 32.953782 | 0.870367 | 0.304348 | 0 | 0.22695 | 0 | 0 | 0.050445 | 0.010244 | 0 | 0 | 0 | 0.004202 | 0 | 1 | 0.049645 | false | 0 | 0.070922 | 0.007092 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a03b0b4b278059869a25f28237d155a190ad1bc | 3,243 | py | Python | servicecatalog_puppet/workflow/service_control_policies/do_terminate_service_control_policies_task.py | mtrampic/aws-service-catalog-puppet | faa6ebe15929dc0040b85e5fd3313161821daa36 | [
"Apache-2.0"
] | 2 | 2019-04-12T23:28:46.000Z | 2019-04-15T15:35:04.000Z | servicecatalog_puppet/workflow/service_control_policies/do_terminate_service_control_policies_task.py | mtrampic/aws-service-catalog-puppet | faa6ebe15929dc0040b85e5fd3313161821daa36 | [
"Apache-2.0"
] | null | null | null | servicecatalog_puppet/workflow/service_control_policies/do_terminate_service_control_policies_task.py | mtrampic/aws-service-catalog-puppet | faa6ebe15929dc0040b85e5fd3313161821daa36 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import functools
import luigi
from servicecatalog_puppet import constants
from servicecatalog_puppet.workflow import dependency
from servicecatalog_puppet.workflow.service_control_policies import (
service_control_policies_base_task,
get_or_create_policy_task,
)
from servicecatalog_puppet.workflow.manifest import manifest_mixin
class DoTerminateServiceControlPoliciesTask(
service_control_policies_base_task.ServiceControlPoliciesBaseTask,
manifest_mixin.ManifestMixen,
dependency.DependenciesMixin,
):
service_control_policy_name = luigi.Parameter()
puppet_account_id = luigi.Parameter()
region = luigi.Parameter()
account_id = luigi.Parameter()
ou_name = luigi.Parameter()
content = luigi.DictParameter()
description = luigi.Parameter()
requested_priority = luigi.IntParameter()
def params_for_results_display(self):
return {
"puppet_account_id": self.puppet_account_id,
"service_control_policy_name": self.service_control_policy_name,
"region": self.region,
"account_id": self.account_id,
"ou_name": self.ou_name,
"cache_invalidator": self.cache_invalidator,
}
def requires(self):
return dict(
policy=get_or_create_policy_task.GetOrCreatePolicyTask(
puppet_account_id=self.puppet_account_id,
region=self.region,
policy_name=self.service_control_policy_name,
policy_description=self.description,
policy_content=self.content,
tags=self.manifest.get(constants.SERVICE_CONTROL_POLICIES)
.get(self.service_control_policy_name)
.get("tags", []),
)
)
def api_calls_used(self):
return [
f"organizations.detach_policy_{self.region}",
]
@functools.lru_cache(maxsize=32)
def target(self):
with self.organizations_policy_client() as orgs:
if self.account_id != "":
return self.account_id
else:
if str(self.ou_name).startswith("/"):
return orgs.convert_path_to_ou(self.ou_name)
else:
return self.ou_name
def has_policy_attached(self, orgs):
paginator = orgs.get_paginator("list_policies_for_target")
for page in paginator.paginate(
TargetId=self.target(), Filter="SERVICE_CONTROL_POLICY"
):
for policy in page.get("Policies", []):
if policy.get("Name") == self.service_control_policy_name:
return True
return False
def run(self):
with self.organizations_policy_client() as orgs:
self.info("Ensuring attachments for policies")
policy_id = self.load_from_input("policy").get("Id")
if self.has_policy_attached(orgs):
orgs.detach_policy(PolicyId=policy_id, TargetId=self.target())
self.write_output("terminated")
else:
self.write_output("skipped")
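# A minimal invocation sketch (every parameter value below is hypothetical; in the
# real tool this task is scheduled by the puppet workflow rather than built by hand):
#
#     task = DoTerminateServiceControlPoliciesTask(
#         service_control_policy_name="example-scp",
#         puppet_account_id="012345678912",
#         region="eu-west-1",
#         account_id="",                  # empty account_id -> fall back to ou_name
#         ou_name="/workloads/prod",
#         content={"Version": "2012-10-17", "Statement": []},
#         description="example policy",
#         requested_priority=10,
#     )
#     luigi.build([task], local_scheduler=True)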
| 35.25 | 78 | 0.647549 | 347 | 3,243 | 5.755043 | 0.314121 | 0.077116 | 0.070105 | 0.072108 | 0.196294 | 0.131197 | 0.115173 | 0.043065 | 0 | 0 | 0 | 0.003368 | 0.267653 | 3,243 | 91 | 79 | 35.637363 | 0.837474 | 0.033303 | 0 | 0.092105 | 0 | 0 | 0.078569 | 0.03641 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.078947 | 0.039474 | 0.381579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a096ae72f6696576069a0f41a103ea60b77363d | 1,525 | py | Python | surrogate/crossover/cxOnePoint.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | 3 | 2021-01-06T03:01:18.000Z | 2022-03-21T03:02:55.000Z | surrogate/crossover/cxOnePoint.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | null | null | null | surrogate/crossover/cxOnePoint.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Quan Pan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: Apache License, Version 2.0
# Create: 2016-12-02
import numpy as np
def cxOnePoint(var1, var2):
"""Executes a one point crossover on the input :term:`sequence` individuals.
The two individuals are modified in place. The resulting individuals will
respectively have the length of the other.
:param var1: The first variable participating in the crossover.
:param var2: The second variable participating in the crossover.
:returns: A tuple of two variables.
    This function uses the :func:`~numpy.random.randint` function from the
    :mod:`numpy.random` module.
"""
size = min(len(var1), len(var2))
# size = min(var1.size, var2.size)
cxpoint = np.random.randint(1, size - 1)
var1[cxpoint:], var2[cxpoint:] = var2[cxpoint:], var1[cxpoint:]
# var1[cxpoint:], var2[cxpoint:] = var2[cxpoint:].copy(), var1[cxpoint:].copy()
return var1, var2
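# A minimal sketch of the crossover in action (assumption: plain Python lists stand
# in for the individuals; list slicing returns copies, so the in-place tail swap
# above behaves as intended without the `.copy()` variant).
if __name__ == "__main__":
    parent1 = list(range(10))        # [0, 1, ..., 9]
    parent2 = list(range(10, 20))    # [10, 11, ..., 19]
    child1, child2 = cxOnePoint(parent1, parent2)
    print(child1)  # tail beyond the random cut point now comes from parent2
    print(child2)  # and parent2's tail comes from parent1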
| 38.125 | 83 | 0.717377 | 222 | 1,525 | 4.927928 | 0.536036 | 0.054845 | 0.065814 | 0.038391 | 0.164534 | 0.060329 | 0 | 0 | 0 | 0 | 0 | 0.032985 | 0.184918 | 1,525 | 39 | 84 | 39.102564 | 0.847144 | 0.796066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a0ab3467b0e8d79f90d067c5362b24cc643f7bf | 3,855 | py | Python | src/passthrough/label_tools.py | ExoMars-PanCam/passthrough | 7ff9f82e4c85c40a4f2dab20bbee1c46d79d61a5 | [
"MIT"
] | 2 | 2021-05-04T04:30:37.000Z | 2021-05-04T12:17:22.000Z | src/passthrough/label_tools.py | ExoMars-PanCam/passthrough | 7ff9f82e4c85c40a4f2dab20bbee1c46d79d61a5 | [
"MIT"
] | 4 | 2021-05-04T16:56:49.000Z | 2021-05-12T17:00:07.000Z | src/passthrough/label_tools.py | ExoMars-PanCam/passthrough | 7ff9f82e4c85c40a4f2dab20bbee1c46d79d61a5 | [
"MIT"
] | null | null | null | """PDS4 label interrogation and manipulation functionality"""
__all__ = [
"LabelLike",
"PDS_NS_PREFIX",
"ATTR_PATHS",
"labellike_to_etree",
"add_default_ns",
"is_populated",
"PathManipulator",
]
from pathlib import Path
from typing import Dict, Optional, Union
from lxml import etree
try:
from pds4_tools.reader.general_objects import StructureList
from pds4_tools.reader.label_objects import Label
except ModuleNotFoundError:
StructureList = None
Label = None
if None not in (StructureList, Label):
LabelLike = Union[etree._ElementTree, StructureList, Label, Path, str]
else:
LabelLike = Union[etree._ElementTree, Path, str]
PDS_NS_PREFIX = "pds"
# Common PDS4 attribute XPath shorthands
ATTR_PATHS = {
"lid": "//pds:Identification_Area/pds:logical_identifier",
"start": "//pds:Time_Coordinates/pds:start_date_time",
"stop": "//pds:Time_Coordinates/pds:stop_date_time",
# "type": "//msn:Mission_Information/msn:product_type_name",
# "sub_instrument": "//psa:Sub-Instrument/psa:identifier",
# "exposure_duration": "//img:Exposure/img:exposure_duration",
}
def labellike_to_etree(labellike: LabelLike) -> etree._ElementTree:
if isinstance(labellike, etree._ElementTree):
return labellike
if isinstance(labellike, Path):
labellike = str(labellike.expanduser().resolve())
# continue to handling of str
if isinstance(labellike, str):
return etree.parse(labellike)
base_url = None
if StructureList is not None and isinstance(labellike, StructureList):
prefix = "Processing label: "
log = labellike.read_in_log.split("\n")[0]
if log.startswith(prefix):
# *should* always resolve to the abs path of the XML label
base_url = log[len(prefix) :]
labellike = labellike.label
# continue to handling of Label
if Label is not None and isinstance(labellike, Label):
return etree.fromstring(
labellike.to_string(unmodified=True), base_url=base_url
).getroottree()
raise TypeError(
f"unknown label format {type(labellike)}, expected one of {LabelLike}"
)
def add_default_ns(nsmap: Dict[Optional[str], str]) -> Dict[str, str]:
nsmap[PDS_NS_PREFIX] = nsmap[None]
del nsmap[None]
return nsmap
def is_populated(elem: etree._Element):
if elem.text is not None and bool(elem.text.strip()):
return True
if (
"xsi" in elem.nsmap
and elem.attrib.get(f"{{{elem.nsmap['xsi']}}}nil", False) == "true"
):
return True
return False
class PathManipulator:
def __init__(self, nsmap: dict, default_prefix: str = PDS_NS_PREFIX):
self._nsmap = nsmap
self._default_prefix = default_prefix
def clark_to_prefix(self, path: str):
"""
Transforms paths provided in Clark notation (`{nsURI}tag`) to XPath-valid prefix
notation (`nsPrefix:tag`).
:param path: path string in Clark notation (e.g. ElementPath)
:return: path string in prefix notation
"""
for prefix, uri in self._nsmap.items():
path = path.replace(f"{{{uri}}}", f"{prefix}:")
return path
def prefix_default_ns(self, path: str):
segments = []
for segment in path.split("/"):
if segment.startswith("*"):
raise RuntimeError(f"path segment not yet supported: '{segment}'")
elif ":" in segment: # assume : marks the end of a prefix in this segment
segments.append(segment)
elif len(segment): # empty segments occur for abs. paths or //
segments.append(f"{self._default_prefix}:{segment}")
segments.append("/")
else:
segments.pop() # remove trailing /
return "".join(segments)
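# A minimal end-to-end sketch (the label path is hypothetical, and the label is
# assumed to declare the PDS4 namespace as its default namespace):
#
#     tree = labellike_to_etree(Path("/data/example_product.xml"))
#     nsmap = add_default_ns(dict(tree.getroot().nsmap))
#     lid = tree.xpath(ATTR_PATHS["lid"], namespaces=nsmap)[0]
#     print(lid.text, is_populated(lid))
#     pm = PathManipulator(nsmap)
#     pm.clark_to_prefix("{http://pds.nasa.gov/pds4/pds/v1}logical_identifier")
#     # -> "pds:logical_identifier", assuming the label maps `pds` to that URI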
| 33.232759 | 88 | 0.649027 | 463 | 3,855 | 5.2527 | 0.336933 | 0.039063 | 0.018092 | 0.014803 | 0.025493 | 0.025493 | 0 | 0 | 0 | 0 | 0 | 0.001701 | 0.237613 | 3,855 | 115 | 89 | 33.521739 | 0.825791 | 0.183917 | 0 | 0.048193 | 0 | 0 | 0.147211 | 0.061284 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072289 | false | 0 | 0.060241 | 0 | 0.253012 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a0c6398b24a0691cc03d510952090b45b49adee | 5,299 | py | Python | tests/integ/sagemaker/lineage/test_artifact.py | longyuzhao/sagemaker-python-sdk | 5c6c8e9a8a414627caa7e1d3d80d44cdc2a1c01f | [
"Apache-2.0"
] | 1,690 | 2017-11-29T20:13:37.000Z | 2022-03-31T12:58:11.000Z | tests/integ/sagemaker/lineage/test_artifact.py | longyuzhao/sagemaker-python-sdk | 5c6c8e9a8a414627caa7e1d3d80d44cdc2a1c01f | [
"Apache-2.0"
] | 2,762 | 2017-12-04T05:18:03.000Z | 2022-03-31T23:40:11.000Z | tests/integ/sagemaker/lineage/test_artifact.py | longyuzhao/sagemaker-python-sdk | 5c6c8e9a8a414627caa7e1d3d80d44cdc2a1c01f | [
"Apache-2.0"
] | 961 | 2017-11-30T16:44:03.000Z | 2022-03-30T23:12:09.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains code to test SageMaker ``Artifacts``"""
from __future__ import absolute_import
import datetime
import logging
import time
import pytest
from sagemaker.lineage import artifact
from tests.integ.sagemaker.lineage.helpers import retry
def test_create_delete(artifact_obj):
# fixture does create and then delete, this test ensures it happens at least once
assert artifact_obj.artifact_arn
def test_create_delete_with_association(artifact_obj_with_association):
# fixture does create and then delete, this test ensures it happens at least once
assert artifact_obj_with_association.artifact_arn
def test_save(artifact_obj, sagemaker_session):
artifact_obj.properties = {"k3": "v3"}
artifact_obj.properties_to_remove = ["k1"]
artifact_obj.save()
loaded = artifact.Artifact.load(
artifact_arn=artifact_obj.artifact_arn, sagemaker_session=sagemaker_session
)
assert {"k3": "v3"} == loaded.properties
def test_load(artifact_obj, sagemaker_session):
assert artifact_obj.artifact_name
logging.info(f"loading {artifact_obj.artifact_name}")
loaded = artifact.Artifact.load(
artifact_arn=artifact_obj.artifact_arn, sagemaker_session=sagemaker_session
)
assert artifact_obj.artifact_arn == loaded.artifact_arn
def test_list(artifact_objs, sagemaker_session):
slack = datetime.timedelta(minutes=1)
now = datetime.datetime.now(datetime.timezone.utc)
artifact_names = [art.artifact_name for art in artifact_objs]
for sort_order in ["Ascending", "Descending"]:
artifact_names_listed = [
artifact_listed.artifact_name
for artifact_listed in artifact.Artifact.list(
created_after=now - slack,
created_before=now + slack,
sort_by="CreationTime",
sort_order=sort_order,
sagemaker_session=sagemaker_session,
)
if artifact_listed.artifact_name in artifact_names
]
if sort_order == "Descending":
artifact_names_listed = artifact_names_listed[::-1]
assert artifact_names == artifact_names_listed
# sanity check
assert artifact_names
def test_list_by_type(artifact_objs, sagemaker_session):
slack = datetime.timedelta(minutes=1)
now = datetime.datetime.now(datetime.timezone.utc)
expected_name = list(
filter(lambda x: x.artifact_type == "SDKIntegrationTestType2", artifact_objs)
)[0].artifact_name
artifact_names = [art.artifact_name for art in artifact_objs]
artifact_names_listed = [
artifact_listed.artifact_name
for artifact_listed in artifact.Artifact.list(
created_after=now - slack,
artifact_type="SDKIntegrationTestType2",
sagemaker_session=sagemaker_session,
)
if artifact_listed.artifact_name in artifact_names
]
assert len(artifact_names_listed) == 1
assert artifact_names_listed[0] == expected_name
def test_downstream_trials(trial_associated_artifact, trial_obj, sagemaker_session):
# allow trial components to index, 30 seconds max
def validate():
for i in range(3):
time.sleep(10)
trials = trial_associated_artifact.downstream_trials(
sagemaker_session=sagemaker_session
)
logging.info(f"Found {len(trials)} downstream trials.")
if len(trials) > 0:
break
assert len(trials) == 1
assert trial_obj.trial_name in trials
retry(validate, num_attempts=3)
@pytest.mark.timeout(30)
def test_tag(artifact_obj, sagemaker_session):
tag = {"Key": "foo", "Value": "bar"}
artifact_obj.set_tag(tag)
while True:
actual_tags = sagemaker_session.sagemaker_client.list_tags(
ResourceArn=artifact_obj.artifact_arn
)["Tags"]
if actual_tags:
break
time.sleep(5)
# When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
# length of actual tags will be greater than 1
assert len(actual_tags) > 0
assert actual_tags[0] == tag
@pytest.mark.timeout(30)
def test_tags(artifact_obj, sagemaker_session):
tags = [{"Key": "foo1", "Value": "bar1"}]
artifact_obj.set_tags(tags)
while True:
actual_tags = sagemaker_session.sagemaker_client.list_tags(
ResourceArn=artifact_obj.artifact_arn
)["Tags"]
if actual_tags:
break
time.sleep(5)
# When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
# length of actual tags will be greater than 1
assert len(actual_tags) > 0
assert [actual_tags[-1]] == tags
| 33.751592 | 92 | 0.701076 | 672 | 5,299 | 5.303571 | 0.282738 | 0.061728 | 0.042649 | 0.037037 | 0.500842 | 0.485971 | 0.454545 | 0.43266 | 0.43266 | 0.43266 | 0 | 0.009429 | 0.219475 | 5,299 | 156 | 93 | 33.967949 | 0.852273 | 0.203623 | 0 | 0.398058 | 0 | 0 | 0.049821 | 0.01764 | 0 | 0 | 0 | 0 | 0.145631 | 1 | 0.097087 | false | 0 | 0.067961 | 0 | 0.165049 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a0d4230edf032b1e43ef100b2abb90af972e5fb | 1,962 | py | Python | App.py | dsmarcot2018/imdb-poster-maker | de3e4769b69cc2fe23abf7a4198afa5c78007533 | [
"MIT"
] | null | null | null | App.py | dsmarcot2018/imdb-poster-maker | de3e4769b69cc2fe23abf7a4198afa5c78007533 | [
"MIT"
] | null | null | null | App.py | dsmarcot2018/imdb-poster-maker | de3e4769b69cc2fe23abf7a4198afa5c78007533 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
import requests
import json
app = Flask(__name__)
@app.route('/')
@app.route('/<show_title>'
'<show_image_height>'
'<show_image_imageUrl>'
'<show_image_width>'
           '<show_rank>'
           '<show_yr>')
def overlay(show_title=None,
show_image_height=None,
show_image_imageUrl=None,
show_image_width=None,
show_rank=None,
show_yr=None):
url = "https://imdb8.p.rapidapi.com/auto-complete"
try_variable = True
while try_variable:
try:
query = input("What show would you like a poster for: ")
querystring = {"q": query}
headers = {
'x-rapidapi-key': "fb82ae7848msh91722b54eeeec8cp17c717jsn08b7a3ab507e",
'x-rapidapi-host': "imdb8.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers, params=querystring)
load_variable = json.loads(response.text)
show_title = str(load_variable["d"][0]["l"])
show_image_height = str(load_variable["d"][0]["i"]["height"])
show_image_imageUrl = str(load_variable["d"][0]["i"]["imageUrl"])
show_image_width = str(load_variable["d"][0]["i"]["width"])
show_rank = str(load_variable["d"][0]["rank"])
show_yr = str(load_variable["d"][0]["yr"])
try_variable = False
except KeyError:
print("Please enter a valid show\n")
return render_template('Overlay.html',
show_title=show_title,
show_image_height=show_image_height,
show_image_imageUrl=show_image_imageUrl,
show_image_width=show_image_width,
show_rank=show_rank,
show_yr=show_yr)
if __name__ == '__main__':
app.run()
| 30.184615 | 88 | 0.555046 | 216 | 1,962 | 4.722222 | 0.342593 | 0.132353 | 0.088235 | 0.094118 | 0.279412 | 0.229412 | 0.147059 | 0.080392 | 0 | 0 | 0 | 0.02568 | 0.325178 | 1,962 | 64 | 89 | 30.65625 | 0.744713 | 0 | 0 | 0 | 0 | 0 | 0.180428 | 0.036188 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.06383 | 0 | 0.106383 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a0d661f1dae839871dabe2d04bb61bb6c6dcc1f | 1,691 | py | Python | App-Installer.py | m-jishnu/Microsoft-Store-App-Installer | 019e6b74835fc2b032b278e7d867bdb1923c42a1 | [
"MIT"
] | null | null | null | App-Installer.py | m-jishnu/Microsoft-Store-App-Installer | 019e6b74835fc2b032b278e7d867bdb1923c42a1 | [
"MIT"
] | null | null | null | App-Installer.py | m-jishnu/Microsoft-Store-App-Installer | 019e6b74835fc2b032b278e7d867bdb1923c42a1 | [
"MIT"
] | null | null | null | import os
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from windows import set_dpi_awareness
import webbrowser
def callback(url):
webbrowser.open_new(url)
set_dpi_awareness()
try:
def select_file():
filename = filedialog.askopenfilename(initialdir="/",
title="Select a File")
os.system(f'powershell.exe Add-AppPackage "{filename}"')
# Create the root window
window = tk.Tk()
# Set window title
window.title('file Installer')
# icon set
# window.iconbitmap(path)
label = ttk.Label(window,
text="file Installer V1.1")
label.config(font=("Courier", 12))
button_explore = ttk.Button(window,
text="Select File",
command=select_file)
button_exit = ttk.Button(window,
text="Exit",
command=window.destroy)
label_credits = ttk.Label(window,
text="By TechoZ")
label_credits.config(font=("Courier", 12))
label.grid(column=0, row=0, padx=100, pady=10)
button_explore.grid(column=0, row=1, padx=10, pady=10)
button_exit.grid(column=0, row=2, padx=10, pady=2)
label_credits.grid(column=0, row=3, padx=10,
sticky='E', columnspan=True)
label_credits.bind(
"<Button-1>", lambda e: callback("http://youtube.com/c/techoz_youtube_channel"))
window.mainloop()
except:
import traceback
traceback.print_exc()
input("Press Enter to end...")
| 26.015385 | 89 | 0.562389 | 193 | 1,691 | 4.834197 | 0.450777 | 0.042872 | 0.04716 | 0.060021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025528 | 0.328208 | 1,691 | 64 | 90 | 26.421875 | 0.795775 | 0.042578 | 0 | 0 | 0 | 0 | 0.130323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.175 | 0 | 0.225 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a1102dcc914f81241efa01b8276e048cbbebf9d | 4,590 | py | Python | mangum/adapter.py | kita99/mangum | 961ff7cf3b9fa70ccbca188b13530546fd3359b6 | [
"MIT"
] | null | null | null | mangum/adapter.py | kita99/mangum | 961ff7cf3b9fa70ccbca188b13530546fd3359b6 | [
"MIT"
] | null | null | null | mangum/adapter.py | kita99/mangum | 961ff7cf3b9fa70ccbca188b13530546fd3359b6 | [
"MIT"
] | null | null | null | import logging
from contextlib import ExitStack
from typing import (
Any,
ContextManager,
Callable,
Dict,
Optional,
TYPE_CHECKING,
)
from .exceptions import ConfigurationError
from .handlers import AbstractHandler
from .protocols import HTTPCycle, WebSocketCycle, LifespanCycle
from .backends import WebSocket
from .types import ASGIApp, WsRequest
if TYPE_CHECKING: # pragma: no cover
from awslambdaric.lambda_context import LambdaContext
DEFAULT_TEXT_MIME_TYPES = [
"text/",
"application/json",
"application/javascript",
"application/xml",
"application/vnd.api+json",
]
logger = logging.getLogger("mangum")
class Mangum:
"""
Creates an adapter instance.
* **app** - An asynchronous callable that conforms to version 3.0 of the ASGI
specification. This will usually be an ASGI framework application instance.
* **lifespan** - A string to configure lifespan support. Choices are `auto`, `on`,
and `off`. Default is `auto`.
* **api_gateway_base_path** - Base path to strip from URL when using a custom
domain name.
* **text_mime_types** - A list of MIME types to include with the defaults that
should not return a binary response in API Gateway.
* **dsn** - A connection string required to configure a supported WebSocket backend.
* **api_gateway_endpoint_url** - A string endpoint url to use for API Gateway when
sending data to WebSocket connections. Default is to determine this automatically.
* **api_gateway_region_name** - A string region name to use for API Gateway when
sending data to WebSocket connections. Default is `AWS_REGION` environment variable.
"""
app: ASGIApp
lifespan: str = "auto"
dsn: Optional[str] = None
api_gateway_endpoint_url: Optional[str] = None
api_gateway_region_name: Optional[str] = None
connect_hook: Optional[Callable] = None
disconnect_hook: Optional[Callable] = None
def __init__(
self,
app: ASGIApp,
lifespan: str = "auto",
dsn: Optional[str] = None,
api_gateway_endpoint_url: Optional[str] = None,
api_gateway_region_name: Optional[str] = None,
connect_hook: Optional[Callable] = None,
disconnect_hook: Optional[Callable] = None,
**handler_kwargs: Dict[str, Any]
) -> None:
self.app = app
self.lifespan = lifespan
self.dsn = dsn
self.api_gateway_endpoint_url = api_gateway_endpoint_url
self.api_gateway_region_name = api_gateway_region_name
self.handler_kwargs = handler_kwargs
self.connect_hook = connect_hook
self.disconnect_hook = disconnect_hook
if self.lifespan not in ("auto", "on", "off"):
raise ConfigurationError(
"Invalid argument supplied for `lifespan`. Choices are: auto|on|off"
)
if connect_hook and not callable(connect_hook):
raise Exception("Invalid connect_hook supplied. Must be a callable")
if disconnect_hook and not callable(disconnect_hook):
raise Exception("Invalid disconnect_hook supplied. Must be callable")
def __call__(self, event: dict, context: "LambdaContext") -> dict:
logger.debug("Event received.")
with ExitStack() as stack:
if self.lifespan != "off":
lifespan_cycle: ContextManager = LifespanCycle(self.app, self.lifespan)
stack.enter_context(lifespan_cycle)
handler = AbstractHandler.from_trigger(
event, context, **self.handler_kwargs
)
request = handler.request
if isinstance(request, WsRequest):
api_gateway_endpoint_url = (
self.api_gateway_endpoint_url or handler.api_gateway_endpoint_url
)
websocket = WebSocket(
dsn=self.dsn,
api_gateway_endpoint_url=api_gateway_endpoint_url,
api_gateway_region_name=self.api_gateway_region_name,
connect_hook=self.connect_hook,
disconnect_hook=self.disconnect_hook,
)
websocket_cycle = WebSocketCycle(
request, handler.message_type, handler.connection_id, websocket
)
response = websocket_cycle(self.app, handler.body)
else:
http_cycle = HTTPCycle(request)
response = http_cycle(self.app, handler.body)
return handler.transform_response(response)
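# A minimal wiring sketch (assumption: `app` is any ASGI 3.0 application, e.g. a
# Starlette or FastAPI instance; the DSN below is only a placeholder):
#
#     handler = Mangum(app, lifespan="off")                      # plain HTTP events
#     ws_handler = Mangum(app, dsn="sqlite:///connections.db")   # with a WebSocket backend
#
# AWS Lambda is then configured to invoke `handler(event, context)` directly.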
| 36.428571 | 88 | 0.654248 | 518 | 4,590 | 5.596525 | 0.291506 | 0.072439 | 0.06209 | 0.072439 | 0.261469 | 0.216971 | 0.213522 | 0.19662 | 0.167644 | 0.167644 | 0 | 0.000597 | 0.269717 | 4,590 | 125 | 89 | 36.72 | 0.86426 | 0.207625 | 0 | 0 | 0 | 0 | 0.084337 | 0.012889 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022472 | false | 0 | 0.101124 | 0 | 0.224719 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a111caf55597e56c7d387d6a2d92cdf594238ea | 3,439 | py | Python | peregrinearb/tests/bellmannx_test.py | lyn716/peregrine | 5b1f6a839bf4a86198ad85f527b04b9a34ea7ab9 | [
"MIT"
] | null | null | null | peregrinearb/tests/bellmannx_test.py | lyn716/peregrine | 5b1f6a839bf4a86198ad85f527b04b9a34ea7ab9 | [
"MIT"
] | null | null | null | peregrinearb/tests/bellmannx_test.py | lyn716/peregrine | 5b1f6a839bf4a86198ad85f527b04b9a34ea7ab9 | [
"MIT"
] | null | null | null | from unittest import TestCase
from peregrinearb import bellman_ford_multi, multi_digraph_from_json, multi_digraph_from_dict, \
calculate_profit_ratio_for_path, bellman_ford
import json
import networkx as nx
def graph_from_dict(graph_dict):
if 'graph_type' not in graph_dict:
raise ValueError('graph_dict must contain key "graph_type"')
if graph_dict['graph_type'] == 'MultiDiGraph':
return multi_digraph_from_dict(graph_dict['graph_dict'])
elif graph_dict['graph_type'] == 'MultiGraph':
return nx.from_dict_of_dicts(graph_dict['graph_dict'], multigraph_input=True)
elif graph_dict['graph_type'] == 'DiGraph':
return nx.from_dict_of_dicts(graph_dict['graph_dict'])
elif graph_dict['graph_type'] == 'Graph':
return nx.from_dict_of_dicts(graph_dict['graph_dict'])
elif graph_dict['graph_type'] == 'other':
return nx.from_dict_of_dicts(graph_dict['graph_dict'])
else:
raise ValueError("the value for 'graph_type' in graph_dict is not of the accepted values.")
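# Example of the expected input shape (the node names and edge weights below are
# hypothetical):
#
#     graph_from_dict({
#         "graph_type": "DiGraph",
#         "graph_dict": {
#             "BTC": {"USD": {"weight": -9.2}},
#             "USD": {"BTC": {"weight": 9.1}},
#         },
#     })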
def digraph_from_multi_graph_json(file_name):
"""
    file_name should hold a JSON file which represents a MultiDiGraph with at
    most two edges, one in each direction, between each pair of nodes.
:param file_name:
"""
with open(file_name) as f:
data = json.load(f)
G = nx.DiGraph()
for node in data.keys():
neighbors = data[node]
for neighbor, v in neighbors.items():
for key, data_dict in v.items():
G.add_edge(node, neighbor, **data_dict)
return G
class TestBellmanFordMultiGraph(TestCase):
def test_path_beginning_equals_end(self):
graph = multi_digraph_from_json('test_multigraph.json')
for node in graph:
new_graph, paths = bellman_ford_multi(graph, node)
for path in paths:
if path:
self.assertEqual(path[0], path[-1])
def test_positive_ratio(self):
graph = multi_digraph_from_json('test_multigraph.json')
for node in graph:
new_graph, paths = bellman_ford_multi(graph, node)
for path in paths:
if path:
# assert that the path is a negative weight cycle
ratio = calculate_profit_ratio_for_path(new_graph, path)
# python float precision may round some numbers to 1.0.
self.assertGreaterEqual(ratio, 1.0)
def test_loop_from_source(self):
graph = multi_digraph_from_json('test_multigraph.json')
for node in graph:
new_graph, paths = bellman_ford_multi(graph, node, loop_from_source=True)
for path in paths:
if path:
self.assertEqual(path[0], path[-1])
self.assertEqual(node, path[0])
class TestBellmannx(TestCase):
def test_ensure_profit_yields_profit(self):
graph = nx.DiGraph()
graph.add_edge(0, 1, weight=4)
graph.add_edge(1, 0, weight=3)
graph.add_edge(1, 2, weight=-1)
graph.add_edge(2, 3, weight=-1)
graph.add_edge(3, 1, weight=-1)
paths = bellman_ford(graph, 0, loop_from_source=True, ensure_profit=True)
for path in paths:
weight = 0
for i in range(len(path) - 1):
weight += graph[path[i]][path[i + 1]]['weight']
self.assertLess(weight, 0)
| 36.978495 | 119 | 0.640012 | 469 | 3,439 | 4.439232 | 0.245203 | 0.082133 | 0.067243 | 0.043228 | 0.379923 | 0.314601 | 0.314601 | 0.314601 | 0.314601 | 0.295389 | 0 | 0.01149 | 0.266066 | 3,439 | 92 | 120 | 37.380435 | 0.813391 | 0.077057 | 0 | 0.294118 | 0 | 0 | 0.103591 | 0 | 0 | 0 | 0 | 0 | 0.073529 | 1 | 0.088235 | false | 0 | 0.058824 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a132862707004f44e3168b4c3953ddf92017152 | 2,388 | py | Python | examples/example_jumping_robot/src/jr_graph_builder.py | danbarla/GTDynamics | 0448b359aff9e0e784832666e4048ee01c8b082d | [
"BSD-2-Clause"
] | null | null | null | examples/example_jumping_robot/src/jr_graph_builder.py | danbarla/GTDynamics | 0448b359aff9e0e784832666e4048ee01c8b082d | [
"BSD-2-Clause"
] | null | null | null | examples/example_jumping_robot/src/jr_graph_builder.py | danbarla/GTDynamics | 0448b359aff9e0e784832666e4048ee01c8b082d | [
"BSD-2-Clause"
] | null | null | null | """
* GTDynamics Copyright 2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file jr_graph_builder.py
* @brief Create factor graphs for the jumping robot.
* @author Yetong Zhang
"""
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
sys.path.insert(0,currentdir)
import gtdynamics as gtd
import gtsam
from gtsam import noiseModel, NonlinearFactorGraph
import numpy as np
from jumping_robot import Actuator, JumpingRobot
from actuation_graph_builder import ActuationGraphBuilder
from robot_graph_builder import RobotGraphBuilder
class JRGraphBuilder:
""" Class that constructs factor graphs for a jumping robot. """
def __init__(self):
"""Initialize the graph builder, specify all noise models."""
self.robot_graph_builder = RobotGraphBuilder()
self.actuation_graph_builder = ActuationGraphBuilder()
def collocation_graph(self, jr: JumpingRobot, step_phases: list):
""" Create a factor graph containing collocation constraints. """
graph = self.actuation_graph_builder.collocation_graph(jr, step_phases)
graph.push_back(self.robot_graph_builder.collocation_graph(jr, step_phases))
# add collocation factors for time
for time_step in range(len(step_phases)):
phase = step_phases[time_step]
k_prev = time_step
k_curr = time_step+1
dt_key = gtd.PhaseKey(phase).key()
time_prev_key = gtd.TimeKey(k_prev).key()
time_curr_key = gtd.TimeKey(k_curr).key()
time_col_cost_model = self.robot_graph_builder.graph_builder.opt().time_cost_model
gtd.AddTimeCollocationFactor(graph, time_prev_key, time_curr_key,
dt_key, time_col_cost_model)
return graph
    def dynamics_graph(self, jr: JumpingRobot, k: int) -> NonlinearFactorGraph:
        """ Create a factor graph containing dynamics constraints for
the robot, actuators and source tank at a certain time step
"""
graph = self.actuation_graph_builder.dynamics_graph(jr, k)
graph.add(self.robot_graph_builder.dynamics_graph(jr, k))
return graph
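# A minimal usage sketch (assumptions: `jr` is a constructed JumpingRobot and
# `step_phases` lists the phase index of every collocation step; both are
# hypothetical stand-ins here):
#
#     builder = JRGraphBuilder()
#     graph = builder.dynamics_graph(jr, k=0)
#     graph.push_back(builder.collocation_graph(jr, step_phases=[0, 0, 1]))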
| 37.3125 | 94 | 0.708961 | 298 | 2,388 | 5.466443 | 0.362416 | 0.088398 | 0.052179 | 0.051565 | 0.18539 | 0.083487 | 0.04911 | 0 | 0 | 0 | 0 | 0.008502 | 0.211893 | 2,388 | 63 | 95 | 37.904762 | 0.857067 | 0.247069 | 0 | 0.058824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.235294 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a13cce4b5bf49cca71d1857149e23c28b79c998 | 5,660 | py | Python | yumewatari/gateware/phy_rx.py | whitequark/Yumewatari | 0981d8c832850c72745808c022dc63944a7164bc | [
"0BSD"
] | 49 | 2018-11-09T20:56:33.000Z | 2022-03-18T15:17:21.000Z | yumewatari/gateware/phy_rx.py | whitequark/Yumewatari | 0981d8c832850c72745808c022dc63944a7164bc | [
"0BSD"
] | null | null | null | yumewatari/gateware/phy_rx.py | whitequark/Yumewatari | 0981d8c832850c72745808c022dc63944a7164bc | [
"0BSD"
] | 2 | 2019-03-03T17:59:56.000Z | 2020-02-06T08:23:00.000Z | from migen import *
from .serdes import K, D
from .protocol import *
from .struct import *
__all__ = ["PCIePHYRX"]
class PCIePHYRX(Module):
def __init__(self, lane):
self.error = Signal()
self.comma = Signal()
self.ts = Record(ts_layout)
###
self.comb += lane.rx_align.eq(1)
self._tsY = Record(ts_layout) # previous TS received
self._tsZ = Record(ts_layout) # TS being received
self.sync += If(self.error, self._tsZ.valid.eq(0))
ts_id = Signal(9)
ts_inv = Signal()
self.submodules.parser = Parser(
symbol_size=9,
word_size=lane.ratio,
reset_rule="COMMA",
layout=[
("data", 8),
("ctrl", 1),
])
self.comb += [
self.parser.reset.eq(~lane.rx_valid),
self.parser.i.eq(lane.rx_symbol),
self.error.eq(self.parser.error)
]
self.parser.rule(
name="COMMA",
cond=lambda symbol: symbol.raw_bits() == K(28,5),
succ="TSn-LINK/SKP-0",
action=lambda symbol: [
self.comma.eq(1),
NextValue(self._tsZ.valid, 1),
NextValue(self._tsY.raw_bits(), self._tsZ.raw_bits()),
]
)
self.parser.rule(
name="TSn-LINK/SKP-0",
cond=lambda symbol: symbol.raw_bits() == K(28,0),
succ="SKP-1"
)
self.parser.rule(
name="TSn-LINK/SKP-0",
cond=lambda symbol: symbol.raw_bits() == K(23,7),
succ="TSn-LANE",
action=lambda symbol: [
NextValue(self._tsZ.link.valid, 0)
]
)
self.parser.rule(
name="TSn-LINK/SKP-0",
cond=lambda symbol: ~symbol.ctrl,
succ="TSn-LANE",
action=lambda symbol: [
NextValue(self._tsZ.link.number, symbol.data),
NextValue(self._tsZ.link.valid, 1)
]
)
for n in range(1, 3):
self.parser.rule(
name="SKP-%d" % n,
cond=lambda symbol: symbol.raw_bits() == K(28,0),
succ="COMMA" if n == 2 else "SKP-%d" % (n + 1),
)
self.parser.rule(
name="TSn-LANE",
cond=lambda symbol: symbol.raw_bits() == K(23,7),
succ="TSn-FTS",
action=lambda symbol: [
NextValue(self._tsZ.lane.valid, 0)
]
)
self.parser.rule(
name="TSn-LANE",
cond=lambda symbol: ~symbol.ctrl,
succ="TSn-FTS",
action=lambda symbol: [
NextValue(self._tsZ.lane.number, symbol.data),
NextValue(self._tsZ.lane.valid, 1)
]
)
self.parser.rule(
name="TSn-FTS",
cond=lambda symbol: ~symbol.ctrl,
succ="TSn-RATE",
action=lambda symbol: [
NextValue(self._tsZ.n_fts, symbol.data)
]
)
self.parser.rule(
name="TSn-RATE",
cond=lambda symbol: ~symbol.ctrl,
succ="TSn-CTRL",
action=lambda symbol: [
NextValue(self._tsZ.rate.raw_bits(), symbol.data)
]
)
self.parser.rule(
name="TSn-CTRL",
cond=lambda symbol: ~symbol.ctrl,
succ="TSn-ID0",
action=lambda symbol: [
NextValue(self._tsZ.ctrl.raw_bits(), symbol.data)
]
)
self.parser.rule(
name="TSn-ID0",
cond=lambda symbol: symbol.raw_bits() == D(10,2),
succ="TSn-ID1",
action=lambda symbol: [
NextMemory(ts_id, symbol.raw_bits()),
NextValue(ts_inv, 0),
NextValue(self._tsZ.ts_id, 0),
]
)
self.parser.rule(
name="TSn-ID0",
cond=lambda symbol: symbol.raw_bits() == D(5,2),
succ="TSn-ID1",
action=lambda symbol: [
NextMemory(ts_id, symbol.raw_bits()),
NextValue(ts_inv, 0),
NextValue(self._tsZ.ts_id, 1),
]
)
self.parser.rule(
name="TSn-ID0",
cond=lambda symbol: symbol.raw_bits() == D(21,5),
succ="TSn-ID1",
action=lambda symbol: [
NextMemory(ts_id, symbol.raw_bits()),
NextValue(ts_inv, 1),
]
)
self.parser.rule(
name="TSn-ID0",
cond=lambda symbol: symbol.raw_bits() == D(26,5),
succ="TSn-ID1",
action=lambda symbol: [
NextMemory(ts_id, symbol.raw_bits()),
NextValue(ts_inv, 1),
]
)
for n in range(1, 9):
self.parser.rule(
name="TSn-ID%d" % n,
cond=lambda symbol: symbol.raw_bits() == Memory(ts_id),
succ="TSn-ID%d" % (n + 1)
)
self.parser.rule(
name="TSn-ID9",
cond=lambda symbol: symbol.raw_bits() == Memory(ts_id),
succ="COMMA",
action=lambda symbol: [
NextValue(self.ts.valid, 0),
If(ts_inv,
NextValue(lane.rx_invert, ~lane.rx_invert)
).Elif(self._tsZ.raw_bits() == self._tsY.raw_bits(),
NextValue(self.ts.raw_bits(), self._tsY.raw_bits())
),
NextState("COMMA")
]
)
| 31.797753 | 71 | 0.462367 | 619 | 5,660 | 4.101777 | 0.140549 | 0.137062 | 0.088224 | 0.11343 | 0.692399 | 0.656558 | 0.555337 | 0.496258 | 0.471445 | 0.449783 | 0 | 0.019828 | 0.403004 | 5,660 | 177 | 72 | 31.977401 | 0.731577 | 0.006714 | 0 | 0.391566 | 0 | 0 | 0.050926 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006024 | false | 0 | 0.024096 | 0 | 0.036145 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a14cd7868b0061c5183291d7d5c6d6e9955ef57 | 1,568 | py | Python | lookup_extensions/backends/postgresql/base.py | uncovertruth/django-lookup-extensions | 3a8a57130c9092fc6b2458041084746488720b57 | [
"MIT"
] | 4 | 2018-05-23T08:01:55.000Z | 2019-01-18T00:51:11.000Z | lookup_extensions/backends/postgresql/base.py | uncovertruth/django-lookup-extensions | 3a8a57130c9092fc6b2458041084746488720b57 | [
"MIT"
] | 506 | 2018-02-22T07:52:29.000Z | 2019-11-04T14:26:27.000Z | lookup_extensions/backends/postgresql/base.py | uncovertruth/django-lookup-extensions | 3a8a57130c9092fc6b2458041084746488720b57 | [
"MIT"
] | null | null | null | from django.db.backends.postgresql.base import \
DatabaseWrapper as DjangoDatabaseWrapper
from lookup_extensions.utils import merge_dicts
from .operations import DatabaseOperations
class ExtendedDatabaseWrapperMixin(object):
ops_class = DatabaseOperations
operators = merge_dicts(
DjangoDatabaseWrapper.operators,
{
# For negates
'neexact': '<> %s',
'neiexact': '<> UPPER(%s)',
'necontains': 'NOT LIKE %s',
'neicontains': 'NOT LIKE UPPER(%s)',
'neregex': '!~ %s',
'neiregex': '!~* %s',
'nestartswith': 'NOT LIKE %s',
'neendswith': 'NOT LIKE %s',
'neistartswith': 'NOT LIKE UPPER(%s)',
'neiendswith': 'NOT LIKE UPPER(%s)',
# For exregex
'exregex': '~ %s',
'exiregex': '~* %s',
'neexregex': '!~ %s',
'neexiregex': '!~* %s',
}
)
pattern_ops = merge_dicts(
DjangoDatabaseWrapper.pattern_ops,
{
'necontains': r"NOT LIKE '%%' || {} || '%%'",
'neicontains': r"NOT LIKE '%%' || UPPER({}) || '%%'",
'nestartswith': r"NOT LIKE {} || '%%'",
'neistartswith': r"NOT LIKE UPPER({}) || '%%'",
'neendswith': r"NOT LIKE '%%' || {}",
'neiendswith': r"NOT LIKE '%%' || UPPER({})",
}
)
regex_synonyms = {
'\\<': '[[:<:]]',
'\\>': '[[:>:]]',
}
class DatabaseWrapper(ExtendedDatabaseWrapperMixin, DjangoDatabaseWrapper):
pass
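# A minimal usage sketch (assumptions: the operator keys above are registered by the
# package as ORM lookups of the same names, and `Author` is a hypothetical model):
#
#     Author.objects.filter(name__neiexact="alice")     # <> UPPER(%s)
#     Author.objects.filter(bio__necontains="draft")    # NOT LIKE %s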
| 30.745098 | 75 | 0.492985 | 121 | 1,568 | 6.322314 | 0.38843 | 0.109804 | 0.094118 | 0.05098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.321429 | 1,568 | 50 | 76 | 31.36 | 0.718985 | 0.014668 | 0 | 0 | 0 | 0 | 0.326848 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.02381 | 0.071429 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a14f82b5e611777a6a63f8b615dfc52398ba19e | 621 | py | Python | asn1tools/codecs/permitted_alphabet.py | cromulencellc/asn1tools | 30eb88e287cc1616903858aa96ee8791a4d7bf1c | [
"MIT"
] | 198 | 2017-08-04T21:49:15.000Z | 2022-03-26T10:11:21.000Z | asn1tools/codecs/permitted_alphabet.py | cromulencellc/asn1tools | 30eb88e287cc1616903858aa96ee8791a4d7bf1c | [
"MIT"
] | 144 | 2017-09-29T12:06:51.000Z | 2022-03-29T13:04:44.000Z | asn1tools/codecs/permitted_alphabet.py | cromulencellc/asn1tools | 30eb88e287cc1616903858aa96ee8791a4d7bf1c | [
"MIT"
] | 73 | 2017-10-09T13:33:28.000Z | 2022-03-11T01:35:22.000Z | """Permitted alphabet.
"""
import string
try:
unichr
except NameError:
unichr = chr
NUMERIC_STRING = ' 0123456789'
PRINTABLE_STRING = (string.ascii_uppercase
+ string.ascii_lowercase
+ string.digits
+ " '()+,-./:=?")
IA5_STRING = ''.join([chr(v) for v in range(128)])
# ud800 - udfff are reserved code points for utf-16 surrogates.
# at this point, do not support code points in supplementary planes.
BMP_STRING = ''.join([unichr(v) for v in range(65536) if v < 0xd800 or v > 0xdfff])
VISIBLE_STRING = ''.join([chr(v) for v in range(32, 127)])
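# A quick doctest-style sanity check of the constants above (for illustration):
#
#     >>> all(c in PRINTABLE_STRING for c in "Hello, World?")
#     True
#     >>> "@" in PRINTABLE_STRING, "@" in IA5_STRING
#     (False, True)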
| 23 | 83 | 0.618357 | 81 | 621 | 4.654321 | 0.604938 | 0.079576 | 0.039788 | 0.055703 | 0.164456 | 0.132626 | 0.132626 | 0.132626 | 0 | 0 | 0 | 0.073118 | 0.251208 | 621 | 26 | 84 | 23.884615 | 0.737634 | 0.239936 | 0 | 0 | 0 | 0 | 0.049676 | 0 | 0 | 0 | 0.025918 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a17557a447b6a424e9c591e4508f20003dc956a | 10,383 | py | Python | app/participant/views.py | vicoociv/bread-and-roses | bf53988d670b2a1e19883b394e249be0a1fbe934 | [
"MIT"
] | null | null | null | app/participant/views.py | vicoociv/bread-and-roses | bf53988d670b2a1e19883b394e249be0a1fbe934 | [
"MIT"
] | null | null | null | app/participant/views.py | vicoociv/bread-and-roses | bf53988d670b2a1e19883b394e249be0a1fbe934 | [
"MIT"
] | 1 | 2020-08-04T02:33:08.000Z | 2020-08-04T02:33:08.000Z | import datetime
from flask import abort, flash, redirect, render_template, url_for, request
from flask_login import current_user, login_required
from .forms import NewDonorForm, TodoToAsking, AskingToPledged, PledgedToCompleted
from ..decorators import admin_required
from . import participant
from .. import db
from ..models import Donor, Demographic, DonorStatus, Candidate, User
@participant.route('/<int:part_id>/')
@participant.route('/', defaults={'part_id': None})
@login_required
def index(part_id):
user = current_user
if part_id is not None:
if not current_user.is_admin():
return abort(403)
user = User.query.filter_by(id=part_id).first()
"""Participant dashboard page."""
donors_by_status = {
status.name: Donor.query.filter_by(
user_id=user.id, status=status).all()
for status in DonorStatus
}
def datestring(s):
return s.strftime('%b %d')
def datestring_alt(s):
return s.strftime('%b %d, %Y')
forms_by_donor = {}
for d in Donor.query.filter_by(user_id=user.id).all():
f = None
if d.status == DonorStatus.TODO:
f = TodoToAsking(donor=d.id)
elif d.status == DonorStatus.ASKING:
f = AskingToPledged(donor=d.id)
elif d.status == DonorStatus.PLEDGED:
f = PledgedToCompleted(donor=d.id)
else:
f = PledgedToCompleted(donor=d.id, amount_received=d.amount_received, date_received=d.date_received)
forms_by_donor[d.id] = f
return render_template('participant/index.html',
user=user,
donors_by_status=donors_by_status,
Status=DonorStatus,
datestring=datestring,
datestring_alt=datestring_alt,
part_id=part_id,
forms_by_donor=forms_by_donor,
current_user=current_user)
@participant.route('/profile')
@login_required
def profile():
"""Participant Profile page."""
asking_donors = Donor.query.filter_by(
user_id=current_user.id, status=1).all()
pledged_donors = Donor.query.filter_by(
user_id=current_user.id, status=2).all()
completed_donors = Donor.query.filter_by(
user_id=current_user.id, status=3).all()
todo_donors = Donor.query.filter_by(
user_id=current_user.id, status=0).all()
num_donors = len(completed_donors)
num_asks = len(asking_donors) + len(pledged_donors) + len(completed_donors)
ind_pledged = 0
is_candidate = False
term_participants = []
total_pledged = 0
total_raised = 0
    total_num_donors = 0
if current_user.candidate is not None and current_user.candidate.term_id is not None:
cohort_stats = Candidate.cohort_stats(current_user.candidate.term_id)
participant_stats = current_user.candidate.participant_stats()
amt_donated = current_user.candidate.amount_donated
else:
cohort_stats = {}
cohort_stats["amount_donated"] = "N/A (no cohort assigned)"
cohort_stats["total_donations"] = "N/A (no cohort assigned)"
cohort_stats["total_pledges"] = "N/A (no cohort assigned)"
cohort_stats["donor_count"] = "N/A (no cohort assigned)"
participant_stats = {}
        participant_stats["asking_count"] = "N/A (no participant linked)"
        participant_stats["todo_count"] = "N/A (no participant linked)"
        participant_stats["pledged_count"] = "N/A (no participant linked)"
        participant_stats["completed_count"] = "N/A (no participant linked)"
        participant_stats["donor_count"] = "N/A (no participant linked)"
        participant_stats["total_donations"] = "N/A (no participant linked)"
amt_donated = "N/A"
return render_template('participant/profile.html',
user=current_user,
is_candidate=current_user.candidate is not None,
ind_pledged=amt_donated,
num_asks=participant_stats["asking_count"],
total_todo=participant_stats["todo_count"],
total_pledged=participant_stats["pledged_count"],
total_completed=participant_stats["completed_count"],
total_num_donors=participant_stats["donor_count"],
total_raised=participant_stats["total_donations"],
cohort_raised=cohort_stats["amount_donated"],
cohort_donations=cohort_stats["total_donations"],
cohort_pledges=cohort_stats["total_pledges"],
cohort_donors=cohort_stats["donor_count"],
form=None)
@participant.route('/donor/ask/<int:donor_id>', methods=['POST'])
@login_required
def todo_to_asking(donor_id):
d = Donor.query.filter_by(id=donor_id).first()
part_id = None
if current_user.is_admin() and d.user.id!=current_user.id:
part_id = d.user.id
if d.user != current_user and not current_user.is_admin():
return abort(403)
f = TodoToAsking()
if f.validate_on_submit():
d.status = DonorStatus(int(f.status.data))
d.date_asking = f.date_asking.data
d.amount_asking_for = f.amount_asking_for.data
d.how_asking = f.how_asking.data
db.session.add(d)
db.session.commit()
flash('Successfully moved donor %s to %s.' % (d.first_name, d.status.name.lower()), 'success')
else:
flash('Error filling out form. Did you miss a field?', 'error')
return redirect(url_for('participant.index', part_id=part_id))
@participant.route('/donor/pledge/<int:donor_id>', methods=['POST'])
@login_required
def asking_to_pledged(donor_id):
d = Donor.query.filter_by(id=donor_id).first()
part_id = None
if current_user.is_admin() and d.user.id!=current_user.id:
part_id = d.user.id
if d.user != current_user and not current_user.is_admin():
return abort(403)
f = AskingToPledged()
if f.validate_on_submit():
d.status = DonorStatus(int(f.status.data))
d.pledged = f.pledged.data
d.amount_pledged = f.amount_pledged.data
db.session.add(d)
db.session.commit()
flash('Successfully moved donor %s to %s.' % (d.first_name, d.status.name.lower()), 'success')
else:
for e in f.errors:
flash('Error filling out %s field. %s' % (e.replace('_', ' ').title(), f.errors[e][0]), 'error')
return redirect(url_for('participant.index', part_id=part_id))
@participant.route('/donor/complete/<int:donor_id>', methods=['POST'])
@login_required
@admin_required
def pledged_to_completed(donor_id):
d = Donor.query.filter_by(id=donor_id).first()
part_id = None
if current_user.is_admin() and d.user.id!=current_user.id:
part_id = d.user.id
f = PledgedToCompleted()
if f.validate_on_submit():
d.status = DonorStatus(int(f.status.data))
d.amount_received = f.amount_received.data
d.date_received = f.date_received.data
db.session.add(d)
db.session.commit()
flash('Successfully moved donor %s to %s.' % (d.first_name, d.status.name.lower()), 'success')
else:
for e in f.errors:
flash('Error filling out %s field. %s' % (e.replace('_', ' ').title(), f.errors[e][0]), 'error')
return redirect(url_for('participant.index', part_id=part_id))
@participant.route('/<int:part_id>/donor/<int:donor_id>/_delete')
@participant.route('/donor/<int:donor_id>/_delete', defaults={'part_id': None})
@login_required
def delete_donor(part_id, donor_id):
"""Delete a participant."""
d = Donor.query.filter_by(id=donor_id).first()
if d.user != current_user and not (
current_user.is_admin() and d.user.id==part_id
):
return abort(403)
db.session.delete(d)
db.session.commit()
flash('Successfully deleted donor %s.' % d.first_name, 'success')
return redirect(url_for('participant.index', part_id=part_id))
@participant.route('/donor/<int:donor_id>/edit')
@login_required
def edit_donor(donor_id):
"""Edits a donor."""
d = Donor.query.filter_by(id=donor_id).first()
return redirect(url_for('participant.index'))
@participant.route('/new-donor', defaults={'part_id': None}, methods=['GET', 'POST'])
@participant.route('/<int:part_id>/new-donor', methods=['GET', 'POST'])
@login_required
def new_donor(part_id):
user = current_user
if part_id is not None:
if not current_user.is_admin():
return abort(403)
user = User.query.filter_by(id=part_id).first()
"""Create a new donor."""
form = NewDonorForm()
if form.validate_on_submit():
demographic = Demographic(
race=form.demographic.race.data,
gender=form.demographic.gender.data,
age=form.demographic.age.data,
sexual_orientation=form.demographic.sexual_orientation.data,
soc_class=form.demographic.soc_class.data
)
donor = Donor(
user_id=user.id,
user=user,
first_name=form.first_name.data,
last_name=form.last_name.data,
contact_date=form.contact_date.data,
street_address=form.street_address.data,
city=form.city.data,
state=form.state.data,
zipcode=form.zipcode.data,
phone_number=form.phone_number.data,
email=form.email.data,
notes=form.notes.data,
interested_in_future_gp=form.interested_in_future_gp.data,
want_to_learn_about_brf_guarantees=form.want_to_learn_about_brf_guarantees.data,
interested_in_volunteering=form.interested_in_volunteering.data,
status=DonorStatus.TODO,
amount_pledged=0,
amount_received=0,
amount_asking_for=0,
demographic=demographic
)
db.session.add(donor)
db.session.commit()
flash('Donor {} successfully created'.format(donor.full_name()),
'form-success')
return render_template('participant/new_donor.html', form=form, part_id=part_id)
| 37.348921 | 112 | 0.63421 | 1,321 | 10,383 | 4.750189 | 0.125662 | 0.030598 | 0.026932 | 0.031554 | 0.472351 | 0.436813 | 0.387729 | 0.357291 | 0.29004 | 0.279522 | 0 | 0.003573 | 0.245305 | 10,383 | 277 | 113 | 37.483755 | 0.797218 | 0.005971 | 0 | 0.308036 | 0 | 0 | 0.126964 | 0.027032 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044643 | false | 0 | 0.035714 | 0.008929 | 0.147321 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a1b0dcc53e4c376a7325c4a14a7d8d4bfe1cd94 | 9,927 | py | Python | Minesweeper.py | LuizHenriquePy/Minesweeper | 23e4d3a2bcb6ef6c0a05a14bd77ab66284c6a568 | [
"MIT"
] | null | null | null | Minesweeper.py | LuizHenriquePy/Minesweeper | 23e4d3a2bcb6ef6c0a05a14bd77ab66284c6a568 | [
"MIT"
] | null | null | null | Minesweeper.py | LuizHenriquePy/Minesweeper | 23e4d3a2bcb6ef6c0a05a14bd77ab66284c6a568 | [
"MIT"
] | null | null | null | from random import randint
from tkinter import *
from tkinter.messagebox import showinfo
from functools import partial
NEIGHBORS = [
lambda x, y: (x - 1, y - 1), # top left
lambda x, y: (x - 1, y), # top
lambda x, y: (x - 1, y + 1), # top right
lambda x, y: (x, y - 1), # left
lambda x, y: (x, y + 1), # right
lambda x, y: (x + 1, y - 1), # bottom left
lambda x, y: (x + 1, y), # bottom
lambda x, y: (x + 1, y + 1) # bottom right
]
class Matrix:
def __init__(self, numberOfRows, numberOfColumns, numberOfMines):
self.numberOfRows = numberOfRows
self.numberOfColumns = numberOfColumns
self.numberOfMines = numberOfMines
self.neighbors = NEIGHBORS
def creates_the_matrix(self):
self.matrix = [[0 for x in range(self.numberOfColumns)] for x in range(self.numberOfRows)]
def put_mines_in_the_matrix(self):
while True:
self.minePositions = []
self.creates_the_matrix()
while len(self.minePositions) != self.numberOfMines:
minePosition = [randint(0, self.numberOfRows - 1), randint(0, self.numberOfColumns - 1)]
if minePosition not in self.minePositions:
self.minePositions.append(minePosition)
self.matrix[minePosition[0]][minePosition[1]] = 'M'
if self.checks_if_there_are_accumulated_mines_in_the_matrix(self.matrix):
break
def checks_if_there_are_accumulated_mines_in_the_matrix(self, matrix):
for x in range(self.numberOfRows):
for y in range(self.numberOfColumns):
numberOfMines = 0
numberOfNeighbors = 0
for neighborPosition in self.neighbors:
try:
xN, yN = neighborPosition(x, y)
if xN < 0 or yN < 0:
raise IndexError
numberOfNeighbors += 1
if self.matrix[xN][yN] == 'M':
numberOfMines += 1
except IndexError:
pass
if numberOfNeighbors == numberOfMines:
return False
return True
def put_number_in_the_matrix(self):
for x, y in self.minePositions:
for positionNeighbor in self.neighbors:
try:
xN, yN = positionNeighbor(x, y)
if xN < 0 or yN < 0:
raise IndexError
if self.matrix[xN][yN] != 'M':
self.matrix[xN][yN] += 1
except IndexError:
pass
def main(self):
self.creates_the_matrix()
self.put_mines_in_the_matrix()
self.put_number_in_the_matrix()
return self.matrix
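# A minimal sketch of using Matrix on its own (the board size is hypothetical):
#
#     board = Matrix(numberOfRows=9, numberOfColumns=9, numberOfMines=10).main()
#     # `board` is a 9x9 list of lists in which 'M' marks a mine and every other
#     # cell holds the count of mines among its (up to eight) neighbors.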
class Minesweeper:
def __init__(self, window, matrix):
self.matrix = matrix
self.x = len(self.matrix)
self.y = len(self.matrix[0])
self.window = window
self.flags = []
self.mines = []
self.neighbors = NEIGHBORS
self.matrixButtons = [[y for y in range(self.y)] for x in range(self.x)]
self.game_creator()
self.window.resizable(0, 0)
self.window.title('Minesweeper')
self.window.mainloop()
def game_creator(self):
if self.x > 25:
size = 15
self.window.geometry(f"{self.y * size}x{self.x * size}")
self.images('big')
else:
size = 21
self.window.geometry(f"{self.y * size}x{self.x * size}")
self.images('small')
for x in range(self.x):
for y in range(self.y):
pos = [x, y]
label = Label(self.window, borderwidth=1, relief='groove', bg='darkgrey')
self.matrixButtons[x][y] = Button(self.window, image = self.bgButton)
self.matrixButtons[x][y].bind("<Button-3>", partial(self.right_click, self.matrixButtons[x][y]))
if self.matrix[x][y] == 'M':
self.mines.append(self.matrixButtons[x][y])
self.matrixButtons[x][y].config(command = partial(self.game_over, self.matrixButtons[x][y], label))
label.config(image = self.mine)
else:
self.matrixButtons[x][y].config(command = partial(self.left_click, self.matrixButtons[x][y], pos))
self.put_pictures(x, y, label)
label.place(x= y*size, y = x*size)
self.matrixButtons[x][y].place(x= y*size, y = x*size)
def put_pictures(self, x, y, label):
if self.matrix[x][y] == 0: label.config(image = self.zero)
if self.matrix[x][y] == 1: label.config(image = self.one)
if self.matrix[x][y] == 2: label.config(image = self.two)
if self.matrix[x][y] == 3: label.config(image = self.three)
if self.matrix[x][y] == 4: label.config(image = self.four)
if self.matrix[x][y] == 5: label.config(image = self.five)
if self.matrix[x][y] == 6: label.config(image = self.six)
if self.matrix[x][y] == 7: label.config(image = self.seven)
def images(self, gameSize):
if gameSize == 'big':
self.zero = PhotoImage(file = "images/bigGame/zero.png")
self.one = PhotoImage(file = "images/bigGame/one.png")
self.two = PhotoImage(file = "images/bigGame/two.png")
self.three = PhotoImage(file = "images/bigGame/three.png")
self.four = PhotoImage(file = "images/bigGame/four.png")
self.five = PhotoImage(file = "images/bigGame/five.png")
self.six = PhotoImage(file = "images/bigGame/six.png")
self.seven = PhotoImage(file = "images/bigGame/seven.png")
self.mine = PhotoImage(file = "images/bigGame/mine.png")
self.explosion= PhotoImage(file = "images/bigGame/explosion.png")
self.flag = PhotoImage(file = "images/bigGame/flag.png")
self.bgButton = PhotoImage(file = "images/bigGame/backgroundButton.png")
if gameSize == 'small':
self.zero = PhotoImage(file = "images/smallGame/zero.png")
self.one = PhotoImage(file = "images/smallGame/one.png")
self.two = PhotoImage(file = "images/smallGame/two.png")
self.three = PhotoImage(file = "images/smallGame/three.png")
self.four = PhotoImage(file = "images/smallGame/four.png")
self.five = PhotoImage(file = "images/smallGame/five.png")
self.six = PhotoImage(file = "images/smallGame/six.png")
self.seven = PhotoImage(file = "images/smallGame/seven.png")
self.mine = PhotoImage(file = "images/smallGame/mine.png")
self.explosion= PhotoImage(file = "images/smallGame/explosion.png")
self.flag = PhotoImage(file = "images/smallGame/flag.png")
self.bgButton = PhotoImage(file = "images/smallGame/backgroundButton.png")
def left_click(self, button, pos):
x, y = pos
self.deletedButtons = []
button.destroy()
self.deletedButtons.append(button)
if self.matrix[x][y] == 0:
self.delete_blank_buttons(x, y)
def delete_blank_buttons(self, x, y):
for func in self.neighbors:
try:
xN, yN = func(x, y)
if xN < 0 or yN < 0:
raise IndexError
if self.matrix[xN][yN] != 'M':
if self.matrixButtons[xN][yN] not in self.deletedButtons:
if self.matrixButtons[xN][yN] not in self.flags:
self.matrixButtons[xN][yN].destroy()
self.deletedButtons.append(self.matrixButtons[xN][yN])
if self.matrix[xN][yN] == 0:
self.delete_blank_buttons(xN, yN)
except IndexError:
pass
def right_click(self, button, event):
if button['state'] == 'normal':
self.flags.append(button)
button.config(image = self.flag)
button['state'] = 'disabled'
self.victory()
else:
self.flags.remove(button)
button.config(image = self.bgButton)
button['state'] = 'normal'
self.victory()
def victory(self):
for button in self.mines:
if button not in self.flags:
return
if len(self.flags) != len(self.mines):
return
showinfo("You win!", "You win!")
self.window.destroy()
def game_over(self, button, label):
button.destroy()
label.config(image = self.explosion)
showinfo("Game Over!", "Game Over")
self.window.destroy()
if __name__ == '__main__':
while True:
rows = int(input("Type number of rows: "))
columns = int(input("Type number of columns: "))
mines = int(input("Type number of mines: "))
window = Tk()
matrix = Matrix(rows, columns, mines).main()
Minesweeper(window, matrix)
r = str(input("Continue? ")).upper()
if r[0] == 'N':
break
| 30.925234 | 119 | 0.512642 | 1,086 | 9,927 | 4.621547 | 0.138122 | 0.016338 | 0.095637 | 0.064555 | 0.431361 | 0.33672 | 0.277147 | 0.106196 | 0.06316 | 0.06316 | 0 | 0.008348 | 0.372519 | 9,927 | 320 | 120 | 31.021875 | 0.797399 | 0.006548 | 0 | 0.191919 | 0 | 0 | 0.089516 | 0.061707 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0.015152 | 0.020202 | 0 | 0.131313 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a1b1a58925e03bd6db6d45cb4654b3c4f2ed010 | 631 | py | Python | src/data/727.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/727.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/727.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | n, q = list(map(int, input().split()))
g = [[] for _ in range(n)]
for i in range(n - 1):
    a, b = list(map(lambda x: int(x) - 1, input().split()))
    g[a].append(b)
    g[b].append(a)

from collections import deque

def bfs(v):
    q = deque()
    q.append(v)
    d = [-1] * n
    d[v] = 0
    while q:
        v = q.popleft()
        for u in g[v]:
            if d[u] != -1: continue
            d[u] = d[v] + 1
            q.append(u)
    return d

a = bfs(0)
for i in range(q):
    c, d = list(map(lambda x: int(x) - 1, input().split()))
    if (a[c] - a[d]) % 2:
        print('Road')
    else:
        print('Town')
| 17.527778 | 59 | 0.448494 | 110 | 631 | 2.563636 | 0.354545 | 0.074468 | 0.078014 | 0.078014 | 0.205674 | 0.205674 | 0.205674 | 0.205674 | 0.205674 | 0 | 0 | 0.021845 | 0.347068 | 631 | 35 | 60 | 18.028571 | 0.662621 | 0 | 0 | 0 | 0 | 0 | 0.012678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.038462 | 0 | 0.115385 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a1bf9d02d86498aac8fd6b706ecbc5b43754eaa | 1,214 | py | Python | art_app/forms.py | kyeugh/cop4710-artsite | 78576b4853bc2571fd560816dadbc8db5a6ae2bb | [
"MIT"
] | null | null | null | art_app/forms.py | kyeugh/cop4710-artsite | 78576b4853bc2571fd560816dadbc8db5a6ae2bb | [
"MIT"
] | null | null | null | art_app/forms.py | kyeugh/cop4710-artsite | 78576b4853bc2571fd560816dadbc8db5a6ae2bb | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from .models import Artwork, Artist, Collection
class RegistrationForm(UserCreationForm):
    pronouns = forms.ChoiceField(
        choices=(
            (1, "they/them"),
            (2, "he/him"),
            (3, "she/her")
        )
    )

    class Meta:
        model = get_user_model()
        fields = ("username", "email", "pronouns", "password1", "password2")


class EditProfileForm(forms.ModelForm):
    pronouns = forms.ChoiceField(
        choices=(
            (1, "they/them"),
            (2, "he/him"),
            (3, "she/her")
        )
    )

    class Meta:
        model = Artist
        fields = ("bio", "location", "pronouns")


class ArtworkForm(forms.ModelForm):
    """Form to submit a new Artwork."""
    tags = forms.CharField(help_text="Enter a comma-separated list of tags.")

    class Meta:
        model = Artwork
        fields = ("image", "title", "caption")


class CollectionForm(forms.ModelForm):
    tags = forms.CharField(help_text="Enter a comma-separated list of tags.")

    class Meta:
        model = Collection
        fields = ("name",)
| 24.77551 | 77 | 0.594728 | 130 | 1,214 | 5.507692 | 0.446154 | 0.050279 | 0.078212 | 0.058659 | 0.382682 | 0.382682 | 0.382682 | 0.382682 | 0.382682 | 0.382682 | 0 | 0.009101 | 0.275947 | 1,214 | 48 | 78 | 25.291667 | 0.805461 | 0.023888 | 0 | 0.444444 | 0 | 0 | 0.167091 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.027778 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a1d2fc6eda877e92101d99b44846437ac6790fd | 785 | py | Python | examples/datetimecol.py | nullptrT/flask_table | d4577307bf3b790fb1d91238019577beb477ee4a | [
"BSD-3-Clause"
] | 215 | 2015-01-09T12:18:19.000Z | 2022-01-31T00:18:29.000Z | examples/datetimecol.py | nullptrT/flask_table | d4577307bf3b790fb1d91238019577beb477ee4a | [
"BSD-3-Clause"
] | 93 | 2015-02-03T22:39:02.000Z | 2022-01-26T04:12:16.000Z | examples/datetimecol.py | nullptrT/flask_table | d4577307bf3b790fb1d91238019577beb477ee4a | [
"BSD-3-Clause"
] | 48 | 2015-04-29T09:23:34.000Z | 2022-01-21T13:50:39.000Z | import os
from datetime import datetime
# Run this example with LC_TIME=[other locale] to use a different
# locale's datetime formatting, eg:
#
# LC_TIME=en_US python examples/datetimecol.py
# or
# LC_TIME=en_GB python examples/datetimecol.py
os.environ.setdefault('LC_TIME', 'en_GB') # noqa
from flask_table import Table, Col, DatetimeCol
class Item(object):
    def __init__(self, name, dt):
        self.name = name
        self.dt = dt


class ItemTable(Table):
    name = Col('Name')
    dt = DatetimeCol('Datetime')


def main():
    items = [
        Item('Name1', datetime.now()),
        Item('Name2', datetime(2018, 1, 1, 12, 34, 56)),
    ]
    table = ItemTable(items)
    # or {{ table }} in jinja
    print(table.__html__())


if __name__ == '__main__':
    main()
| 20.128205 | 65 | 0.647134 | 108 | 785 | 4.481481 | 0.527778 | 0.049587 | 0.049587 | 0.11157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022913 | 0.221656 | 785 | 38 | 66 | 20.657895 | 0.769231 | 0.278981 | 0 | 0 | 0 | 0 | 0.075404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.45 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a1dcb70ea43341de423c68976e0cc57c3119a36 | 3,560 | py | Python | oncopolicy/utils/generic.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 6 | 2022-01-15T11:57:19.000Z | 2022-02-13T21:15:22.000Z | oncopolicy/utils/generic.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | null | null | null | oncopolicy/utils/generic.py | yala/Tempo | bf3e0e78d64869bb2079c582a4a35982f78386ad | [
"MIT"
] | 2 | 2022-02-02T13:09:29.000Z | 2022-02-18T07:06:19.000Z | import datetime
import hashlib
import numpy as np
from copy import deepcopy
import torch
import pdb
INVALID_DATE_STR = "Date string not valid! Received {}, and got exception {}"
ISO_FORMAT = '%Y-%m-%d %H:%M:%S'
CGMH_ISO_FORMAT ='%Y%m%d'
DAYS_IN_YEAR = 365
DAYS_IN_MO = 30
MAX_MO_TO_CANCER = 1200
MIN_MO_TO_CANCER = 3
MAX_PREFERNCES = 10.0
MIN_PREFERNCES = 0
EPSILON = 1e-3
AVG_MOMENTUM = 0.95
NUM_DIM_AUX_FEATURES = 7 ## Deprecated
class AverageMeter():
    def __init__(self):
        self.avg = 0
        self.first_update = True

    def reset(self):
        self.avg = 0
        self.first_update = True

    def update(self, val_tensor):
        val = val_tensor.item()
        if self.first_update:
            self.avg = val
            self.first_update = False
        else:
            self.avg = (AVG_MOMENTUM * self.avg) + (1-AVG_MOMENTUM) * val
        assert self.avg >= 0 and val >= 0


def get_aux_tensor(tensor, args):
    ## use of auxillary features for screen is deprecated
    return torch.zeros([tensor.size()[0], NUM_DIM_AUX_FEATURES]).to(tensor.device)


def to_numpy(tensor):
    return tensor.cpu().numpy()


def to_tensor(arr, device):
    return torch.Tensor(arr).to(device)


def sample_preference_vector(batch_size, sample_random, args):
    if sample_random:
        dist = torch.distributions.uniform.Uniform(MIN_PREFERNCES, MAX_PREFERNCES)
        preferences = dist.sample([batch_size, len(args.metrics), 1])
    else:
        preferences = torch.ones(batch_size, len(args.metrics), 1)
        preferences *= torch.tensor(args.fixed_preference).unsqueeze(0).unsqueeze(-1)
    preferences = preferences + EPSILON
    preferences = (preferences / (preferences).sum(dim=1).unsqueeze(-1))
    return preferences.to(args.device)


def normalize_dictionary(dictionary):
    '''
    Normalizes counts in dictionary
    :dictionary: a python dict where each value is a count
    :returns: a python dict where each value is normalized to sum to 1
    '''
    num_samples = sum([dictionary[l] for l in dictionary])
    for label in dictionary:
        dictionary[label] = dictionary[label]*1. / num_samples
    return dictionary


def parse_date(iso_string):
    '''
    Takes a string of format "YYYY-MM-DD HH:MM:SS" and
    returns a corresponding datetime.datetime obj
    throws an exception if this can't be done.
    '''
    try:
        return datetime.datetime.strptime(iso_string, ISO_FORMAT)
    except Exception as e:
        raise Exception(INVALID_DATE_STR.format(iso_string, e))


def md5(key):
    '''
    returns a hashed with md5 string of the key
    '''
    return hashlib.md5(key.encode()).hexdigest()


def pad_array_to_length(arr, pad_token, max_length):
    arr = arr[:max_length]
    return np.array( arr + [pad_token]* (max_length - len(arr)))


def fast_forward_exam_by_one_time_step(curr_exam, NUM_DAYS_IN_TIME_STEP):
    exam = deepcopy(curr_exam)
    est_date_of_last_followup = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_YEAR * curr_exam['years_to_last_followup']))
    est_date_of_cancer = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_MO * curr_exam['months_to_cancer']))
    exam['date'] = curr_exam['date'] + datetime.timedelta(days=int(NUM_DAYS_IN_TIME_STEP))
    exam['years_to_last_followup'] = (est_date_of_last_followup - exam['date']).days / DAYS_IN_YEAR
    exam['months_to_cancer'] = (est_date_of_cancer - exam['date']).days / DAYS_IN_MO
    exam['has_cancer'] = exam['months_to_cancer'] < MIN_MO_TO_CANCER
    exam['time_stamp'] = curr_exam['time_stamp'] + 1
    return exam
| 33.584906 | 132 | 0.69691 | 524 | 3,560 | 4.486641 | 0.316794 | 0.020417 | 0.025521 | 0.025521 | 0.223309 | 0.166738 | 0.130157 | 0.091876 | 0.064653 | 0 | 0 | 0.013185 | 0.190449 | 3,560 | 105 | 133 | 33.904762 | 0.802568 | 0.11236 | 0 | 0.081081 | 0 | 0 | 0.072698 | 0.014216 | 0 | 0 | 0 | 0 | 0.013514 | 1 | 0.162162 | false | 0 | 0.081081 | 0.040541 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a1f6028964148dcc46c1ff12bbb3ff8d2b421b7 | 1,159 | py | Python | relocate_xaltjson.py | adityakavalur/slurm-docker-cluster | d54703ddcab9d456be4743dae0f51daf3d549df5 | [
"MIT"
] | null | null | null | relocate_xaltjson.py | adityakavalur/slurm-docker-cluster | d54703ddcab9d456be4743dae0f51daf3d549df5 | [
"MIT"
] | null | null | null | relocate_xaltjson.py | adityakavalur/slurm-docker-cluster | d54703ddcab9d456be4743dae0f51daf3d549df5 | [
"MIT"
] | null | null | null | import grp
import pwd
import os
import json
import fnmatch
from glob import glob
org_dir="/data/xalt2_json"
reloc_dir="/data/xalt2_json_moved"
xalt_dir=glob(org_dir+"/*")
user=pwd.getpwuid(os.getuid()).pw_uid
#move dir at the end of the run
for slurmjobs in xalt_dir:
    stat_info = os.stat(slurmjobs)
    uid = stat_info.st_uid
    if (uid == user):
        slurmjobs2=slurmjobs+"/*"
        xalt2list=glob(slurmjobs2)
        for job2 in xalt2list:
            movefile = False
            with open(job2) as json_file:
                data = json.load(json_file)
                if 'userT' in data:
                    if data["userT"]["job_id"] == os.environ.get('SLURM_JOBID') :
                        movefile = True
            if (movefile):
                xaltnum=slurmjobs
                xaltnum=slurmjobs.replace(org_dir,'')
                if not os.path.exists(reloc_dir+xaltnum):
                    os.makedirs(reloc_dir+xaltnum)
                moveddir = job2.replace(org_dir,reloc_dir)
                os.replace(job2,moveddir)
#This needs to be done elsewhere
##delete empty folders
#for slurmjobs in xalt_dir:
# print(len(fnmatch.filter(os.listdir(slurmjobs), '*.json')))
| 28.268293 | 76 | 0.623814 | 156 | 1,159 | 4.49359 | 0.455128 | 0.034237 | 0.028531 | 0.045649 | 0.059914 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011737 | 0.264884 | 1,159 | 40 | 77 | 28.975 | 0.811033 | 0.145815 | 0 | 0 | 0 | 0 | 0.070122 | 0.022358 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a228507532d3492cb247acb443659a30d0727c0 | 3,873 | py | Python | python/dsbox/template/template_files/loaded/DefaultLinkPredictionTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-05-10T22:19:44.000Z | 2020-07-21T07:28:39.000Z | python/dsbox/template/template_files/loaded/DefaultLinkPredictionTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 187 | 2018-04-13T17:19:24.000Z | 2020-04-21T00:41:15.000Z | python/dsbox/template/template_files/loaded/DefaultLinkPredictionTemplate.py | usc-isi-i2/dsbox-ta2 | 85e0e8f5bbda052fa77cb98f4eef1f4b50909fd2 | [
"MIT"
] | 7 | 2018-07-10T00:14:07.000Z | 2019-07-25T17:59:44.000Z | from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class DefaultLinkPredictionTemplate(DSBoxTemplate):
'''
Dummy implementation that does not look at the underlying graph at all.
'''
def __init__(self):
DSBoxTemplate.__init__(self)
self.template = {
"name": "Default_LinkPrediction_Template",
"taskType": {TaskKeyword.LINK_PREDICTION.name},
# for some special condition, the taskSubtype can be "NONE" which indicate no taskSubtype given
"taskSubtype": {TaskKeyword.LINK_PREDICTION.name},
"inputType": {"graph", "edgeList"},
"output": "model_step",
"steps": [
{
"name": "to_dataframe_step",
"primitives": ["d3m.primitives.data_transformation.dataset_to_dataframe.Common"],
"inputs": ["template_input"]
},
{
"name": "common_profiler_step",
"primitives": ["d3m.primitives.schema_discovery.profiler.Common"],
"inputs": ["to_dataframe_step"]
},
{
"name": "extract_attribute_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': ('https://metadata.datadrivendiscovery.org/types/PrimaryKey',
'https://metadata.datadrivendiscovery.org/types/Attribute',),
'use_columns': (),
'exclude_columns': ()
}
}],
"inputs": ["common_profiler_step"]
},
{
"name": "to_numeric_step",
"primitives": ["d3m.primitives.data_transformation.to_numeric.DSBOX"],
"inputs":["extract_attribute_step"],
},
{
"name": "extract_target_step",
"primitives": [{
"primitive": "d3m.primitives.data_transformation.extract_columns_by_semantic_types.Common",
"hyperparameters":
{
'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TrueTarget',),
'use_columns': (),
'exclude_columns': ()
}
}],
"inputs": ["common_profiler_step"]
},
{
"name": "model_step",
"primitives": [{
"primitive": "d3m.primitives.classification.random_forest.SKlearn",
"hyperparameters": {
# 'bootstrap': ["bootstrap", "disabled"],
'max_depth': [15, 30, None],
'min_samples_leaf': [1, 2, 4],
'min_samples_split': [2, 5, 10],
'max_features': ['auto', 'sqrt'],
'n_estimators': [10, 50, 100],
'add_index_columns': [True],
'use_semantic_types':[True],
}
}
],
"inputs": ["to_numeric_step", "extract_target_step"]
}
]
}
| 46.107143 | 115 | 0.452879 | 265 | 3,873 | 6.34717 | 0.433962 | 0.049941 | 0.040428 | 0.073722 | 0.347206 | 0.302021 | 0.248514 | 0.248514 | 0.248514 | 0.186683 | 0 | 0.011542 | 0.440744 | 3,873 | 83 | 116 | 46.662651 | 0.765005 | 0.056804 | 0 | 0.194805 | 0 | 0 | 0.340947 | 0.119978 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0 | 0.077922 | 0 | 0.103896 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a22df4be7ea2aa5d47270ce9c3cf858a95fcab4 | 10,248 | py | Python | few_shots_clf/triplet_classifier/triplet_classifier.py | delmalih/few-shots-classification | 8b06ff673882fd0d8b99cd705e5e5fab0ec93fb3 | [
"MIT"
] | null | null | null | few_shots_clf/triplet_classifier/triplet_classifier.py | delmalih/few-shots-classification | 8b06ff673882fd0d8b99cd705e5e5fab0ec93fb3 | [
"MIT"
] | null | null | null | few_shots_clf/triplet_classifier/triplet_classifier.py | delmalih/few-shots-classification | 8b06ff673882fd0d8b99cd705e5e5fab0ec93fb3 | [
"MIT"
] | null | null | null | # pylint: disable=attribute-defined-outside-init, no-member, line-too-long, too-many-instance-attributes
##########################
# Imports
##########################
import os
from typing import Dict, List
import pickle
import numpy as np
from tensorflow import keras
from easydict import EasyDict as edict
from few_shots_clf import utils
from few_shots_clf.triplet_classifier import constants
from few_shots_clf.triplet_classifier import utils as triplet_utils
##########################
# TripletClassifier
##########################
class TripletClassifier:
"""Class implementing the Classifier trained on triplet loss (TripletClassifier)
Args:
catalog_path (string): [description]
params (dict): [description]
"""
##########################
# Init
##########################
def __init__(self, catalog_path: str, params: Dict = {}):
self.catalog_path = catalog_path
self._config_classifier(catalog_path, params)
##########################
# Config
##########################
def _config_classifier(self, catalog_path, params):
self._get_classifier_config(params)
self._get_catalog_images(catalog_path)
self._get_catalog_labels(catalog_path)
self._get_catalog_images2labels()
self._get_triplet_model()
self._compile_triplet_model()
self._load_fingerprints()
def _get_classifier_config(self, params):
self.config = edict({
"verbose": params.get("verbose", constants.VERBOSE),
"image_size": params.get("image_size", constants.IMAGE_SIZE),
"triplet_margin": params.get("triplet_margin", constants.TRIPLET_MARGIN),
"mining_strategy": params.get("mining_strategy", constants.MINING_STRATEGY),
"embedding_size": params.get("embedding_size", constants.EMBEDDING_SIZE),
"basic_batch_size": params.get("basic_batch_size", constants.BASIC_BATCH_SIZE),
"augment_factor": params.get("augment_factor", constants.AUGMENT_FACTOR),
"n_epochs": params.get("n_epochs", constants.N_EPOCHS),
"model_backbone": params.get("model_backbone", constants.MODEL_BACKBONE),
"learning_rate": params.get("learning_rate", constants.LEARNING_RATE),
"model_path": params.get("model_path", constants.MODEL_PATH),
"fingerprint_path": params.get("fingerprint_path", constants.FINGERPRINT_PATH),
})
self.config.batch_size = self.config.basic_batch_size * self.config.augment_factor
def _get_catalog_images(self, catalog_path):
self.catalog_images = utils.get_all_images_from_folder(catalog_path)
if self.config.verbose:
print(f"Found {len(self.catalog_images)} images!")
def _get_catalog_labels(self, catalog_path):
self.catalog_labels = utils.get_labels_from_catalog(catalog_path)
if self.config.verbose:
print(f"Found {len(self.catalog_labels)} labels!")
def _get_catalog_images2labels(self):
self.catalog_images2labels = utils.compute_images2labels(self.catalog_images,
self.catalog_labels)
def _get_triplet_model(self):
self.triplet_model = triplet_utils.TripletModel(self.config.embedding_size,
self.config.model_backbone)
self.triplet_model.build(input_shape=(self.config.batch_size,
self.config.image_size,
self.config.image_size,
3))
if self.config.verbose:
self.triplet_model.summary()
def _compile_triplet_model(self):
triplet_loss = triplet_utils.triplet_loss_function(self.config.triplet_margin,
self.config.mining_strategy)
triplet_metric = triplet_utils.triplet_loss_metric(
self.config.triplet_margin)
self.triplet_model.compile(optimizer=keras.optimizers.Adam(lr=self.config.learning_rate),
loss=triplet_loss,
metrics=[triplet_metric])
def _load_fingerprints(self):
# Previous fingerprint
if os.path.exists(self.config.fingerprint_path):
with open(self.config.fingerprint_path, "rb") as pickle_file:
self.config.fingerprint = pickle.load(pickle_file)
else:
self.config.fingerprint = ""
# Current fingerprint
self.fingerprint = triplet_utils.compute_fingerprint(self.catalog_path,
self.config)
##########################
# Train
##########################
def train(self):
"""Method used to train the classifier.
"""
train_generator = self._get_data_generator()
self.triplet_model.fit_generator(generator=train_generator,
epochs=self.config.n_epochs,
verbose=self.config.verbose,
use_multiprocessing=False,
callbacks=self._get_model_callbacks())
def _get_data_generator(self) -> triplet_utils.DataGenerator:
catalog_labels = list(
map(lambda img: self.catalog_images2labels[img], self.catalog_images))
catalog_label_ids = np.float32(
list(map(self.label_str2id, catalog_labels)))
return triplet_utils.DataGenerator(self.catalog_images,
catalog_label_ids,
self.config.image_size,
self.config.basic_batch_size,
self.config.augment_factor)
def _get_model_callbacks(self) -> List:
reduce_lr_on_plateau_callback = keras.callbacks.ReduceLROnPlateau(monitor='loss',
verbose=self.config.verbose)
checkpointer_callback = keras.callbacks.ModelCheckpoint(self.config.model_path,
save_best_only=True,
monitor='loss',
verbose=self.config.verbose)
early_stopping_callback = keras.callbacks.EarlyStopping(monitor='loss',
patience=10,
verbose=self.config.verbose)
return [reduce_lr_on_plateau_callback,
checkpointer_callback,
early_stopping_callback]
def compute_catalog_embeddings(self) -> np.array:
"""[summary]
Returns:
np.array: [description]
"""
# Init. catalog embeddings
self.catalog_embeddings = []
# Loop over catalog images
for catalog_img_path in utils.get_iterator(self.catalog_images,
verbose=self.config.verbose):
# Read catalog image
catalog_image = utils.read_image(catalog_img_path,
size=self.config.image_size)
catalog_image = np.expand_dims(catalog_image, axis=0)
# Compute embedding
catalog_emdding = self.triplet_model.predict(catalog_image)[0]
# Update catalog_emddings
self.catalog_embeddings.append(catalog_emdding)
self.catalog_embeddings = np.array(self.catalog_embeddings)
##########################
# Predict
##########################
def load_best_model(self):
"""Loads the best weights from previous training
"""
self.triplet_model.load_weights(self.config.model_path)
def predict(self, query_path: str) -> np.array:
"""Method used to predict a score per class for a given query.
Args:
query_path (str): The local path of the query.
Returns:
np.array: The list of scores per class.
"""
# Read img
query_img = utils.read_image(query_path, size=self.config.image_size)
query_img = np.expand_dims(query_img, axis=0)
# Get query embedding
query_embedding = self.triplet_model.predict(query_img)
# Get scores
scores = self._get_query_scores(query_embedding)
scores = np.array(scores)
return scores
def _get_query_scores(self, query_embedding: np.array):
# Compute pairwise distances
pairwise_distances = np.linalg.norm(query_embedding[:, None, :] -
self.catalog_embeddings[None, :, :],
axis=-1)
# Compute scores
scores = np.exp(-pairwise_distances ** 2)
# Compute predicted label and score
predicted_catalog_image_id = np.argmax(scores, axis=-1)[0]
predicted_catalog_image = self.catalog_images[predicted_catalog_image_id]
predicted_label = self.catalog_images2labels[predicted_catalog_image]
predicted_score = np.max(scores, axis=-1)[0]
return predicted_label, predicted_score
##########################
# Utils
##########################
def label_id2str(self, label_id: int) -> str:
"""Gets the label_str given the label_id.
Args:
label_id (int): The given label_id.
Returns:
str: The label_str of the given label_id.
"""
return self.catalog_labels[label_id]
def label_str2id(self, label_str: str) -> int:
"""Gets the label_id given the label_str.
Args:
label_str (str): The given label_str.
Returns:
int: The label_id of the given label_id.
"""
if label_str in self.catalog_labels:
return self.catalog_labels.index(label_str)
return -1
| 40.03125 | 104 | 0.569672 | 1,026 | 10,248 | 5.400585 | 0.184211 | 0.063165 | 0.02274 | 0.017145 | 0.156831 | 0.102148 | 0.054142 | 0.040426 | 0.040426 | 0.040426 | 0 | 0.003433 | 0.317916 | 10,248 | 255 | 105 | 40.188235 | 0.78927 | 0.116511 | 0 | 0.065217 | 0 | 0 | 0.046759 | 0.00614 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.065217 | 0 | 0.253623 | 0.072464 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a22fe21071891c677cbdc4409f946c2979fd518 | 383 | py | Python | tests/python-reference/property/simple_property_decorator.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | 25 | 2015-04-16T04:31:49.000Z | 2022-03-10T15:53:28.000Z | tests/python-reference/property/simple_property_decorator.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | 1 | 2018-11-21T22:40:02.000Z | 2018-11-26T17:53:11.000Z | tests/python-reference/property/simple_property_decorator.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | 1 | 2021-03-26T03:36:19.000Z | 2021-03-26T03:36:19.000Z | class C(object):
    def __init__(self):
        self.x = 42

    @property
    def f(self):
        self.x += 1
        return self.x

    @f.setter
    def f(self, value):
        self.x = value

    @f.deleter
    def f(self):
        del self.x
c = C()
assert c.x == 42
assert c.f == 43
c.f = 55
assert c.x == 55
assert c.f == 56
del c.f
assert not hasattr(c, 'x')
assert not hasattr(c, 'f')
assert hasattr(C, 'f')
| 15.32 | 26 | 0.582245 | 74 | 383 | 2.959459 | 0.297297 | 0.054795 | 0.109589 | 0.155251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.253264 | 383 | 24 | 27 | 15.958333 | 0.72028 | 0 | 0 | 0.086957 | 0 | 0 | 0.007833 | 0 | 0 | 0 | 0 | 0 | 0.304348 | 1 | 0.173913 | false | 0 | 0 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a24335a2137933438ce5640a47cf8d7c1a859b7 | 8,111 | py | Python | papers/cats/utility/get_online_results.py | Ark-kun/vowpal_wabbit | d811c93fa6adbb513729698202984e3662a3d8df | [
"BSD-3-Clause"
] | 4,332 | 2015-01-01T10:26:51.000Z | 2018-10-01T14:05:43.000Z | papers/cats/utility/get_online_results.py | chrinide/vowpal_wabbit | 40e1fef676ca6a461d71cf0631ab5c63d1af5d8a | [
"BSD-3-Clause"
] | 1,004 | 2015-01-01T12:00:54.000Z | 2018-09-30T22:13:42.000Z | papers/cats/utility/get_online_results.py | chrinide/vowpal_wabbit | 40e1fef676ca6a461d71cf0631ab5c63d1af5d8a | [
"BSD-3-Clause"
] | 1,182 | 2015-01-02T20:38:55.000Z | 2018-09-26T02:47:37.000Z | import sys
import getopt
from confidence_interval import ConfidenceInterval
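# Helper: returns the element that immediately follows the first occurrence of target in source.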
def nextword(target, source):
for i, w in enumerate(source):
if w == target:
return source[i + 1]
class LossStructOn:
def __init__(
self, model, n, h, loss, time, max_cost, nb_examples, ci_lower, ci_upper
):
self.model = model
self.n = n
self.h = h
self.loss = loss
self.time = time
self.max_cost = max_cost
self.nb_examples = nb_examples
self.ci_lower = ci_lower
self.ci_upper = ci_upper
class EvaluatorOnline:
def __init__(self, file_name, alpha, quiet):
self.file_name = file_name
self.conf_alpha = alpha
self.costs = []
self.best_cats = LossStructOn("cats", 0, 0, sys.float_info.max, 0, 0, 0, 0, 0)
self.best_disc_tree = LossStructOn(
"disc_tree", 0, 0, sys.float_info.max, 0, 0, 0, 0, 0
)
self.best_disc_linear = LossStructOn(
"disc_linear", 0, 0, sys.float_info.max, 0, 0, 0, 0, 0
)
self.max_time = 0.0
self.quiet = quiet
def eval(self):
data_file = open(self.file_name, "r")
line = data_file.readline()
while line:
# Get data
if line.find("CATS-online") != -1:
self.costs.append(
LossStructOn("cats", 0, 0, sys.float_info.max, 0, 0, 0, 0, 0)
)
elif line.find("Discretized-Tree-online") != -1:
self.costs.append(
LossStructOn("disc_tree", 0, 0, sys.float_info.max, 0, 0, 0, 0, 0)
)
elif line.find("Discretized-Linear-online") != -1:
self.costs.append(
LossStructOn("disc_linear", 0, 0, sys.float_info.max, 0, 0, 0, 0, 0)
)
elif line.find("timeout") != -1:
s1 = line.split()
self.max_time = float(nextword("timeout", s1))
elif line.find("n = ") != -1:
separator_position = len("n = ")
separator_position_end = line.find("\n")
self.costs[len(self.costs) - 1].n = float(
line[separator_position:separator_position_end]
)
elif line.find("h = ") != -1:
separator_position = len("h = ")
separator_position_end = line.find("\n")
self.costs[len(self.costs) - 1].h = float(
line[separator_position:separator_position_end]
)
elif line.find("Max Cost=") != -1:
separator_position = len("Max Cost=")
self.costs[len(self.costs) - 1].max_cost = float(
line[separator_position:]
)
elif line.find("number of examples") != -1:
s1 = line.split()
self.costs[len(self.costs) - 1].nb_examples = int(nextword("=", s1))
elif line.find("average loss") != -1:
s1 = line.split()
self.costs[len(self.costs) - 1].loss = float(nextword("=", s1))
elif line.find("real") != -1:
s1 = line.split()
self.costs[len(self.costs) - 1].time = float(nextword("real", s1))
line = data_file.readline()
self.get_best_loss()
self.saveConfidenceIntervals(self.best_cats)
self.saveConfidenceIntervals(self.best_disc_tree)
self.saveConfidenceIntervals(self.best_disc_linear)
if not self.quiet:
self.printAllResults()
print("max_time = ", self.max_time)
self.printBestResults(self.best_cats)
self.printBestResults(self.best_disc_tree)
self.printBestResults(self.best_disc_linear)
self.find_error()
def return_loss(self, model):
if model == "cats":
return self.best_cats.loss, self.best_cats.ci_lower, self.best_cats.ci_upper
elif model == "disc_tree":
return (
self.best_disc_tree.loss,
self.best_disc_tree.ci_lower,
self.best_disc_tree.ci_upper,
)
elif model == "disc_linear":
return (
self.best_disc_linear.loss,
self.best_disc_linear.ci_lower,
self.best_disc_linear.ci_upper,
)
def return_all(self, model):
n_ = []
h_ = []
loss_ = []
time_ = []
for c in self.costs:
if c.model == model:
if c.loss < 1:
loss_.append(c.loss)
time_.append(c.time)
n_.append(c.n)
h_.append(c.h)
return loss_, time_, n_, h_
def get_best_loss(self):
for c in self.costs:
if c.model == "cats":
if c.loss < self.best_cats.loss:
self.best_cats = c
elif c.model == "disc_tree":
if c.loss < self.best_disc_tree.loss:
self.best_disc_tree = c
elif c.model == "disc_linear":
if c.loss < self.best_disc_linear.loss:
self.best_disc_linear = c
def saveConfidenceIntervals(self, cost):
if cost.max_cost != 0:
cost.ci_lower, cost.ci_upper = ConfidenceInterval.calculate(
cost.nb_examples, cost.loss, cost.max_cost, self.conf_alpha
)
def getTime(self, model, n, hp, h, mode): # assumes costs is soreted wrt hp and n
times = []
if mode == "hp":
n_ = []
for c in self.costs:
if c.model == model:
if c.h == hp:
times.append(c.time)
n_.append(c.n)
return times, n_
elif mode == "h":
n_ = []
for c in self.costs:
if c.model == model:
if (c.h / c.n) == h:
times.append(c.time)
n_.append(c.n)
return times, n_
elif mode == "n":
h_ = []
for c in self.costs:
if c.model == model:
if c.n == n:
times.append(c.time)
h_.append(c.h)
return times, h_
def printAllResults(self):
for cost in self.costs:
print(
"model, n, h, loss, time = {0}, {1}, {2}, {3}, {4}".format(
cost.model, cost.n, cost.h, cost.loss, cost.time
)
)
def printBestResults(self, cost):
print(
"model, n, h, loss, time = {0}, {1}, {2}, {3}, {4}".format(
cost.model, cost.n, cost.h, cost.loss, cost.time
)
)
print("C.I. = {0}, {1}".format(cost.ci_lower, cost.ci_upper))
def find_error(self):
for c in self.costs:
if c.loss == sys.float_info.max:
if c.time < self.max_time:
print("error in model={0}, n={1}, h={2}".format(c.model, c.n, c.h))
if __name__ == "__main__":
namee = "BNG_cpu_act"
data_file = "../../results/" + namee + "_online_validation.txt"
alpha = 0.05
model = "cats"
quiet = False
# Parse options - get predict and data file names
args = sys.argv[1:]
opts, args = getopt.getopt(
args, "d:a:r:q", ["data_file=", "alpha=", "return_model=", "quiet"]
)
for opt, arg in opts:
if opt in ("-d", "--data_file"):
data_file = arg
elif opt in ("-a", "--alpha"):
alpha = float(arg)
elif opt in ("-r", "--return_model"):
model = arg
elif opt in ("-q", "--quiet"):
quiet = True
# Print join lines to stdout
fileJoiner = EvaluatorOnline(data_file, alpha, quiet)
returnValue = fileJoiner.eval()
print(fileJoiner.return_loss(model))
print(fileJoiner.getTime("disc_linear", 0, 0, 0, "hp"))
| 33.241803 | 88 | 0.496116 | 980 | 8,111 | 3.933673 | 0.128571 | 0.017121 | 0.014786 | 0.012451 | 0.475227 | 0.380026 | 0.343709 | 0.312581 | 0.29987 | 0.260441 | 0 | 0.018471 | 0.379238 | 8,111 | 243 | 89 | 33.378601 | 0.74717 | 0.014918 | 0 | 0.226601 | 0 | 0.009852 | 0.071133 | 0.008766 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059113 | false | 0 | 0.014778 | 0 | 0.123153 | 0.064039 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4a25879b3b9821620d7aacc564df1c717b81e8c1 | 3,445 | py | Python | python/names.py | tmcombi/tmcombi | 976d3f333c01104e5efcabd8834854ad7677ea73 | [
"MIT"
] | null | null | null | python/names.py | tmcombi/tmcombi | 976d3f333c01104e5efcabd8834854ad7677ea73 | [
"MIT"
] | null | null | null | python/names.py | tmcombi/tmcombi | 976d3f333c01104e5efcabd8834854ad7677ea73 | [
"MIT"
] | 3 | 2019-03-31T19:04:20.000Z | 2020-01-13T22:32:09.000Z | import unittest
import re
import sys
class Feature:
    def __init__(self):
        self.name = 'target'
        self.type = ''
        self.values = []

    def dump(self, out_stream=sys.stdout):
        print(self.name + ': ', end='', file=out_stream)
        if self.type == 'categorical':
            print(', '.join(self.values), end='', file=out_stream)
            print('.', file=out_stream)
        else:
            print(self.type + '.', file=out_stream)


class Names:
    def __init__(self):
        self.feature = {}
        self.feature_list = []
        self.target_feature = 'target'

    def size(self):
        return len(self.feature_list)

    def target_index(self):
        return self.feature_list.index(self.target_feature)

    def dump(self, out_stream=sys.stdout):
        print(self.target_feature + '. | the target attribute', file=out_stream)
        for feature_name in self.feature_list:
            self.feature[feature_name].dump(out_stream)

    @staticmethod
    def process_line(line):
        empty = True
        feature = Feature()
        line = re.sub(r"\n", "", line)
        line = re.sub(r"[ ]*\|.*", "", line)
        line = re.sub(r"[\. ]*$", "", line)
        line = re.sub(r"^[ ]*", "", line)
        if line == '':
            return empty, feature
        empty = False
        data = re.split(":", line, 1)
        data[0] = re.sub("[ ]*$", "", data[0])
        if re.search(",", data[0]):
            data.append(data[0])
        else:
            feature.name = data[0]
        if len(data) < 2:
            return empty, feature
        data[1] = re.sub("^[ ]*", "", data[1])
        if data[1] == '':
            return empty, feature
        if data[1] in ['continuous', 'ignore', 'label']:
            feature.type = data[1]
            return empty, feature
        feature.type = 'categorical'
        for value in re.split(",", data[1]):
            value = re.sub("[ ]*$", "", value)
            value = re.sub("^[ ]*", "", value)
            feature.values.append(value)
        return empty, feature

    def from_file(self, file):
        fp = open(file, 'r')
        empty, target_feature = Names.process_line(fp.readline())
        while empty:
            empty, target_feature = Names.process_line(fp.readline())
        self.target_feature = target_feature.name
        line = fp.readline()
        while line:
            empty, feature = Names.process_line(line)
            if not empty:
                self.feature[feature.name] = feature
                self.feature_list.append(feature.name)
            line = fp.readline()
        fp.close()
        if self.target_feature not in self.feature_list:
            self.feature[self.target_feature] = target_feature
            self.feature_list.append(self.target_feature)
        return self


class TestNames(unittest.TestCase):
    def test_feature(self):
        f = Feature()
        f.name = 'testName'
        self.assertEqual(f.name, 'testName')

    def test_names_basic(self):
        N = Names()
        self.assertTrue(N.target_feature == 'target')
        self.assertFalse(N.size() < 0)

    def test_names_real_file(self):
        N = Names().from_file('adult.names')
        self.assertEqual(N.size(), 15)
        out_stream = open('adult1.names', 'w')
        N.dump(out_stream)
        out_stream.close()
        self.assertFalse(0 > 0)


if __name__ == "__main__":
    unittest.main()
| 30.486726 | 80 | 0.543977 | 403 | 3,445 | 4.498759 | 0.188586 | 0.086045 | 0.057915 | 0.022063 | 0.255929 | 0.146718 | 0.11583 | 0.11583 | 0.067292 | 0.025372 | 0 | 0.007997 | 0.310305 | 3,445 | 112 | 81 | 30.758929 | 0.755051 | 0 | 0 | 0.157895 | 0 | 0 | 0.053701 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.115789 | false | 0 | 0.031579 | 0.021053 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c58259a66b4ddcdcd7f4a1f751602df9c2cfdf8d | 559 | py | Python | source/python/DiceGame.py | JoHyukJun/algorithm-analysis | 3eda22ce0eeb52490702206d73c04cff1eb3e72d | [
"Apache-2.0"
] | null | null | null | source/python/DiceGame.py | JoHyukJun/algorithm-analysis | 3eda22ce0eeb52490702206d73c04cff1eb3e72d | [
"Apache-2.0"
] | null | null | null | source/python/DiceGame.py | JoHyukJun/algorithm-analysis | 3eda22ce0eeb52490702206d73c04cff1eb3e72d | [
"Apache-2.0"
] | null | null | null | '''
main.py
Created by Jo Hyuk Jun on 2020
Copyright © 2020 Jo Hyuk Jun. All rights reserved.
'''
import sys
from collections import Counter
n = int(sys.stdin.readline())
score = []
for _ in range(n):
    dices = list(map(int, sys.stdin.readline().rstrip().split(' ')))
    ckr = Counter(dices)
    result = ckr.most_common(1)[0]

    if result[1] == 1:
        score.append(max(dices) * 100)
    elif result[1] == 2:
        score.append(1000 + result[0] * 100)
    else:
        score.append(10000 + result[0] * 1000)
print(max(score)) | 18.032258 | 68 | 0.599284 | 81 | 559 | 4.123457 | 0.592593 | 0.098802 | 0.053892 | 0.113772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 0.248658 | 559 | 31 | 69 | 18.032258 | 0.709524 | 0.161002 | 0 | 0 | 0 | 0 | 0.002232 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c58514720b1b21a6355dfbbd91e60a2507a2e5f4 | 11,748 | py | Python | tests/test_imx_client.py | Dimfred/imxpy | 289a67fa51ef7b33ee106a65ad69340d07c986b3 | [
"MIT"
] | 13 | 2021-12-11T11:52:32.000Z | 2022-03-11T12:58:56.000Z | tests/test_imx_client.py | Dimfred/imxpy | 289a67fa51ef7b33ee106a65ad69340d07c986b3 | [
"MIT"
] | 1 | 2021-12-19T19:15:29.000Z | 2021-12-26T14:09:16.000Z | tests/test_imx_client.py | Dimfred/imxpy | 289a67fa51ef7b33ee106a65ad69340d07c986b3 | [
"MIT"
] | 1 | 2022-01-10T15:01:04.000Z | 2022-01-10T15:01:04.000Z | from imx_objects import *
from utils import SafeNumber
import time
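# Tests for utility endpoints of the IMX client: message signing, user registration,
# and project/collection/metadata-schema management.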
class TestUtility:
def test_okay_sign_msg(self, client):
params = SignMsgParams(msg="{'test':'test'")
res = client.sign_msg(params)
res = res.result()
# TODO actually thats not okay currently it returns only
# success but not the signed message
# related to IMX not us
assert res["status"] == "success", res
def test_okay_user_registered(self, client):
res = client.register()
res = res.result()
assert res["status"] == "success", res
def test_okay_project_created(self, client):
params = CreateProjectParams(
name="test_proj", company_name="test_company", contact_email="test@test.com"
)
res = client.create_project(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["id"], res
def test_okay_collection_created_and_updated(self, client, project_id, random_addr):
return
# imx now returns an error when the contract_addr does not contain byte code
# therefore one can't use random_addr anymore
params = CreateCollectionParams(
name="test",
contract_addr=random_addr,
owner_public_key="test",
project_id=project_id,
metadata_api_url="https://test.com",
description="test",
icon_url="https://test.com/icon",
collection_image_url="https://test.com/collection_image",
)
res = client.create_collection(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["address"] == random_addr, res
params = UpdateCollectionParams(name="test2", contract_addr=random_addr)
res = client.update_collection(params)
res = res.result()
# TODO somehow imx returns the wrong values, but they have been updated
# normally, just retry and see what happens
res = client.update_collection(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["name"] == "test2", res
def test_okay_metadata_schema_added_and_updated(
self, client, contract_addr, random_str
):
schema = [{"name": random_str, "type": "text", "filterable": False}]
params = CreateMetadataSchemaParams(
contract_addr=contract_addr, metadata=schema
)
res = client.create_metadata_schema(params)
res = res.result()
assert res["status"] == "success", res
params = UpdateMetadataSchemaParams(
contract_addr=contract_addr, name=random_str, new_name=random_str + "i"
)
res = client.update_metadata_schema(params)
res = res.result()
assert res["status"] == "success", res
def test_okay_create_exchange(self, client, acc1):
pass
# params = CreateExchangeParams(wallet_addr=acc1.addr)
# res = client.create_exchange(params)
# res = res.result()
# TODO currently throws error, probably because it is not possible to create
# on mainnet? However, the call is there and should work correctly
class TestTransfer:
def get_balance(self, client, addr):
res = client.db.balances(addr)
return int(res["result"][0]["balance"])
def test_okay_simple_eth(self, client, acc1, acc2):
params = TransferParams(
sender=acc1.addr,
receiver=acc2.addr,
token=ETH(quantity="0.00001"),
)
res = client.transfer(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["transfer_id"], res
def test_okay_simple_erc721(self, client, token_id, acc1, acc2, contract_addr):
params = TransferParams(
sender=acc1.addr,
receiver=acc2.addr,
token=ERC721(token_id=token_id, contract_addr=contract_addr),
)
res = client.transfer(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["transfer_id"], res
# TODO
def test_okay_simple_erc20(self, client, acc1, acc2):
pass
def test_fails_not_enough_balance(self, client, acc1, acc2):
params = TransferParams(
sender=acc1.addr, receiver=acc2.addr, token=ETH(quantity=100000)
)
res = client.transfer(params, max_retries=1)
res = res.result()
assert res["status"] == "error", res
assert "insufficient balance" in res["result"], res
class TestMint:
def random_token_id(self):
import random
return random.randint(0, 1000000000000000000000000000000)
def test_okay_multiple_targets_and_override_global_royalties(
self, client, acc1, acc2, acc3, contract_addr
):
tid1 = self.random_token_id()
tid2 = self.random_token_id()
tid3 = self.random_token_id()
tid1 = self.random_token_id()
params = MintParams(
contract_addr=contract_addr,
royalties=[Royalty(recipient=acc1.addr, percentage=1.0)],
targets=[
MintTarget(
addr=acc2.addr,
tokens=[
MintableToken(
id=tid1,
blueprint="1",
# tests override global royalties
royalties=[Royalty(recipient=acc2.addr, percentage=2.0)],
),
# tests multiple token mints at a time
MintableToken(id=tid2, blueprint="2"),
],
),
# tests multiple user targets at a time
MintTarget(
addr=acc3.addr, tokens=[MintableToken(id=tid3, blueprint="3")]
),
],
)
res = client.mint(params, max_retries=1)
res = res.result()
assert res["status"] == "success", res
def test_fails_unregistered_contract_addr(
self, client, acc1, unregistered_contract_addr
):
params = MintParams(
contract_addr=unregistered_contract_addr,
targets=[
MintTarget(
addr=acc1.addr,
tokens=[
MintableToken(
id=self.random_token_id(),
blueprint="1",
),
],
),
],
)
res = client.mint(params, max_retries=1)
res = res.result()
assert res["status"] == "error", res
assert "Unique project error: could not find collections project" in res["result"], res
def test_fails_duplicate_asset(self, client, contract_addr, acc1):
params = MintParams(
contract_addr=contract_addr,
targets=[
MintTarget(
addr=acc1.addr,
tokens=[
MintableToken(
id=0,
blueprint="0",
)
],
)
],
)
res = client.mint(params, max_retries=1)
res = res.result()
assert res["status"] == "error", res
assert "asset, duplicate id" in res["result"], res
class TestBurn:
def test_okay_burn(self, client, acc1, contract_addr, minted_nft_id):
# sends the nft to the burn addr, which is <TODO>
params = BurnParams(
sender=acc1.addr,
token=ERC721(token_id=minted_nft_id, contract_addr=contract_addr),
)
res = client.burn(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["transfer_id"], res
class TestWithdrawal:
def test_okay_prepare(self, client, acc1):
params = PrepareWithdrawalParams(
sender=acc1.addr, token=ETH(quantity="0.0000001")
)
res = client.prepare_withdrawal(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["withdrawal_id"], res
def test_okay_complete_withdrawal(self, client, acc1):
# this test is a bit weird, since it can only run if we have
# run prepare_withdrawal before that
balance = client.db.balances(acc1.addr)
withdrawable = int(balance["result"][0]["withdrawable"])
if not withdrawable:
msg = "[WARNING] 'test_okay_complete_withdrawal', can't run since there is "
msg += "no asset to withdraw."
print(msg)
return
params = CompleteWithdrawalParams(token=ETH())
res = client.complete_withdrawal(params)
res = res.result()
# always returns success so no help here
assert res["status"] == "success", res
# TODO the result with each withdrawal a new "random" address dunno why yet tho.
# assert res["result"] == acc1.addr
class TestDeposit:
def test_okay_deposit(self):
pass
def test_okay_depostit_cancel(self):
pass
def test_okay_deposit_reclaim(self):
pass
class TestTrading:
def test_okay_order_sell_and_cancel(
self, client, acc1, minted_nft_id, contract_addr
):
params = CreateOrderParams(
sender=acc1.addr,
token_sell=ERC721(token_id=minted_nft_id, contract_addr=contract_addr),
token_buy=ETH(quantity="0.000001"),
)
res = client.create_order(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["order_id"], res
order_id = res["result"]["order_id"]
params = CancelOrderParams(order_id=order_id)
res = client.cancel_order(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["order_id"] == int(order_id), res
assert not res["result"]["status"], res
def test_okay_order_buy(self):
# TODO I think this didn't work for serveral people, just let it here as
# a reminder to test at some point
pass
def test_okay_create_trade(self, client, acc1, valid_order_params, contract_addr):
order_id, token_id = valid_order_params
params = CreateTradeParams(
sender=acc1.addr,
order_id=order_id,
token_buy=ERC721(token_id=token_id, contract_addr=contract_addr),
token_sell=ETH(quantity="0.000001"),
)
res = client.create_trade(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"]["trade_id"], res
class TestApprovals:
def test_okay_nft(self, client, minted_nft_id, contract_addr):
try:
params = ApproveNFTParams(
token_id=minted_nft_id, contract_addr=contract_addr
)
res = client.approve_nft(params)
res = res.result()
except:
assert False, f"Failed to approve NFT: {res}"
def test_okay_erc20(self, client):
params = ApproveERC20Params(
token=ERC20(
quantity="0.01",
contract_addr="0xccc8cb5229b0ac8069c51fd58367fd1e622afd97",
decimals=18,
as_wei=False,
)
)
res = client.approve_erc20(params)
res = res.result()
assert res["status"] == "success", res
assert res["result"], res
| 33 | 95 | 0.575502 | 1,281 | 11,748 | 5.102264 | 0.20765 | 0.056457 | 0.042228 | 0.049572 | 0.364137 | 0.310741 | 0.298807 | 0.288709 | 0.274327 | 0.252448 | 0 | 0.023002 | 0.322778 | 11,748 | 355 | 96 | 33.092958 | 0.798517 | 0.093718 | 0 | 0.411321 | 0 | 0 | 0.088119 | 0.006967 | 0 | 0 | 0.003954 | 0.002817 | 0.135849 | 1 | 0.098113 | false | 0.022642 | 0.015094 | 0 | 0.158491 | 0.022642 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c589cc34ce97c866cbd9f81718656a998f40685b | 943 | py | Python | NUS-natual/gstCN0270_WEISHUO_assignment4/part3.py | weishuo2/NUS-nature | e18f74b7d51c93cac401a881bb461a46d3f1e42e | [
"MIT"
] | null | null | null | NUS-natual/gstCN0270_WEISHUO_assignment4/part3.py | weishuo2/NUS-nature | e18f74b7d51c93cac401a881bb461a46d3f1e42e | [
"MIT"
] | null | null | null | NUS-natual/gstCN0270_WEISHUO_assignment4/part3.py | weishuo2/NUS-nature | e18f74b7d51c93cac401a881bb461a46d3f1e42e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 25 11:13:38 2017
@author: 魏硕
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import os
os.chdir("C:\\Users\魏硕\Desktop\hw4")
P=np.mat([[2],[3],[1]])
C=np.mat([[3],[2]])
T1=np.mat([[1,0,-C[0][0]],[0,1,-C[1][0]],[0,0,1]])
T2=np.mat([[1,0,C[0][0]],[0,1,C[1][0]],[0,0,1]])
for i in range(1,8):
    k = math.pi * (i/4)
    R = np.mat([[math.cos(k), -math.sin(k), 0],
                [math.sin(k),  math.cos(k), 0],
                [0,            0,           1]])
    result1 = np.dot(T1, P)  # coordinates of P after shifting the origin to C
    p1, = plt.plot(result1[0][0], result1[1][0], 'ro', label="Trans1")
    result2 = np.dot(R, result1)  # rotate by angle k
    p2, = plt.plot(result2[0][0], result2[1][0], 'bo', label="Rotate")
    result3 = np.dot(T2, result2)  # shift the origin back to the original origin
    p3, = plt.plot(result3[0][0], result3[1][0], 'ko', label="Trans2")
plt.legend(handles = [p1, p2, p3,], labels = ['Trans1', 'Rotate','Trans2'])
plt.savefig("3_result")  # save the resulting plot
plt.show()
| 29.46875 | 75 | 0.560976 | 180 | 943 | 2.933333 | 0.4 | 0.049242 | 0.028409 | 0.037879 | 0.068182 | 0.068182 | 0.068182 | 0.068182 | 0.068182 | 0.068182 | 0 | 0.109023 | 0.153765 | 943 | 31 | 76 | 30.419355 | 0.552632 | 0.108165 | 0 | 0 | 0 | 0 | 0.089264 | 0.028951 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c58aa965b0f8fd049fb7342758be4a4f77647455 | 3,970 | py | Python | fv-beginner/ex-05-hello/helloworld.py | DonaldKellett/nmigen-beginner | 260ae76a5277e36ec9909aaf6b76acab320aed88 | [
"MIT"
] | 1 | 2020-11-09T13:34:02.000Z | 2020-11-09T13:34:02.000Z | fv-beginner/ex-05-hello/helloworld.py | DonaldKellett/nmigen-beginner | 260ae76a5277e36ec9909aaf6b76acab320aed88 | [
"MIT"
] | null | null | null | fv-beginner/ex-05-hello/helloworld.py | DonaldKellett/nmigen-beginner | 260ae76a5277e36ec9909aaf6b76acab320aed88 | [
"MIT"
] | null | null | null | from nmigen import *
from nmigen.back.pysim import *
from nmigen.asserts import *
from nmigen.test.utils import *
from nmigen.build import *
from nmigen.build import ResourceError
from nmigen.vendor.lattice_ecp5 import *
from nmigen_boards.resources import *
from functools import reduce
import itertools
import os
import subprocess
from txuart import *
__all__ = ["HelloWorld", "VersaECP5Platform"]
"""
Hello World top-level for RS-232 transmitter, this time formally verified
to behave correctly
See https://zipcpu.com/tutorial/lsn-05-serialtx.pdf for more details
"""
class HelloWorld(Elaboratable):
def __init__(self, msg = "Hello World!", fv_mode = False):
self.i_busy = Signal(1, reset=0)
self.o_wr = Signal(1, reset=0)
self.msg = "%s\n" % msg
self.o_data = Signal(8, reset=ord(self.msg[0]))
self.fv_mode = fv_mode
def ports(self):
return [
self.i_busy,
self.o_wr,
self.o_data
]
def elaborate(self, platform):
m = Module()
o_uart_tx = Signal(1, reset=1)
state = Signal(range(len(self.msg)), reset=0)
if platform is not None and platform != "formal":
o_uart_tx = platform.request("uart").tx.o
m.submodules.txuart = txuart = TXUART(self.o_wr, self.o_data, self.i_busy, \
o_uart_tx, self.fv_mode)
m.d.comb += self.o_wr.eq(~self.i_busy)
with m.FSM():
for i in range(len(self.msg)):
with m.State(str(i)):
m.next = str(i)
with m.If(self.o_wr):
m.next = str((i + 1) % len(self.msg))
if i == len(self.msg) - 1:
m.d.sync += state.eq(0)
else:
m.d.sync += state.eq(state + 1)
m.d.sync += self.o_data.eq(ord(self.msg[(i + 1) % len(self.msg)]))
if self.fv_mode:
"""
Indicator of whether Past() is valid
"""
f_past_valid = Signal(1, reset=0)
m.d.sync += f_past_valid.eq(1)
"""
Assume there is a reasonable upper bound on the consecutive number of clock
cycles that i_busy is asserted, say, 10 * CLOCKS_PER_BAUD
This is required for some assertions to pass k-induction
"""
# CLOCKS_PER_BAUD = 4 in simulation (see txuart.py)
CLOCKS_PER_BAUD = 4
f_past10n_valid = Signal(1, reset=0)
f_past10n_ctr = Signal(range(10 * CLOCKS_PER_BAUD), reset=0)
m.d.sync += f_past10n_ctr.eq(f_past10n_ctr + 1)
with m.If(f_past10n_ctr == 10 * CLOCKS_PER_BAUD - 1):
m.d.sync += f_past10n_ctr.eq(f_past10n_ctr)
m.d.sync += f_past10n_valid.eq(1)
with m.If(f_past10n_valid & reduce(lambda a, b: a & b, \
(Past(self.i_busy, i) for i in range(1, 10 * CLOCKS_PER_BAUD + 1)))):
m.d.comb += Assume(~self.i_busy)
"""
Properties of o_wr
"""
# o_wr is never asserted when i_busy is asserted
with m.If(self.i_busy):
m.d.comb += Assert(~self.o_wr)
"""
Properties of o_data
"""
# o_data holds the correct byte in each respective state
with m.Switch(state):
for i in range(len(self.msg)):
with m.Case(i):
m.d.comb += Assert(self.o_data == ord(self.msg[i]))
"""
Properties regarding state
"""
# Initial state is zero (= transmit first character)
with m.If(~f_past_valid):
m.d.comb += Assert(state == 0)
# o_wr triggers state transitions, and state transitions are correct
with m.If(f_past_valid & Past(self.o_wr)):
m.d.comb += Assert(state == ((Past(state) + 1) % len(self.msg)))
return m
if __name__ == "__main__":
"""
Simulation
"""
m = Module()
m.submodules.helloworld = helloworld = HelloWorld()
sim = Simulator(m)
def process():
for i in range(1000):
yield
sim.add_clock(1e-8)
sim.add_sync_process(process)
with sim.write_vcd('helloworld.vcd', 'helloworld.gtkw', traces=helloworld.ports()):
sim.run()
"""
Formal Verification
"""
class HelloWorldTest(FHDLTestCase):
def test_helloworld(self):
self.assertFormal(HelloWorld(fv_mode=True), mode='prove', depth=66)
HelloWorldTest().test_helloworld()
"""
Build
"""
VersaECP5Platform().build(HelloWorld("FPGA programming with nMigen is fun"), do_program=True) | 26.644295 | 94 | 0.668262 | 640 | 3,970 | 3.9875 | 0.290625 | 0.010188 | 0.024687 | 0.020376 | 0.200235 | 0.127351 | 0.05721 | 0.043887 | 0.043887 | 0.023511 | 0 | 0.022111 | 0.191184 | 3,970 | 149 | 94 | 26.644295 | 0.772656 | 0.067758 | 0 | 0.045977 | 0 | 0 | 0.04199 | 0 | 0 | 0 | 0 | 0 | 0.068966 | 1 | 0.057471 | false | 0 | 0.149425 | 0.011494 | 0.252874 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c58e592fc012abf8353c94dfea5612b5abbfa28d | 1,704 | py | Python | make_release.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | make_release.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | make_release.py | sairam4123/GodotReleaseScriptPython | 2fd2644b0301f20b89b6772a0c93cec6d012f080 | [
"MIT"
] | null | null | null | import os
import shutil
import subprocess
import sys
from constants import ARGUMENT_PARSER_CREATOR, EXPORT_PATH, EXTENSIONS, FOLDER_NAMES, GODOT, PROJECT_NAME_REPLACED_WITH_HYPENS, RELEASES_FOLDER, TYPE
from version_info import VersionInfo, get_version, set_version
def make_release(platform: str, version: VersionInfo):
    if platform not in ['Windows Desktop', 'Mac OSX', 'Linux/X11', 'HTML5']:
        raise ValueError(f"can't release for {platform}")
    version_path = EXPORT_PATH / FOLDER_NAMES[version.release_level] / str(version)
    version_path.mkdir(parents=True, exist_ok=True)
    original_path = os.getcwd()
    platform_replaced = platform.replace(' ', '-').replace('/', '-')
    path = (RELEASES_FOLDER / platform_replaced)
    path.mkdir(parents=True, exist_ok=True)
    file_base_name = f"{PROJECT_NAME_REPLACED_WITH_HYPENS}-{platform_replaced}-{version}{TYPE[version.release_level]}"
    export_file_name = f'{file_base_name}{EXTENSIONS[platform]}'
    zip_file_name_7z = f'{file_base_name}.7z'
    subprocess.run([GODOT, '--export', f'{platform}', path / export_file_name], shell=True)
    os.chdir(str(path))
    subprocess.run(['7z', 'a', zip_file_name_7z, '.'], shell=True)
    shutil.move(str(path / zip_file_name_7z), str(version_path / zip_file_name_7z))
    os.chdir(original_path)
    shutil.rmtree(str(path))


def main():
    version = get_version()
    parser = ARGUMENT_PARSER_CREATOR()
    args = parser.parse_args(sys.argv[1:])
    if not args.current:
        version.increment(args.release_level, args.release_type)
        set_version(version)
    make_release(args.platform, version)
    print("\a")


if __name__ == '__main__':  # To test.
    main()
| 32.769231 | 149 | 0.717723 | 230 | 1,704 | 5.013043 | 0.33913 | 0.041631 | 0.038161 | 0.0451 | 0.133565 | 0.053773 | 0.053773 | 0 | 0 | 0 | 0 | 0.00693 | 0.153169 | 1,704 | 51 | 150 | 33.411765 | 0.7921 | 0.004695 | 0 | 0 | 0 | 0 | 0.14817 | 0.077922 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.171429 | 0 | 0.228571 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c58ef2eeeef23f0692b1b9ccea0dbf6d23c475a2 | 5,598 | py | Python | tests/day12_spec.py | tysonmcnulty/advent-of-code-2021 | 7a0f2852b203fb8b87f60534676e01eda5a5c6a7 | [
"MIT"
] | null | null | null | tests/day12_spec.py | tysonmcnulty/advent-of-code-2021 | 7a0f2852b203fb8b87f60534676e01eda5a5c6a7 | [
"MIT"
] | null | null | null | tests/day12_spec.py | tysonmcnulty/advent-of-code-2021 | 7a0f2852b203fb8b87f60534676e01eda5a5c6a7 | [
"MIT"
] | null | null | null | import os
import unittest
from collections import Counter
from src.day12 import load_cave_connections, CaveMap
class Day12Tests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.cave_connections_test = load_cave_connections('data/day12_cave_connections_test.txt')
cls.cave_connections_tm = load_cave_connections('data/day12_cave_connections_tm.txt')
cls.cave_map_test = CaveMap(connections = cls.cave_connections_test)
cls.cave_map_tm = CaveMap(connections = cls.cave_connections_tm)
def test_load_cave_connections(self):
self.assertEqual([
{ "start", "A" },
{ "start", "b" },
{ "A", "c" },
{ "A", "b" },
{ "b", "d" },
{ "A", "end" },
{ "b", "end" },
], self.cave_connections_test)
def test_cave_map(self):
expected_cave_map = CaveMap()
expected_cave_map.add_cave("start")
expected_cave_map.add_cave("end")
expected_cave_map.add_cave("A")
expected_cave_map.add_connection("start", "b")
expected_cave_map.add_connection("end", "b")
expected_cave_map.add_path("c", "A", "b", "d")
expected_cave_map.add_path("start", "A", "end")
self.assertEqual(expected_cave_map, self.cave_map_test)
def test_find_all_paths_when_revisitable_is_never(self):
self.assertEqual(set(), CaveMap().find_all_paths("A", "A"))
self.assertEqual({tuple("A")}, CaveMap(caves = ["A"]).find_all_paths("A", "A"))
self.assertEqual(set(), CaveMap(caves = ["A", "B"]).find_all_paths("A", "B"))
self.assertEqual({("A", "B")}, CaveMap(connections = [{"A", "B"}]).find_all_paths("A", "B"))
self.assertEqual({
("start", "A", "end"),
("start", "b", "end"),
("start", "A", "b", "end"),
("start", "b", "A", "end"),
}, self.cave_map_test.find_all_paths("start", "end"))
def test_find_all_paths_when_large_caves_are_revisitable(self):
self.assertEqual({
("start", "A", "b", "A" , "c", "A", "end"),
("start", "A", "c", "A" , "b", "A", "end"),
("start", "A", "c", "A" , "b", "end"),
("start", "b", "A", "c" , "A", "end"),
("start", "A", "c", "A" , "end"),
("start", "A", "b", "A" , "end"),
("start", "A", "b", "end"),
("start", "A", "end"),
("start", "b", "A", "end"),
("start", "b", "end"),
}, self.cave_map_test.find_all_paths("start", "end", revisitable = is_large_cave))
all_paths_tm = self.cave_map_tm.find_all_paths("start", "end", revisitable = is_large_cave)
self.assertEqual(4338, len(all_paths_tm))
def test_find_all_paths_when_large_caves_and_one_small_cave_are_revisitable(self):
self.assertEqual({
('start', 'A', 'end'),
('start', 'b', 'end'),
('start', 'A', 'b', 'end'),
('start', 'b', 'A', 'end'),
('start', 'A', 'c', 'A', 'end'),
('start', 'A', 'b', 'A', 'end'),
('start', 'b', 'A', 'b', 'end'),
('start', 'b', 'd', 'b', 'end'),
('start', 'A', 'c', 'A', 'b', 'end'),
('start', 'A', 'b', 'A', 'b', 'end'),
('start', 'A', 'b', 'd', 'b', 'end'),
('start', 'b', 'A', 'c', 'A', 'end'),
('start', 'b', 'A', 'b', 'A', 'end'),
('start', 'b', 'd', 'b', 'A', 'end'),
('start', 'A', 'c', 'A', 'c', 'A', 'end'),
('start', 'A', 'c', 'A', 'b', 'A', 'end'),
('start', 'A', 'b', 'A', 'c', 'A', 'end'),
('start', 'A', 'b', 'A', 'b', 'A', 'end'),
('start', 'A', 'b', 'd', 'b', 'A', 'end'),
('start', 'b', 'A', 'c', 'A', 'b', 'end'),
('start', 'A', 'c', 'A', 'c', 'A', 'b', 'end'),
('start', 'A', 'c', 'A', 'b', 'A', 'b', 'end'),
('start', 'A', 'c', 'A', 'b', 'd', 'b', 'end'),
('start', 'A', 'b', 'A', 'c', 'A', 'b', 'end'),
('start', 'b', 'A', 'c', 'A', 'c', 'A', 'end'),
('start', 'b', 'A', 'c', 'A', 'b', 'A', 'end'),
('start', 'b', 'A', 'b', 'A', 'c', 'A', 'end'),
('start', 'b', 'd', 'b', 'A', 'c', 'A', 'end'),
('start', 'A', 'c', 'A', 'c', 'A', 'b', 'A', 'end'),
('start', 'A', 'c', 'A', 'b', 'A', 'c', 'A', 'end'),
('start', 'A', 'c', 'A', 'b', 'A', 'b', 'A', 'end'),
('start', 'A', 'c', 'A', 'b', 'd', 'b', 'A', 'end'),
('start', 'A', 'b', 'A', 'c', 'A', 'c', 'A', 'end'),
('start', 'A', 'b', 'A', 'c', 'A', 'b', 'A', 'end'),
('start', 'A', 'b', 'A', 'b', 'A', 'c', 'A', 'end'),
('start', 'A', 'b', 'd', 'b', 'A', 'c', 'A', 'end'),
}, self.cave_map_test.find_all_paths("start", "end", revisitable = is_large_cave_or_no_small_caves_revisited))
@unittest.skipUnless(bool(os.getenv('AOC_RUN_SLOW_TESTS')), 'slow test')
def test_find_all_paths_when_large_caves_and_one_small_cave_are_revisitable_tm(self):
all_paths_tm = self.cave_map_tm.find_all_paths("start", "end", revisitable = is_large_cave_or_no_small_caves_revisited)
self.assertEqual(114189, len(all_paths_tm))
is_large_cave = lambda cave, partial_path: cave.isupper()
is_large_cave_or_no_small_caves_revisited = lambda cave, partial_path: cave.isupper() or (
cave not in { "start", "end" } and
not any(times_visited >= 2 for cave, times_visited in Counter(partial_path).items() if cave.islower())
)
| 45.885246 | 127 | 0.475884 | 728 | 5,598 | 3.427198 | 0.105769 | 0.150701 | 0.042084 | 0.08016 | 0.703808 | 0.593587 | 0.551904 | 0.453307 | 0.370341 | 0.301804 | 0 | 0.004618 | 0.265095 | 5,598 | 121 | 128 | 46.264463 | 0.601847 | 0 | 0 | 0.104762 | 0 | 0 | 0.148446 | 0.012504 | 0 | 0 | 0 | 0 | 0.104762 | 1 | 0.066667 | false | 0 | 0.038095 | 0 | 0.114286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c58fe5984f7a373e55e2f2765e080b6266894e36 | 1,190 | py | Python | KookminAnnotoriousOpenPlatform/cropy/expressJson.py | wuliupo/annotorious | fe07316e78dd00d06484f5f0de88d110df7928db | [
"MIT"
] | 1 | 2018-04-14T08:33:44.000Z | 2018-04-14T08:33:44.000Z | KookminAnnotoriousOpenPlatform/cropy/expressJson.py | wuliupo/annotorious | fe07316e78dd00d06484f5f0de88d110df7928db | [
"MIT"
] | 1 | 2018-05-31T04:47:29.000Z | 2018-06-19T07:59:44.000Z | KookminAnnotoriousOpenPlatform/cropy/expressJson.py | wuliupo/annotorious | fe07316e78dd00d06484f5f0de88d110df7928db | [
"MIT"
] | 1 | 2018-04-14T09:51:37.000Z | 2018-04-14T09:51:37.000Z | import zipfile
import datetime
import os
class ExpressJson():
    def express(self, zp, path, img):
        zp.write(path + "/" + img)

    def express_all_json(self, zp):
        path = "/var/www/html/jsondata"
        for json in os.listdir(path):
            if json.rfind('.json') > 0:
                # print(path + "/" + img)
                self.express(zp, path, json)
        return 1

    def run(self):
        time = datetime.datetime.now()
        now = str(time.year) + str(time.month) + \
            str(time.day) + str(time.hour) + str(time.minute)
        zp = zipfile.ZipFile(now + "_jsonExpr.zip", "w")
        isTrue = self.express_all_json(zp)
        zp.close()
        if isTrue != 1:
            print("error")
        # let you consider to.
        try:
            self.delete_orginal_file_all()
        except Exception as delErr:
            print("json delete error\n", delErr)

    def delete_orginal_file_all(self):
        path = "/var/www/html/jsondata/"
        for json in os.listdir(path):
            if json.rfind('.json') > 0:
                os.remove(path + json)
| 28.333333 | 64 | 0.49916 | 139 | 1,190 | 4.194245 | 0.410072 | 0.060034 | 0.034305 | 0.048027 | 0.205832 | 0.205832 | 0.205832 | 0.205832 | 0.205832 | 0.205832 | 0 | 0.005398 | 0.377311 | 1,190 | 41 | 65 | 29.02439 | 0.781377 | 0.036975 | 0 | 0.133333 | 0 | 0 | 0.085222 | 0.040798 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.1 | 0 | 0.3 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5927875e7ffbf7da50e58c258e7f66b91124459 | 957 | py | Python | code/fe/40lda.py | okotaku/pet_finder | 380e4f19172e06e92b5b752f59e2902efa6aee1f | [
"MIT"
] | 34 | 2019-07-31T01:17:18.000Z | 2020-11-15T20:01:30.000Z | code/fe/40lda.py | okotaku/pet_finder | 380e4f19172e06e92b5b752f59e2902efa6aee1f | [
"MIT"
] | null | null | null | code/fe/40lda.py | okotaku/pet_finder | 380e4f19172e06e92b5b752f59e2902efa6aee1f | [
"MIT"
] | 6 | 2019-07-31T07:21:35.000Z | 2021-05-21T12:46:06.000Z | from collections import defaultdict
from gensim.models import LdaModel
from gensim.corpora.dictionary import Dictionary
from keras.preprocessing.text import text_to_word_sequence
from utils import *
def w2v(train_text, n_topics=5):
    train_corpus = [text_to_word_sequence(text) for text in train_text]
    dictionary = Dictionary(train_corpus)
    score_by_topic = defaultdict(int)
    corpus = [dictionary.doc2bow(text) for text in train_corpus]
    model = LdaModel(corpus=corpus, num_topics=n_topics, id2word=dictionary)
    lda_score = []
    for text in corpus:
        scores = []
        for topic, score in model[text]:
            scores.append(float(score))
        lda_score.append(scores)
    w2v_cols = ["lda{}".format(i) for i in range(n_topics)]
    result = pd.DataFrame(lda_score, columns=w2v_cols)
    return result
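

# Illustrative sketch added for clarity; it is not part of the original feature
# script and is never called by it. The two documents below are made up. w2v()
# fits an LDA model and returns a DataFrame with one row per document and
# columns lda0..lda{n_topics-1} holding the topic weights (gensim may omit
# near-zero topics, so some cells can come back missing).
def _example_w2v():
    docs = ["a friendly young dog looking for a home",
            "an old cat who likes to sleep all day"]
    return w2v(docs, n_topics=2)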
if __name__ == '__main__':
    result = w2v(train["Description"])
result.to_feather("../feature/lda.feather") | 30.870968 | 76 | 0.715778 | 129 | 957 | 5.069767 | 0.403101 | 0.03211 | 0.041284 | 0.055046 | 0.055046 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008974 | 0.184953 | 957 | 31 | 77 | 30.870968 | 0.829487 | 0 | 0 | 0 | 0 | 0 | 0.048017 | 0.022965 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.217391 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5953d849b706a374486e6cf22aba82d567878ff | 1,080 | py | Python | examples/transform/box_cox_transform.py | CU-NESS/distpy | 279ba7e46726a85246566401fca19b8739d18d08 | [
"Apache-2.0"
] | null | null | null | examples/transform/box_cox_transform.py | CU-NESS/distpy | 279ba7e46726a85246566401fca19b8739d18d08 | [
"Apache-2.0"
] | null | null | null | examples/transform/box_cox_transform.py | CU-NESS/distpy | 279ba7e46726a85246566401fca19b8739d18d08 | [
"Apache-2.0"
] | null | null | null | """
File: examples/transform/box_cox_transform.py
Author: Keith Tauscher
Date: 2 Oct 2018
Description: Example showing how to use the BoxCoxTransform class.
"""
import os
import numpy as np
from distpy import BoxCoxTransform, cast_to_transform,\
load_transform_from_hdf5_file
num_channels = 100
x_values = np.linspace(-10, 10, num_channels)
null_transform = BoxCoxTransform(1, offset=1)
hdf5_file_name = 'TESTING_BOXCOX_TRANSFORM_CLASS.hdf5'
try:
    null_transform.save(hdf5_file_name)
    assert(null_transform == load_transform_from_hdf5_file(hdf5_file_name))
except:
    if os.path.exists(hdf5_file_name):
        os.remove(hdf5_file_name)
    raise
else:
    os.remove(hdf5_file_name)
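
# Note added for clarity (not part of the original example): assuming the usual
# two-parameter Box-Cox form y = ((x + offset)**lam - 1) / lam for lam != 0,
# the lam = 1, offset = 1 transform constructed above reduces to y = x, which
# is why the assertions below expect an identity mapping with unit first
# derivative and vanishing higher derivatives.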
assert(null_transform == cast_to_transform('box-cox 1 1'))
assert(np.allclose(null_transform(x_values), x_values))
assert(\
np.allclose(null_transform.derivative(x_values), x_values ** 0))
assert(np.allclose(null_transform.second_derivative(x_values),\
np.zeros_like(x_values)))
assert(np.allclose(null_transform.third_derivative(x_values),\
np.zeros_like(x_values)))
| 28.421053 | 75 | 0.780556 | 162 | 1,080 | 4.882716 | 0.382716 | 0.079646 | 0.091024 | 0.101138 | 0.432364 | 0.323641 | 0.17067 | 0.088496 | 0 | 0 | 0 | 0.027225 | 0.115741 | 1,080 | 37 | 76 | 29.189189 | 0.801047 | 0.141667 | 0 | 0.16 | 0 | 0 | 0.050109 | 0.038126 | 0 | 0 | 0 | 0 | 0.24 | 1 | 0 | false | 0 | 0.12 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c595c0a3f076c39bf37561a072f7a3bd28ce5ddc | 9,071 | py | Python | kettle/make_json.py | tehcyx/test-infra-k8s | c508de13c92daeda585fee78267d6da574a272aa | [
"Apache-2.0"
] | 1 | 2019-08-22T03:18:28.000Z | 2019-08-22T03:18:28.000Z | kettle/make_json.py | tehcyx/test-infra-k8s | c508de13c92daeda585fee78267d6da574a272aa | [
"Apache-2.0"
] | null | null | null | kettle/make_json.py | tehcyx/test-infra-k8s | c508de13c92daeda585fee78267d6da574a272aa | [
"Apache-2.0"
] | 1 | 2019-12-12T22:42:23.000Z | 2019-12-12T22:42:23.000Z | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate JSON for BigQuery importing."""
import argparse
import logging
import json
import os
import subprocess
import sys
import time
import traceback
try:
    import defusedxml.ElementTree as ET
except ImportError:
    import xml.etree.cElementTree as ET

import model


def parse_junit(xml):
    """Generate failed tests as a series of dicts. Ignore skipped tests."""
    # NOTE: this is modified from gubernator/view_build.py
    tree = ET.fromstring(xml)

    # pylint: disable=redefined-outer-name
    def make_result(name, time, failure_text):
        if failure_text:
            if time is None:
                return {'name': name, 'failed': True, 'failure_text': failure_text}
            return {'name': name, 'time': time, 'failed': True, 'failure_text': failure_text}
        if time is None:
            return {'name': name}
        return {'name': name, 'time': time}

    # Note: skipped tests are ignored because they make rows too large for BigQuery.
    # Knowing that a given build could have ran a test but didn't for some reason
    # isn't very interesting.
    if tree.tag == 'testsuite':
        for child in tree.findall('testcase'):
            name = child.attrib['name']
            time = float(child.attrib['time'] or 0)
            failure_text = None
            for param in child.findall('failure'):
                failure_text = param.text
            skipped = child.findall('skipped')
            if skipped:
                continue
            yield make_result(name, time, failure_text)
    elif tree.tag == 'testsuites':
        for testsuite in tree:
            suite_name = testsuite.attrib['name']
            for child in testsuite.findall('testcase'):
                name = '%s %s' % (suite_name, child.attrib['name'])
                time = float(child.attrib['time'] or 0)
                failure_text = None
                for param in child.findall('failure'):
                    failure_text = param.text
                skipped = child.findall('skipped')
                if skipped:
                    continue
                yield make_result(name, time, failure_text)
    else:
        logging.error('unable to find failures, unexpected tag %s', tree.tag)
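

# Illustrative sketch added for clarity: this helper is not part of the
# original kettle module and is never called by it. The XML below is a
# made-up, minimal junit payload showing the shape of the dicts that
# parse_junit() yields: plain name/time entries for passes, 'failed' and
# 'failure_text' for failures, while <skipped> testcases are dropped.
def _example_parse_junit():
    sample = ('<testsuite>'
              '<testcase name="t_pass" time="0.5"/>'
              '<testcase name="t_fail" time="1.5"><failure>boom</failure></testcase>'
              '<testcase name="t_skip" time="0"><skipped/></testcase>'
              '</testsuite>')
    return list(parse_junit(sample))
    # -> [{'name': 't_pass', 'time': 0.5},
    #     {'name': 't_fail', 'time': 1.5, 'failed': True, 'failure_text': 'boom'}]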
def buckets_yaml():
import yaml # does not support pypy
with open(os.path.dirname(os.path.abspath(__file__))+'/buckets.yaml') as fp:
return yaml.load(fp)
# pypy compatibility hack
def python_buckets_yaml(python='python2'):
return json.loads(subprocess.check_output(
[python, '-c', 'import json,yaml; print json.dumps(yaml.load(open("buckets.yaml")))'],
cwd=os.path.dirname(os.path.abspath(__file__))))
for attempt in [python_buckets_yaml, buckets_yaml, lambda: python_buckets_yaml(python='python')]:
try:
BUCKETS = attempt()
break
except (ImportError, OSError):
traceback.print_exc()
else:
# pylint: disable=misplaced-bare-raise
# This is safe because the only way we get here is by faling all attempts
raise
def path_to_job_and_number(path):
assert not path.endswith('/')
for bucket, meta in BUCKETS.iteritems():
if path.startswith(bucket):
prefix = meta['prefix']
break
else:
if path.startswith('gs://kubernetes-jenkins/pr-logs'):
prefix = 'pr:'
else:
raise ValueError('unknown build path')
build = os.path.basename(path)
job = prefix + os.path.basename(os.path.dirname(path))
try:
return job, int(build)
except ValueError:
return job, None
def row_for_build(path, started, finished, results):
tests = []
for result in results:
for test in parse_junit(result):
if '#' in test['name'] and not test.get('failed'):
continue # skip successful repeated tests
tests.append(test)
build = {
'path': path,
'test': tests,
'tests_run': len(tests),
'tests_failed': sum(t.get('failed', 0) for t in tests)
}
job, number = path_to_job_and_number(path)
build['job'] = job
if number:
build['number'] = number
if started:
build['started'] = int(started['timestamp'])
if 'node' in started:
build['executor'] = started['node']
if finished:
build['finished'] = int(finished['timestamp'])
if 'result' in finished:
build['result'] = finished['result']
build['passed'] = build['result'] == 'SUCCESS'
elif isinstance(finished.get('passed'), bool):
build['passed'] = finished['passed']
build['result'] = 'SUCCESS' if build['passed'] else 'FAILURE'
if 'version' in finished:
build['version'] = finished['version']
def get_metadata():
metadata = None
if finished and 'metadata' in finished:
metadata = finished['metadata']
elif started:
metadata = started.get('metadata')
if metadata:
# clean useless/duplicated metadata fields
if 'repo' in metadata and not metadata['repo']:
metadata.pop('repo')
build_version = build.get('version', 'N/A')
if metadata.get('job-version') == build_version:
metadata.pop('job-version')
if metadata.get('version') == build_version:
metadata.pop('version')
for key, value in metadata.items():
if not isinstance(value, basestring):
# the schema specifies a string value. force it!
metadata[key] = json.dumps(value)
if not metadata:
return None
return [{'key': k, 'value': v} for k, v in sorted(metadata.items())]
metadata = get_metadata()
if metadata:
build['metadata'] = metadata
if started and finished:
build['elapsed'] = build['finished'] - build['started']
return build
def get_table(days):
if days:
return ('build_emitted_%g' % days).replace('.', '_')
return 'build_emitted'
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--days', type=float, default=0,
help='Grab data for builds within N days')
parser.add_argument('--assert-oldest', type=float,
help='Exit nonzero if a build older than X days was emitted previously.')
parser.add_argument('--reset-emitted', action='store_true',
help='Clear list of already-emitted builds.')
parser.add_argument('paths', nargs='*',
help='Options list of gs:// paths to dump rows for.')
return parser.parse_args(args)
def make_rows(db, builds):
for rowid, path, started, finished in builds:
try:
results = db.test_results_for_build(path)
yield rowid, row_for_build(path, started, finished, results)
except IOError:
return
except: # pylint: disable=bare-except
logging.exception('error on %s', path)
def main(db, opts, outfile):
min_started = None
if opts.days:
min_started = time.time() - (opts.days or 1) * 24 * 60 * 60
incremental_table = get_table(opts.days)
if opts.assert_oldest:
oldest = db.get_oldest_emitted(incremental_table)
if oldest < time.time() - opts.assert_oldest * 24 * 60 * 60:
return 1
return 0
if opts.reset_emitted:
db.reset_emitted(incremental_table)
if opts.paths:
# When asking for rows for specific builds, use a dummy table and clear it first.
incremental_table = 'incremental_manual'
db.reset_emitted(incremental_table)
builds = list(db.get_builds_from_paths(opts.paths, incremental_table))
else:
builds = db.get_builds(min_started=min_started, incremental_table=incremental_table)
rows_emitted = set()
for rowid, row in make_rows(db, builds):
json.dump(row, outfile, sort_keys=True)
outfile.write('\n')
rows_emitted.add(rowid)
if rows_emitted:
gen = db.insert_emitted(rows_emitted, incremental_table=incremental_table)
print >>sys.stderr, 'incremental progress gen #%d' % gen
else:
print >>sys.stderr, 'no rows emitted'
return 0
if __name__ == '__main__':
DB = model.Database()
OPTIONS = parse_args(sys.argv[1:])
sys.exit(main(DB, OPTIONS, sys.stdout))
| 34.888462 | 97 | 0.614486 | 1,121 | 9,071 | 4.868867 | 0.285459 | 0.024185 | 0.01026 | 0.009894 | 0.159765 | 0.129718 | 0.106632 | 0.082081 | 0.082081 | 0.068523 | 0 | 0.004565 | 0.275493 | 9,071 | 259 | 98 | 35.023166 | 0.825928 | 0.147283 | 0 | 0.188776 | 0 | 0 | 0.131999 | 0.009614 | 0 | 0 | 0 | 0 | 0.020408 | 1 | 0.056122 | false | 0.020408 | 0.076531 | 0.005102 | 0.22449 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c59980054887fc33f8080565f454fb7c15dd88d7 | 12,369 | py | Python | modules/UserHandling/backend/middleware.py | kushalsingh-00/aerial_wildlife_detection | 6f6c89a5633bf7fce6dc393d7aaa780a51c4c745 | [
"MIT"
] | 1 | 2020-08-18T21:40:06.000Z | 2020-08-18T21:40:06.000Z | modules/UserHandling/backend/middleware.py | kushalsingh-00/aerial_wildlife_detection | 6f6c89a5633bf7fce6dc393d7aaa780a51c4c745 | [
"MIT"
] | null | null | null | modules/UserHandling/backend/middleware.py | kushalsingh-00/aerial_wildlife_detection | 6f6c89a5633bf7fce6dc393d7aaa780a51c4c745 | [
"MIT"
] | 1 | 2020-08-18T21:40:15.000Z | 2020-08-18T21:40:15.000Z | '''
Provides functionality for checking login details,
session validity, and the like.
2019 Benjamin Kellenberger
'''
from threading import Thread
from modules.Database.app import Database
import psycopg2
from datetime import timedelta
from util.helpers import current_time
import secrets
import hashlib
import bcrypt
from .exceptions import *
class UserMiddleware():
TOKEN_NUM_BYTES = 64
SALT_NUM_ROUNDS = 12
def __init__(self, config):
self.config = config
self.dbConnector = Database(config)
self.usersLoggedIn = {} # username -> {timestamp, sessionToken}
def _current_time(self):
return current_time()
def _create_token(self):
return secrets.token_urlsafe(self.TOKEN_NUM_BYTES)
def _compare_tokens(self, tokenA, tokenB):
if tokenA is None or tokenB is None:
return False
return secrets.compare_digest(tokenA, tokenB)
def _check_password(self, providedPass, hashedTargetPass):
return bcrypt.checkpw(providedPass, hashedTargetPass)
def _create_hash(self, password):
hash = bcrypt.hashpw(password, bcrypt.gensalt(self.SALT_NUM_ROUNDS))
return hash
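# Illustrative note added for clarity (not part of the original module):
# _create_hash() and _check_password() form a bcrypt round trip, e.g.
#     hashed = self._create_hash(b'secret')     # salted, SALT_NUM_ROUNDS cost
#     self._check_password(b'secret', hashed)   # -> True
# The byte-string arguments are made-up example values.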
def _get_user_data(self, username):
sql = 'SELECT last_login, session_token, isAdmin FROM {}.user WHERE name = %s;'.format(
self.config.getProperty('Database', 'schema')
)
result = self.dbConnector.execute(sql, (username,), numReturn=1)
if not len(result):
return None
result = result[0]
return result
def _extend_session_database(self, username, sessionToken):
'''
Updates the last login timestamp of the user to the current
time and commits the changes to the database.
Runs in a thread to be non-blocking.
'''
def _extend_session():
now = self._current_time()
self.dbConnector.execute('''UPDATE {}.user SET last_login = %s,
session_token = %s
WHERE name = %s
'''.format(
self.config.getProperty('Database', 'schema')
),
(now, sessionToken, username,),
numReturn=None)
# also update local cache
self.usersLoggedIn[username]['timestamp'] = now
eT = Thread(target=_extend_session)
eT.start()
def _init_or_extend_session(self, username, sessionToken=None):
'''
Establishes a "session" for the user (i.e., sets 'time_login'
to now).
Also creates a new sessionToken if None provided.
'''
now = self._current_time()
if sessionToken is None:
sessionToken = self._create_token()
# new session created; add to database
self.dbConnector.execute('''UPDATE {}.user SET last_login = %s, session_token = %s
WHERE name = %s
'''.format(
self.config.getProperty('Database', 'schema')
),
(now, sessionToken, username,),
numReturn=None)
# fetch user metadata and store locally
userData = self._get_user_data(username)
self.usersLoggedIn[username] = {
'timestamp': now,
'sessionToken': sessionToken,
'isAdmin': userData['isadmin']
}
# update local cache as well
if not username in self.usersLoggedIn:
# fetch user metadata and store locally
userData = self._get_user_data(username)
self.usersLoggedIn[username] = {
'timestamp': now,
'sessionToken': sessionToken,
'isAdmin': userData['isadmin']
}
else:
self.usersLoggedIn[username]['timestamp'] = now
self.usersLoggedIn[username]['sessionToken'] = sessionToken
# also tell DB about updated tokens
self._extend_session_database(username, sessionToken)
expires = now + timedelta(0, self.config.getProperty('UserHandler', 'time_login', type=int))
return sessionToken, now, self.usersLoggedIn[username]['isAdmin'], expires
def _invalidate_session(self, username):
if username in self.usersLoggedIn:
del self.usersLoggedIn[username]
self.dbConnector.execute(
'UPDATE {}.user SET session_token = NULL WHERE name = %s'.format(
self.config.getProperty('Database', 'schema')
),
(username,),
numReturn=None)
#TODO: feedback that everything is ok?
def _check_account_exists(self, username, email):
response = {
'username': True,
'email': True
}
if username is None or not len(username): username = ''
if email is None or not len(email): email = ''
result = self.dbConnector.execute('SELECT COUNT(name) AS c FROM {schema}.user WHERE name = %s UNION ALL SELECT COUNT(name) AS c FROM {schema}.user WHERE email = %s'.format(
schema=self.config.getProperty('Database', 'schema')
),
(username,email,),
numReturn=2)
response['username'] = (result[0]['c'] > 0)
response['email'] = (result[1]['c'] > 0)
return response
def _check_logged_in(self, username, sessionToken):
now = self._current_time()
time_login = self.config.getProperty('UserHandler', 'time_login', type=int)
if not username in self.usersLoggedIn:
# check database
result = self._get_user_data(username)
if result is None:
# account does not exist
return False
# check for session token
if not self._compare_tokens(result['session_token'], sessionToken):
# invalid session token provided
return False
# check for timestamp
time_diff = (now - result['last_login']).total_seconds()
if time_diff <= time_login:
# user still logged in
if not username in self.usersLoggedIn:
self.usersLoggedIn[username] = {
'timestamp': now,
'sessionToken': sessionToken,
'isAdmin': result['isadmin']
}
else:
self.usersLoggedIn[username]['timestamp'] = now
# extend user session (commit to DB) if needed
if time_diff >= 0.75 * time_login:
self._extend_session_database(username, sessionToken)
return True
else:
# session time-out
return False
# generic error
return False
else:
# check locally
if not self._compare_tokens(self.usersLoggedIn[username]['sessionToken'],
sessionToken):
# invalid session token provided; check database if token has updated
# (can happen if user logs in again from another machine)
result = self._get_user_data(username)
if not self._compare_tokens(result['session_token'],
sessionToken):
return False
else:
# update local cache
self.usersLoggedIn[username]['sessionToken'] = result['session_token']
self.usersLoggedIn[username]['timestamp'] = now
if (now - self.usersLoggedIn[username]['timestamp']).total_seconds() <= time_login:
# user still logged in
return True
else:
# session time-out
return False
# generic error
return False
# generic error
return False
def isAuthenticated(self, username, sessionToken, admin=False):
'''
Checks if the user is logged in.
If 'admin' is True, returns True only if the user is
logged in and an administrator.
'''
loggedIn = self._check_logged_in(username, sessionToken)
if not loggedIn:
return False
elif not admin:
self._init_or_extend_session(username, sessionToken)
return True
else:
if username in self.usersLoggedIn and \
'isAdmin' in self.usersLoggedIn[username] and \
self.usersLoggedIn[username]['isAdmin'] is True:
# is logged in *and* admin
self._init_or_extend_session(username, sessionToken)
return True
def getLoginData(self, username, sessionToken):
'''
Performs a lookup on the login timestamp dict.
If the username cannot be found (also not in the database),
they are not logged in (False returned).
If the difference between the current time and the recorded
login timestamp exceeds a pre-defined threshold, the user is
removed from the dict and False is returned.
Otherwise returns True if and only if 'sessionToken' matches
the entry in the database.
'''
if self._check_logged_in(username, sessionToken):
# still logged in; extend session
sessionToken, now, isAdmin, expires = self._init_or_extend_session(username, sessionToken)
return sessionToken, now, isAdmin, expires
else:
# not logged in or error
raise Exception('Not logged in.')
def login(self, username, password, sessionToken):
# check if logged in
if self._check_logged_in(username, sessionToken):
# still logged in; extend session
sessionToken, now, isAdmin, expires = self._init_or_extend_session(username, sessionToken)
return sessionToken, now, isAdmin, expires
# get user info
userData = self.dbConnector.execute(
'SELECT hash FROM {}.user WHERE name = %s;'.format(
self.config.getProperty('Database', 'schema')
),
(username,),
numReturn=1
)
if len(userData) == 0:
# account does not exist
raise InvalidRequestException()
userData = userData[0]
# verify provided password
if self._check_password(password.encode('utf8'), bytes(userData['hash'])):
# correct
sessionToken, timestamp, isAdmin, expires = self._init_or_extend_session(username, None)
return sessionToken, timestamp, isAdmin, expires
else:
# incorrect
self._invalidate_session(username)
raise InvalidPasswordException()
def logout(self, username, sessionToken):
# check if logged in first
if self._check_logged_in(username, sessionToken):
self._invalidate_session(username)
def accountExists(self, username, email):
return self._check_account_exists(username, email)
def createAccount(self, username, password, email):
accExstChck = self._check_account_exists(username, email)
if accExstChck['username'] or accExstChck['email']:
raise AccountExistsException(username)
else:
hash = self._create_hash(password.encode('utf8'))
sql = '''
INSERT INTO {}.user (name, email, hash)
VALUES (%s, %s, %s);
'''.format(self.config.getProperty('Database', 'schema'))
self.dbConnector.execute(sql,
(username, email, hash,),
numReturn=None)
sessionToken, timestamp, _, expires = self._init_or_extend_session(username)
return sessionToken, timestamp, expires
def getUserNames(self):
sql = 'SELECT name FROM {}.user'.format(self.config.getProperty('Database', 'schema'))
result = self.dbConnector.execute(sql, None, 'all')
response = [r['name'] for r in result]
return response | 34.842254 | 180 | 0.574743 | 1,240 | 12,369 | 5.608871 | 0.183065 | 0.05133 | 0.057513 | 0.043997 | 0.445866 | 0.387203 | 0.305823 | 0.26729 | 0.237239 | 0.211646 | 0 | 0.003063 | 0.340125 | 12,369 | 355 | 181 | 34.842254 | 0.849057 | 0.148516 | 0 | 0.417062 | 0 | 0.004739 | 0.111122 | 0 | 0 | 0 | 0 | 0.002817 | 0 | 1 | 0.094787 | false | 0.042654 | 0.042654 | 0.018957 | 0.2891 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c59a014cb5abfe82cceeeab7119f93b6b7cb8b66 | 737 | py | Python | network/examples/getwebpage.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 3 | 2017-09-03T17:17:44.000Z | 2017-12-10T12:26:46.000Z | network/examples/getwebpage.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | null | null | null | network/examples/getwebpage.py | flashypepo/myMicropython-Examples | b2b63df865b5ad471b351ca5f279135025859f5d | [
"MIT"
] | 2 | 2017-10-01T01:10:55.000Z | 2018-07-15T19:49:29.000Z | # example to download a webpage
# 2017-0812 PePo okay, URL must contain at least 3 parts
# URL: http://docs.micropython.org/en/latest/esp8266/esp8266/tutorial/network_tcp.html
import socket
def http_get(url):
    _, _, host, path = url.split('/', 3)
    addr = socket.getaddrinfo(host, 80)[0][-1]
    s = socket.socket()
    s.connect(addr)
    s.send(bytes('GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host), 'utf8'))
    while True:
        data = s.recv(100)
        if data:
            print(str(data, 'utf8'), end='')
        else:
            break
    s.close()
#examples
http_get('http://micropython.org/ks/test.html')
#watch out for next one, >38664 records at 2017-0812
http_get('http://pepo.nl/ds3231/list_ds3231.php')
| 29.48 | 86 | 0.628223 | 116 | 737 | 3.931034 | 0.62931 | 0.046053 | 0.048246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085324 | 0.204885 | 737 | 24 | 87 | 30.708333 | 0.692833 | 0.309362 | 0 | 0 | 0 | 0.0625 | 0.232604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c59cd877502bd1ce7ba3c219b2b953600eecf639 | 2,644 | py | Python | GHT.py | yoyoyoohh/PolSAR_unsupervised_CD | 063986eb6fa172e861ccc556bf8806767bc89624 | [
"Apache-2.0"
] | 3 | 2021-10-14T03:49:37.000Z | 2022-02-16T01:16:08.000Z | GHT.py | slchenchn/PolSAR-unsupervised-change-detection | e5788d59c7d209546216b7d4e3ed1931a1bed816 | [
"Apache-2.0"
] | null | null | null | GHT.py | slchenchn/PolSAR-unsupervised-change-detection | e5788d59c7d209546216b7d4e3ed1931a1bed816 | [
"Apache-2.0"
] | 3 | 2021-07-04T08:24:56.000Z | 2022-02-09T14:08:50.000Z | '''
Author: Shuailin Chen
Created Date: 2021-05-13
Last Modified: 2021-05-19
content: copied from paper "A Generalization of Otsu's Method and
Minimum Error Thresholding"
'''
import numpy as np
csum = lambda z: np.cumsum(z)[:-1]
dsum = lambda z: np.cumsum(z[::-1])[-2::-1]
argmax = lambda x, f: np.mean(x[:-1][f == np.max(f)])  # Use the mean for ties.
clip = lambda z: np.maximum(1e-30, z)


def preliminaries(n, x):
    """Some math that is shared across each algorithm."""
    assert np.all(n >= 0)
    x = np.arange(len(n), dtype=n.dtype) if x is None else x
    assert np.all(x[1:] >= x[:-1])
    w0 = clip(csum(n))
    w1 = clip(dsum(n))
    p0 = w0 / (w0 + w1)
    p1 = w1 / (w0 + w1)
    mu0 = csum(n * x) / w0
    mu1 = dsum(n * x) / w1
    d0 = csum(n * x**2) - w0 * mu0**2
    d1 = dsum(n * x**2) - w1 * mu1**2
    return x, w0, w1, p0, p1, mu0, mu1, d0, d1


def Otsu(n, x=None):
    """Otsu's method."""
    x, w0, w1, _, _, mu0, mu1, _, _ = preliminaries(n, x)
    o = w0 * w1 * (mu0 - mu1)**2
    return argmax(x, o), o


def Otsu_equivalent(n, x=None):
    """Equivalent to Otsu's method."""
    x, _, _, _, _, _, _, d0, d1 = preliminaries(n, x)
    o = np.sum(n) * np.sum(n * x**2) - np.sum(n * x)**2 - np.sum(n) * (d0 + d1)
    return argmax(x, o), o


def MET(n, x=None):
    """Minimum Error Thresholding."""
    x, w0, w1, _, _, _, _, d0, d1 = preliminaries(n, x)
    ell = (1 + w0 * np.log(clip(d0 / w0)) + w1 * np.log(clip(d1 / w1))
           - 2 * (w0 * np.log(clip(w0)) + w1 * np.log(clip(w1))))
    return argmax(x, -ell), ell  # argmin()


def wprctile(n, x=None, omega=0.5):
    """Weighted percentile, with weighted median as default."""
    assert omega >= 0 and omega <= 1
    x, _, _, p0, p1, _, _, _, _ = preliminaries(n, x)
    h = -omega * np.log(clip(p0)) - (1. - omega) * np.log(clip(p1))
    return argmax(x, -h), h  # argmin()
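

# Illustrative sketch added for clarity; it is not part of the paper's original
# listing and is never called elsewhere in this file. The bimodal histogram
# below is made up: 'n' holds bin counts and 'x' the bin locations. Each helper
# above returns the selected threshold location together with the criterion
# values it maximised (Otsu) or minimised (MET, wprctile).
def _example_thresholds():
    n = np.array([8., 9., 7., 1., 0., 1., 6., 9., 8.])
    x = np.arange(len(n), dtype=float)
    t_otsu, _ = Otsu(n, x)
    t_met, _ = MET(n, x)
    t_median, _ = wprctile(n, x)
    return t_otsu, t_met, t_median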
def GHT(n, x=None, nu=0, tau=0, kappa=0, omega=0.5):
    """Our generalization of the above algorithms."""
    assert nu >= 0
    assert tau >= 0
    assert kappa >= 0
    assert omega >= 0 and omega <= 1
    x, w0, w1, p0, p1, _, _, d0, d1 = preliminaries(n, x)
    v0 = clip((p0 * nu * tau**2 + d0) / (p0 * nu + w0))
    v1 = clip((p1 * nu * tau**2 + d1) / (p1 * nu + w1))
    f0 = -d0 / v0 - w0 * np.log(v0) + 2 * (w0 + kappa * omega) * np.log(w0)
    f1 = -d1 / v1 - w1 * np.log(v1) + 2 * (w1 + kappa * (1 - omega)) * np.log(w1)
return argmax (x, f0 + f1), f0 + f1 | 34.337662 | 88 | 0.496218 | 423 | 2,644 | 3.047281 | 0.250591 | 0.026377 | 0.069822 | 0.041893 | 0.183088 | 0.105508 | 0.051202 | 0.017067 | 0 | 0 | 0 | 0.081678 | 0.314675 | 2,644 | 77 | 89 | 34.337662 | 0.629691 | 0.181165 | 0 | 0.085106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148936 | 1 | 0.12766 | false | 0 | 0.021277 | 0 | 0.276596 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a006ea941872423364cdc1a21347c42479de83 | 1,301 | py | Python | mainsite/oauth2/validators.py | renatoalmeidaoliveira/peeringdb | 263e35aeec62b5a66fc56241d29da99fc56a3968 | [
"BSD-2-Clause"
] | null | null | null | mainsite/oauth2/validators.py | renatoalmeidaoliveira/peeringdb | 263e35aeec62b5a66fc56241d29da99fc56a3968 | [
"BSD-2-Clause"
] | null | null | null | mainsite/oauth2/validators.py | renatoalmeidaoliveira/peeringdb | 263e35aeec62b5a66fc56241d29da99fc56a3968 | [
"BSD-2-Clause"
] | null | null | null | from oauth2_provider.oauth2_validators import OAuth2Validator
from mainsite.oauth2 import claims
from mainsite.oauth2.scopes import SupportedScopes
class OIDCValidator(OAuth2Validator):
    def get_additional_claims(self):
        """PeeringDB-specific claims added to the standard claims defined in a JWT token.

        These claims will be omitted if the scope requested does not match any
        of the scopes the claim is associated with.

        Returns:
            List[Tuple(str, callable)]: List of claims to be resolved from request details.
        """
        return [
            # Standard claims
            # https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
            ("name", claims.Name([SupportedScopes.PROFILE])),
            ("given_name", claims.GivenName([SupportedScopes.PROFILE])),
            ("family_name", claims.FamilyName([SupportedScopes.PROFILE])),
            ("email", claims.Email([SupportedScopes.EMAIL])),
            ("email_verified", claims.EmailVerified([SupportedScopes.EMAIL])),
            # Custom claims
            ("id", claims.UserId([SupportedScopes.PROFILE])),
            ("verified_user", claims.UserVerified([SupportedScopes.PROFILE])),
            ("networks", claims.Networks([SupportedScopes.NETWORKS])),
        ]
| 43.366667 | 91 | 0.664105 | 134 | 1,301 | 6.380597 | 0.574627 | 0.128655 | 0.042105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007976 | 0.229055 | 1,301 | 29 | 92 | 44.862069 | 0.844467 | 0.297463 | 0 | 0 | 0 | 0 | 0.077546 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a1a48140d4c683c0afe4d8c2b1164a54f26b18 | 1,090 | py | Python | src/modules/create_signedup_homepage.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | 1 | 2021-01-15T11:23:20.000Z | 2021-01-15T11:23:20.000Z | src/modules/create_signedup_homepage.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | null | null | null | src/modules/create_signedup_homepage.py | AndreasVikke/ComputerScience-Final | 52d09a5876bfde661a00736712db6e3d19be877d | [
"MIT"
] | null | null | null | """
Creates Signedup Home Tap
:license: MIT
"""
import json
from src.dependencies.dependency_typing import PynamoDBConsultant
def create_home_tap(consultant_uuid: str, consultant_model: PynamoDBConsultant):
    '''
    Creates Home tap with correct time from Consultant
    -
    :param consultant_uuid: Uuid of Consultant
    :param consultant_model: Consultant Model
    '''
    consultant = consultant_model.get(consultant_uuid)
    with open("src/templates/{0}.json".format('home_tap_template_signedup'), "r") as body:
        home_tap = json.load(body)
    if consultant.time_for_checkin is not None:
        home_tap['blocks'][4]['elements'][0]['initial_time'] = consultant.time_for_checkin
    if consultant.same_day_checkin is not None:
        print(consultant.same_day_checkin)
        if str(consultant.same_day_checkin) == 'True':
            home_tap['blocks'][5]['elements'][0]['initial_options'] =\
                [home_tap['blocks'][5]['elements'][0]['options'][0]]
    print(home_tap)
    return home_tap
| 35.16129 | 94 | 0.661468 | 132 | 1,090 | 5.227273 | 0.401515 | 0.101449 | 0.056522 | 0.069565 | 0.066667 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0.009445 | 0.222936 | 1,090 | 30 | 95 | 36.333333 | 0.805195 | 0.162385 | 0 | 0 | 0 | 0 | 0.151054 | 0.056206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.266667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a2a54dce0c10cb7f920eeee475a89ba987e6cb | 1,203 | py | Python | blazeweb/pytest_plugin.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | null | null | null | blazeweb/pytest_plugin.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 6 | 2016-11-01T18:42:34.000Z | 2020-11-16T16:52:14.000Z | blazeweb/pytest_plugin.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 1 | 2020-01-22T18:20:46.000Z | 2020-01-22T18:20:46.000Z | def pytest_addoption(parser):
    parser.addoption("--blazeweb_package", action="store",
                     help="blazeweb-package: app module to run for tests")
    parser.addoption("--blazeweb_profile", action="store", default="Test",
                     help="blazeweb-profile: app settings profile to use (default is Test)")


def pytest_configure(config):
    from blazeutils import tolist
    from blazeweb.events import signal
    from blazeweb.globals import ag, settings
    from blazeweb.hierarchy import findobj
    from blazeweb.scripting import load_current_app

    _, _, _, wsgiapp = load_current_app(config.getoption('blazeweb_package'),
                                        config.getoption('blazeweb_profile'))

    # make the app available to the tests
    ag.wsgi_test_app = wsgiapp

    # an application can define functions to be called after the app
    # is initialized but before any test inspection is done or tests
    # are ran. We call those functions here:
    for callstring in tolist(settings.testing.init_callables):
        tocall = findobj(callstring)
        tocall()

    # we also support events for pre-test setup
    signal('blazeweb.pre_test_init').send()
| 41.482759 | 92 | 0.686617 | 148 | 1,203 | 5.459459 | 0.5 | 0.059406 | 0.056931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.233583 | 1,203 | 28 | 93 | 42.964286 | 0.876356 | 0.201995 | 0 | 0 | 0 | 0 | 0.222222 | 0.023061 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.277778 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a3fa09c43da7eb3b219833f17e73b09275f400 | 1,366 | py | Python | xenonpy/utils/math/product.py | mori0711/XenonPy | e36ca0ea112b45ee629cd980c88e80cd6c96c514 | [
"BSD-3-Clause"
] | 93 | 2018-02-11T23:43:47.000Z | 2022-03-11T02:40:11.000Z | xenonpy/utils/math/product.py | mori0711/XenonPy | e36ca0ea112b45ee629cd980c88e80cd6c96c514 | [
"BSD-3-Clause"
] | 192 | 2018-04-20T04:32:12.000Z | 2022-03-24T05:59:18.000Z | xenonpy/utils/math/product.py | mori0711/XenonPy | e36ca0ea112b45ee629cd980c88e80cd6c96c514 | [
"BSD-3-Clause"
] | 51 | 2018-01-18T08:08:55.000Z | 2022-03-01T05:52:22.000Z | # Copyright (c) 2021. yoshida-lab. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import numpy as np
from numpy import product
class Product(object):
    def __init__(self, *paras, repeat=1):
        if not isinstance(repeat, int):
            raise ValueError('repeat must be int but got {}'.format(
                type(repeat)))
        lens = [len(p) for p in paras]
        if repeat > 1:
            lens = lens * repeat
        size = product(lens)
        acc_list = [np.floor_divide(size, lens[0])]
        for len_ in lens[1:]:
            acc_list.append(np.floor_divide(acc_list[-1], len_))
        self.paras = paras * repeat if repeat > 1 else paras
        self.lens = lens
        self.size = size
        self.acc_list = acc_list
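
    # Illustrative note added for clarity (not part of the original class): for
    # a hypothetical Product(['a', 'b'], [1, 2, 3]), lens == [2, 3], size == 6
    # and acc_list == [3, 1], so __getitem__ below decodes a flat index into a
    # combination without materialising the whole product:
    #     p[0] -> ('a', 1), p[1] -> ('a', 2), p[2] -> ('a', 3),
    #     p[3] -> ('b', 1), ..., p[5] -> ('b', 3)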
    def __getitem__(self, index):
        if index > self.size - 1:
            raise IndexError
        ret = [s - 1 for s in self.lens]  # from len to index
        remainder = index + 1
        for i, acc in enumerate(self.acc_list):
            quotient, remainder = np.divmod(remainder, acc)
            if remainder == 0:
                ret[i] = quotient - 1
                break
            ret[i] = quotient
        return tuple(self.paras[i][j] for i, j in enumerate(ret))

    def __len__(self):
        return self.size
| 31.045455 | 68 | 0.571742 | 189 | 1,366 | 4.015873 | 0.417989 | 0.055336 | 0.023715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016429 | 0.331625 | 1,366 | 43 | 69 | 31.767442 | 0.814896 | 0.125183 | 0 | 0 | 0 | 0 | 0.02439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.0625 | 0.03125 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a4da51eb8ae64048d9c79784888d1d357e9858 | 18,089 | py | Python | tests/helpers/test_table.py | rominf/cleo | 72f6a8a19f26eefc32c3fcf9844484fc9a38583f | [
"MIT"
] | null | null | null | tests/helpers/test_table.py | rominf/cleo | 72f6a8a19f26eefc32c3fcf9844484fc9a38583f | [
"MIT"
] | null | null | null | tests/helpers/test_table.py | rominf/cleo | 72f6a8a19f26eefc32c3fcf9844484fc9a38583f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import copy
from io import BytesIO
from .. import CleoTestCase
from cleo.helpers.table import Table
from cleo.helpers.table_cell import TableCell
from cleo.helpers.table_separator import TableSeparator
from cleo.helpers.table_style import TableStyle
from cleo.outputs.stream_output import StreamOutput
from cleo._compat import decode
class TableTest(CleoTestCase):
books = [
['99921-58-10-7', 'Divine Comedy', 'Dante Alighieri'],
['9971-5-0210-0', 'A Tale of Two Cities', 'Charles Dickens'],
['960-425-059-0', 'The Lord of the Rings', 'J. R. R. Tolkien'],
['80-902734-1-6', 'And Then There Were None', 'Agatha Christie'],
['9782070409341', 'Le Père Goriot', 'Honoré de Balzac']
]
_render_data = [
(
['ISBN', 'Title', 'Author'],
books,
'default',
'''+---------------+--------------------------+------------------+
| ISBN | Title | Author |
+---------------+--------------------------+------------------+
| 99921-58-10-7 | Divine Comedy | Dante Alighieri |
| 9971-5-0210-0 | A Tale of Two Cities | Charles Dickens |
| 960-425-059-0 | The Lord of the Rings | J. R. R. Tolkien |
| 80-902734-1-6 | And Then There Were None | Agatha Christie |
| 9782070409341 | Le Père Goriot | Honoré de Balzac |
+---------------+--------------------------+------------------+
'''
),
(
['ISBN', 'Title', 'Author'],
books,
'compact',
''' ISBN Title Author
99921-58-10-7 Divine Comedy Dante Alighieri
9971-5-0210-0 A Tale of Two Cities Charles Dickens
960-425-059-0 The Lord of the Rings J. R. R. Tolkien
80-902734-1-6 And Then There Were None Agatha Christie
9782070409341 Le Père Goriot Honoré de Balzac
'''
),
(
['ISBN', 'Title', 'Author'],
books,
'borderless',
''' =============== ========================== ==================
ISBN Title Author
=============== ========================== ==================
99921-58-10-7 Divine Comedy Dante Alighieri
9971-5-0210-0 A Tale of Two Cities Charles Dickens
960-425-059-0 The Lord of the Rings J. R. R. Tolkien
80-902734-1-6 And Then There Were None Agatha Christie
9782070409341 Le Père Goriot Honoré de Balzac
=============== ========================== ==================
'''
),
(
['ISBN', 'Title'],
[
['99921-58-10-7', 'Divine Comedy', 'Dante Alighieri'],
['9971-5-0210-0'],
['960-425-059-0', 'The Lord of the Rings', 'J. R. R. Tolkien'],
['80-902734-1-6', 'And Then There Were None', 'Agatha Christie']
],
'default',
'''+---------------+--------------------------+------------------+
| ISBN | Title | |
+---------------+--------------------------+------------------+
| 99921-58-10-7 | Divine Comedy | Dante Alighieri |
| 9971-5-0210-0 | | |
| 960-425-059-0 | The Lord of the Rings | J. R. R. Tolkien |
| 80-902734-1-6 | And Then There Were None | Agatha Christie |
+---------------+--------------------------+------------------+
'''
),
(
[],
[
['99921-58-10-7', 'Divine Comedy', 'Dante Alighieri'],
['9971-5-0210-0'],
['960-425-059-0', 'The Lord of the Rings', 'J. R. R. Tolkien'],
['80-902734-1-6', 'And Then There Were None', 'Agatha Christie']
],
'default',
'''+---------------+--------------------------+------------------+
| 99921-58-10-7 | Divine Comedy | Dante Alighieri |
| 9971-5-0210-0 | | |
| 960-425-059-0 | The Lord of the Rings | J. R. R. Tolkien |
| 80-902734-1-6 | And Then There Were None | Agatha Christie |
+---------------+--------------------------+------------------+
'''
),
(
['ISBN', 'Title'],
[],
'default',
'''+------+-------+
| ISBN | Title |
+------+-------+
'''
),
(
[],
[],
'default',
''
),
(
['ISBN', 'Title', 'Author'],
[
['99921-58-10-7', "Divine\nComedy", 'Dante Alighieri'],
['9971-5-0210-2', "Harry Potter\nand the Chamber of Secrets", "Rowling\nJoanne K."],
['9971-5-0210-2', "Harry Potter\nand the Chamber of Secrets", "Rowling\nJoanne K."],
['960-425-059-0', 'The Lord of the Rings', "J. R. R.\nTolkien"]
],
'default',
'''+---------------+----------------------------+-----------------+
| ISBN | Title | Author |
+---------------+----------------------------+-----------------+
| 99921-58-10-7 | Divine | Dante Alighieri |
| | Comedy | |
| 9971-5-0210-2 | Harry Potter | Rowling |
| | and the Chamber of Secrets | Joanne K. |
| 9971-5-0210-2 | Harry Potter | Rowling |
| | and the Chamber of Secrets | Joanne K. |
| 960-425-059-0 | The Lord of the Rings | J. R. R. |
| | | Tolkien |
+---------------+----------------------------+-----------------+
'''
),
(
['ISBN', 'Title', 'Author'],
[
['<info>99921-58-10-7</info>', '<error>Divine Comedy</error>', '<fg=blue;bg=white>Dante Alighieri</fg=blue;bg=white>'],
['9971-5-0210-0', 'A Tale of Two Cities', '<info>Charles Dickens</>'],
],
'default',
'''+---------------+----------------------+-----------------+
| ISBN | Title | Author |
+---------------+----------------------+-----------------+
| 99921-58-10-7 | Divine Comedy | Dante Alighieri |
| 9971-5-0210-0 | A Tale of Two Cities | Charles Dickens |
+---------------+----------------------+-----------------+
'''
),
(
['ISBN', 'Title', 'Author'],
[
['99921-58-10-7', 'Divine Comedy', 'Dante Alighieri'],
TableSeparator(),
[TableCell('Divine Comedy(Dante Alighieri)', colspan=3)],
TableSeparator(),
[TableCell('Arduino: A Quick-Start Guide', colspan=2), 'Mark Schmidt'],
TableSeparator(),
['9971-5-0210-0', TableCell('A Tale of \nTwo Cities', colspan=2)]
],
'default',
'''+----------------+---------------+-----------------+
| ISBN | Title | Author |
+----------------+---------------+-----------------+
| 99921-58-10-7 | Divine Comedy | Dante Alighieri |
+----------------+---------------+-----------------+
| Divine Comedy(Dante Alighieri) |
+----------------+---------------+-----------------+
| Arduino: A Quick-Start Guide | Mark Schmidt |
+----------------+---------------+-----------------+
| 9971-5-0210-0 | A Tale of |
| | Two Cities |
+----------------+---------------+-----------------+
'''
),
(
['ISBN', 'Title', 'Author'],
[
[TableCell('9971-5-0210-0', rowspan=3), 'Divine Comedy', 'Dante Alighieri'],
['A Tale of Two Cities', 'Charles Dickens'],
["The Lord of \nthe Rings", "J. R. \nR. Tolkien"],
TableSeparator(),
['80-902734-1-6', TableCell("And Then \nThere \nWere None", rowspan=3), 'Agatha Christie'],
['80-902734-1-7', 'Test']
],
'default',
'''+---------------+----------------------+-----------------+
| ISBN | Title | Author |
+---------------+----------------------+-----------------+
| 9971-5-0210-0 | Divine Comedy | Dante Alighieri |
| | A Tale of Two Cities | Charles Dickens |
| | The Lord of | J. R. |
| | the Rings | R. Tolkien |
+---------------+----------------------+-----------------+
| 80-902734-1-6 | And Then | Agatha Christie |
| 80-902734-1-7 | There | Test |
| | Were None | |
+---------------+----------------------+-----------------+
'''
),
(
['ISBN', 'Title', 'Author'],
[
[TableCell('9971-5-0210-0', rowspan=2, colspan=2), 'Dante Alighieri'],
['Charles Dickens'],
TableSeparator(),
['Dante Alighieri', TableCell('9971-5-0210-0', rowspan=3, colspan=2)],
['J. R. R. Tolkien'],
['J. R. R']
],
'default',
'''+------------------+--------+-----------------+
| ISBN | Title | Author |
+------------------+--------+-----------------+
| 9971-5-0210-0 | Dante Alighieri |
| | Charles Dickens |
+------------------+--------+-----------------+
| Dante Alighieri | 9971-5-0210-0 |
| J. R. R. Tolkien | |
| J. R. R | |
+------------------+--------+-----------------+
'''
),
(
['ISBN', 'Title', 'Author'],
[
[TableCell("9971\n-5-\n021\n0-0", rowspan=2, colspan=2), 'Dante Alighieri'],
['Charles Dickens'],
TableSeparator(),
['Dante Alighieri', TableCell("9971\n-5-\n021\n0-0", rowspan=2, colspan=2)],
['Charles Dickens'],
TableSeparator(),
[
TableCell("9971\n-5-\n021\n0-0", rowspan=2, colspan=2),
TableCell("Dante \nAlighieri", rowspan=2, colspan=1)
]
],
'default',
'''+-----------------+-------+-----------------+
| ISBN | Title | Author |
+-----------------+-------+-----------------+
| 9971 | Dante Alighieri |
| -5- | Charles Dickens |
| 021 | |
| 0-0 | |
+-----------------+-------+-----------------+
| Dante Alighieri | 9971 |
| Charles Dickens | -5- |
| | 021 |
| | 0-0 |
+-----------------+-------+-----------------+
| 9971 | Dante |
| -5- | Alighieri |
| 021 | |
| 0-0 | |
+-----------------+-------+-----------------+
'''
),
(
['ISBN', 'Title', 'Author'],
[
[TableCell("9971\n-5-\n021\n0-0", rowspan=2, colspan=2), 'Dante Alighieri'],
['Charles Dickens'],
['Dante Alighieri', TableCell("9971\n-5-\n021\n0-0", rowspan=2, colspan=2)],
['Charles Dickens']
],
'default',
'''+-----------------+-------+-----------------+
| ISBN | Title | Author |
+-----------------+-------+-----------------+
| 9971 | Dante Alighieri |
| -5- | Charles Dickens |
| 021 | |
| 0-0 | |
| Dante Alighieri | 9971 |
| Charles Dickens | -5- |
| | 021 |
| | 0-0 |
+-----------------+-------+-----------------+
'''
),
(
['ISBN', 'Author'],
[
[TableCell('9971-5-0210-0', rowspan=3, colspan=1), 'Dante Alighieri'],
[TableSeparator()],
['Charles Dickens']
],
'default',
'''+---------------+-----------------+
| ISBN | Author |
+---------------+-----------------+
| 9971-5-0210-0 | Dante Alighieri |
| |-----------------|
| | Charles Dickens |
+---------------+-----------------+
'''
),
(
[
[TableCell('Main title', colspan=3)],
['ISBN', 'Title', 'Author']
],
[],
'default',
'''+------+-------+--------+
| Main title |
+------+-------+--------+
| ISBN | Title | Author |
+------+-------+--------+
'''
),
(
[],
[
[
TableCell('1', colspan=3),
TableCell('2', colspan=2),
TableCell('3', colspan=2),
TableCell('4', colspan=2)
]
],
'default',
'''+--+--+--+--+--+--+--+--+--+
| 1 | 2 | 3 | 4 |
+--+--+--+--+--+--+--+--+--+
'''
)
]
@property
def render_data(self):
return copy.deepcopy(self._render_data)
def setUp(self):
self.stream = BytesIO()
def tearDown(self):
self.stream.close()
self.stream = None
def test_render(self):
"""
TableHelper.render() should behave properly
"""
for data_set in self.render_data:
headers, rows, layout, expected = data_set
output = self.get_output_stream()
table = Table(output)
table.set_headers(headers)\
.set_rows(rows)\
.set_style(layout)
table.render()
self.assertEqual(decode(expected), self.get_output_content(output))
def test_render_add_rows(self):
"""
TableHelper.render() should behave properly after adding rows
"""
for data_set in self.render_data:
headers, rows, layout, expected = data_set
output = self.get_output_stream()
table = Table(output)
table.set_headers(headers)\
.add_rows(rows)\
.set_style(layout)
table.render()
self.assertEqual(decode(expected), self.get_output_content(output))
def test_render_add_rows_one_by_one(self):
"""
TableHelper.render() should behave properly after adding rows one by one
"""
for data_set in self.render_data:
headers, rows, layout, expected = data_set
output = self.get_output_stream()
table = Table(output)
table.set_headers(headers)\
.set_style(layout)
for row in rows:
table.add_row(row)
table.render()
self.assertEqual(decode(expected), self.get_output_content(output))
def test_style(self):
style = TableStyle()
style.set_horizontal_border_char('.')
style.set_vertical_border_char('.')
style.set_crossing_char('.')
Table.set_style_definition('dotfull', style)
output = self.get_output_stream()
table = Table(output)
table.set_headers(['Foo'])
table.set_rows([['Bar']])
table.set_style('dotfull')
table.render()
expected = '''.......
. Foo .
.......
. Bar .
.......
'''
self.assertEqual(expected, self.get_output_content(output))
def test_row_separator(self):
output = self.get_output_stream()
table = Table(output)
table.set_headers(['Foo'])
table.set_rows([
['Bar1'],
TableSeparator(),
['Bar2'],
TableSeparator(),
['Bar3']
])
table.render()
expected = '''+------+
| Foo  |
+------+
| Bar1 |
+------+
| Bar2 |
+------+
| Bar3 |
+------+
'''
self.assertEqual(expected, self.get_output_content(output))
def test_render_multi_calls(self):
output = self.get_output_stream()
table = Table(output)
table.set_rows([
[TableCell('foo', colspan=2)]
])
table.render()
table.render()
table.render()
expected = '''+---+--+
| foo  |
+---+--+
+---+--+
| foo  |
+---+--+
+---+--+
| foo  |
+---+--+
'''
self.assertEqual(expected, self.get_output_content(output))
def test_column_style(self):
output = self.get_output_stream()
table = Table(output)
table.set_headers(['ISBN', 'Title', 'Author', 'Price'])
table.set_rows([
['99921-58-10-7', 'Divine Comedy', 'Dante Alighieri', '9.95'],
['9971-5-0210-0', 'A Tale of Two Cities', 'Charles Dickens', '139.25']
])
style = TableStyle()
style.set_pad_type('left')
table.set_column_style(3, style)
table.render()
expected = '''+---------------+----------------------+-----------------+--------+
| ISBN          | Title                | Author          |  Price |
+---------------+----------------------+-----------------+--------+
| 99921-58-10-7 | Divine Comedy        | Dante Alighieri |   9.95 |
| 9971-5-0210-0 | A Tale of Two Cities | Charles Dickens | 139.25 |
+---------------+----------------------+-----------------+--------+
'''
self.assertEqual(expected, self.get_output_content(output))
def get_output_stream(self):
stream = BytesIO()
return StreamOutput(stream, StreamOutput.VERBOSITY_NORMAL, False)
def get_output_content(self, output):
output.get_stream().seek(0)
value = output.get_stream().getvalue()
return decode(value).replace(os.linesep, "\n")
| 35.056202 | 135 | 0.379844 | 1,502 | 18,089 | 4.505992 | 0.120506 | 0.070331 | 0.034574 | 0.032506 | 0.704196 | 0.691637 | 0.672872 | 0.669031 | 0.649084 | 0.581117 | 0 | 0.073254 | 0.35552 | 18,089 | 515 | 136 | 35.124272 | 0.507291 | 0.011112 | 0 | 0.552632 | 0 | 0.003289 | 0.263088 | 0.026557 | 0 | 0 | 0 | 0 | 0.023026 | 1 | 0.039474 | false | 0 | 0.032895 | 0.003289 | 0.092105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a58855d9fd93a2b52c28c05cbbd7858d431985 | 9,717 | py | Python | pywebcopy/core.py | wasim961/pywebcopy | ed4da43cbaa08bf3b1c0f5caa30846f410544cdb | [
"Apache-2.0"
] | 257 | 2018-09-10T15:19:28.000Z | 2022-03-26T17:54:17.000Z | pywebcopy/core.py | wasim961/pywebcopy | ed4da43cbaa08bf3b1c0f5caa30846f410544cdb | [
"Apache-2.0"
] | 75 | 2018-09-26T08:34:05.000Z | 2022-03-15T18:03:55.000Z | pywebcopy/core.py | wasim961/pywebcopy | ed4da43cbaa08bf3b1c0f5caa30846f410544cdb | [
"Apache-2.0"
] | 71 | 2018-11-29T02:19:54.000Z | 2022-03-30T12:53:48.000Z | # -*- coding: utf-8 -*-
"""
pywebcopy.core
~~~~~~~~~~~~~~
* DO NOT TOUCH *
Core functionality of the pywebcopy engine.
"""
from __future__ import absolute_import
import logging
import os
import shutil
import zipfile
from datetime import datetime
import threading
from .configs import config, SESSION
from .globals import MARK, __version__, lru_cache
LOGGER = logging.getLogger('core')
def zip_project(timeout=10):
"""Makes zip archive of current project folder and returns the location.
:rtype: str
:returns: location of the zipped project_folder file.
"""
# wait for the threads to finish downloading files
for thread in threading.enumerate():
if not thread or isinstance(thread, threading._MainThread):
continue
if thread.is_alive():
thread.join(timeout=timeout)
zip_fn = os.path.abspath(config['project_folder']) + '.zip'
with zipfile.ZipFile(zip_fn, 'w', zipfile.ZIP_DEFLATED) as archive:
#: Iterate through file tree
for folder, _, fn in os.walk(config['project_folder']):
# only files will be added to the zip archive instead of empty
# folder which might have been created during process
for f in fn:
try:
new_fn = os.path.join(folder, f)
archive.write(new_fn, new_fn[len(config['project_folder']):])
except ValueError:
LOGGER.error("Attempt to use ZIP archive that was already closed")
except RuntimeError:
LOGGER.exception("Failed to add file to archive file %s" % f, exc_info=True)
LOGGER.info('Saved the Project as ZIP archive at %s' % (config['project_folder'] + '.zip'))
# Project folder can be automatically deleted after making zip file from it
# this is True by default and will delete the complete project folder
if config['delete_project_folder']:
shutil.rmtree(config['project_folder'])
LOGGER.info("Downloaded Contents Size :: {} KB's".format(getattr(SESSION, '_bytes')//1024))
return zip_fn
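# Illustrative usage sketch (not an official example; assumes the package layout
# implied by the relative imports at the top of this module). Once a crawl has
# filled config['project_folder'], the archive is produced like this:
#
#     from pywebcopy.configs import config
#     from pywebcopy.core import zip_project
#
#     config['project_folder'] = '/tmp/example_site'   # hypothetical path
#     config['delete_project_folder'] = False           # keep the raw tree as well
#     archive = zip_project(timeout=30)                  # waits for download threads
#     print(archive)                                     # e.g. /tmp/example_site.zip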
#
# from flask import Flask
#
#
# class PropertiesMixin(object):
#
# def _get_project_folder(self):
# if self._static_folder is not None:
# return os.path.join(self.root_path, self._static_folder)
#
# def _set_project_folder(self, value):
# self._static_folder = value
#
# project_folder = property(
# _get_project_folder, _set_project_folder,
# doc='The absolute path to the configured static folder.'
# )
# del _get_project_folder, _set_project_folder
#
# def _get_project_url(self):
# if self._project_url is not None:
# return self._project_url
#
# if self.static_folder is not None:
# return '/' + os.path.basename(self.static_folder)
#
# def _set_project_url(self, value):
# self._project_url = value
#
# project_url = property(
# _get_project_url, _set_project_url,
# doc='The URL prefix that the static route will be registered for.'
# )
# del _get_project_url, _set_project_url
#
#
# class Manager(PropertiesMixin):
#
# default_config = {}
#
#
# def _dummy_resp(reason=None):
# """ Response with dummy data so that a dummy file will always be downloaded """
#
# dummy_resp = Response()
#
# if reason:
# _text = (b'This File could not be downloaded.\n'
# b'Reason: \n\n %r \n\n' % reason.encode())
# else:
# _text = b'This File could not be downloaded.\n\n'
#
# dummy_resp.raw = BytesIO(_text)
# dummy_resp.encoding = 'utf-8' # plain encoding
# dummy_resp.status_code = 200 # fake the status
# dummy_resp.is_dummy = True # but leave a mark
# dummy_resp.reason = 'Failed to access' # fail reason
# return dummy_resp
#
#
# def get(url, *args, **kwargs):
# """ fetches contents from internet using `requests`.
#
# makes http request using custom configs
# it returns requests object if request was successful
# None otherwise.
#
# :param str url: the url of the page or file to be fetched
# :returns object: requests obj or None
# """
#
# # Make a check if url is meant for public viewing by checking for
# # the url in the robots.txt file provided by site.
# try:
#
# # Uses the requests module to make a get request using a persistent session
# # object and returns that
# # otherwise on fail it returns None
# resp = SESSION.get(url, *args, **kwargs)
#
# # log downloaded file size
# config['download_size'] += int(resp.headers.get('content-length', 0))
#
# except HTTPError as err:
# LOGGER.error(err)
#
# # try to get the default response returned by the `requests`
# resp = err.response
#
# if not resp:
# resp = _dummy_resp()
# resp.request = err.request
#
# except ConnectionError: # Catches any other exception raised by `requests`
# LOGGER.error("Failed to access url at address %s" % url)
# resp = _dummy_resp()
#
# return resp
def _watermark(file_path):
"""Returns a string wrapped in comment characters for specific file type."""
file_type = os.path.splitext(file_path)[1] or ''
# Only specific for the html file types So that the comment does not pop up as
# content on the page
if file_type.lower() in ['.html', '.htm', '.xhtml', '.aspx', '.asp', '.php']:
comment_start = '<!--!'
comment_end = '-->'
elif file_type.lower() in ['.css', '.js', '.xml']:
comment_start = '/*!'
comment_end = '*/'
else:
return b''
return MARK.format(comment_start, __version__, file_path, datetime.utcnow(), comment_end).encode()
@lru_cache(maxsize=100)
def is_allowed(ext):
if not ext:
return False
if ext.strip().lower() in config['allowed_file_ext']:
return True
return False
#
# def new_file(location, content_url=None, content=None):
# """Fail-safe Downloads any file to the disk.
#
# :param str location: path where to save the file
#
# :param bytes content: contents or binary data of the file
# :OR:
# :param str content_url: download the file from url
#
# :returns str: location of downloaded file on disk if download was successful
# None otherwise
# """
# assert location, "Download location needed to be specified!"
# assert isinstance(location, str), "Download location must be a string!"
# assert content or content_url, "Either file content or file url is needed!"
# if content_url:
# assert isinstance(content_url, str), "File url must be a string!"
#
# if content:
# assert isinstance(content, bytes), "Expected type bytes, got %r instead" % type(content)
#
# req = None # type: Response
#
# _file_ext = '.' + location.rsplit('.', 1)[1].lower().strip()
#
# if not is_allowed(_file_ext):
# LOGGER.critical('File ext %r is not allowed for file at %r' % (_file_ext, content_url or location))
# return
#
# # The file path provided can already be existing so only overwrite the files
# # when specifically configured to do so by config key 'over_write'
# if os.path.exists(location):
#
# if not config['over_write']:
# LOGGER.debug('File already exists at the location %s' % location)
# return location
#
# else:
# os.remove(location)
# LOGGER.info('ReDownloading the file of type %s to %s' % (_file_ext, location))
# else:
# LOGGER.info('Downloading a new file of type %s to %s' % (_file_ext, location))
#
# # Contents of the files can be supplied or filled by a content url
# # function we go online to download content from content url
# if not content and content_url is not None:
#
# LOGGER.info('Downloading content of file %s from %s' % (location, content_url))
#
# req = get(content_url, stream=True)
# # The file may not be available so will raise an error which will be caught by
#     # except block and will return None
# if req is None or not req.ok:
# LOGGER.error('Failed to load the content of file %s from %s' % (location, content_url))
# return
#
# try:
# # Files can throw an IOError or similar when failed to open or write in that
# LOGGER.debug("Making path for the file at location %s" % location)
# if not os.path.exists(os.path.dirname(location)):
#             os.makedirs(os.path.dirname(location))
#
# except OSError as e:
# LOGGER.critical(e)
# LOGGER.critical("Failed to create path for the file of type %s to location %s" % (_file_ext, location))
# return
#
# try:
# # case the function will catch it and log it then return None
# LOGGER.info("Writing file at location %s" % location)
#
# if isinstance(req, Response):
# with open(location, 'wb') as f:
# # should write in chunks to manage ram usages?
# f.write(req.content)
# f.write(_watermark(content_url or location))
# else:
# with open(location, 'wb') as f:
# f.write(content)
# f.write(_watermark(content_url or location))
#
# except Exception as e:
# LOGGER.critical(e)
# LOGGER.critical("Download failed for the file of type %s to location %s" % (_file_ext, location))
# return
# else:
# LOGGER.info('File of type %s written successfully to %s' % (_file_ext, location))
# return location
| 33.857143 | 113 | 0.626222 | 1,287 | 9,717 | 4.604507 | 0.250971 | 0.037293 | 0.015187 | 0.009281 | 0.137867 | 0.133817 | 0.088424 | 0.077624 | 0.063449 | 0.0297 | 0 | 0.002532 | 0.268396 | 9,717 | 286 | 114 | 33.975524 | 0.831059 | 0.732119 | 0 | 0.04 | 0 | 0 | 0.143524 | 0.008917 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0 | 0.18 | 0 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a7e848a4cbaf6854ddb5adea417e88be38fccd | 2,549 | py | Python | preprocess.py | njellinas/voice-conversion-CycleGAN | 1cfa34dc1c2f677eb18a232049d0b6eb1fa7f28d | [
"MIT"
] | 51 | 2019-04-23T15:10:32.000Z | 2021-02-24T09:41:16.000Z | preprocess.py | njellinas/voice-conversion-CycleGAN | 1cfa34dc1c2f677eb18a232049d0b6eb1fa7f28d | [
"MIT"
] | 4 | 2019-05-10T06:45:57.000Z | 2020-02-04T17:49:04.000Z | preprocess.py | njellinas/voice-conversion-CycleGAN | 1cfa34dc1c2f677eb18a232049d0b6eb1fa7f28d | [
"MIT"
] | 19 | 2019-04-30T12:24:38.000Z | 2021-09-17T14:52:51.000Z | import os
import time
from speech_tools import *
dataset = 'vcc2018'
src_speaker = 'VCC2SF3'
trg_speaker = 'VCC2TM1'
data_dir = os.path.join('datasets', dataset)
exp_dir = os.path.join('experiments', dataset)
train_A_dir = os.path.join(data_dir, 'vcc2018_training', src_speaker)
train_B_dir = os.path.join(data_dir, 'vcc2018_training', trg_speaker)
exp_A_dir = os.path.join(exp_dir, src_speaker)
exp_B_dir = os.path.join(exp_dir, trg_speaker)
os.makedirs(exp_A_dir, exist_ok=True)
os.makedirs(exp_B_dir, exist_ok=True)
sampling_rate = 22050
num_mcep = 36
frame_period = 5.0
n_frames = 128
print('Loading Wavs...')
start_time = time.time()
wavs_A = load_wavs(wav_dir=train_A_dir, sr=sampling_rate)
wavs_B = load_wavs(wav_dir=train_B_dir, sr=sampling_rate)
print('Extracting acoustic features...')
f0s_A, timeaxes_A, sps_A, aps_A, coded_sps_A = world_encode_data(wavs=wavs_A, fs=sampling_rate,
frame_period=frame_period, coded_dim=num_mcep)
f0s_B, timeaxes_B, sps_B, aps_B, coded_sps_B = world_encode_data(wavs=wavs_B, fs=sampling_rate,
frame_period=frame_period, coded_dim=num_mcep)
print('Calculating F0 statistics...')
log_f0s_mean_A, log_f0s_std_A = logf0_statistics(f0s_A)
log_f0s_mean_B, log_f0s_std_B = logf0_statistics(f0s_B)
print('Log Pitch A')
print('Mean: %f, Std: %f' % (log_f0s_mean_A, log_f0s_std_A))
print('Log Pitch B')
print('Mean: %f, Std: %f' % (log_f0s_mean_B, log_f0s_std_B))
print('Normalizing data...')
coded_sps_A_transposed = transpose_in_list(lst=coded_sps_A)
coded_sps_B_transposed = transpose_in_list(lst=coded_sps_B)
coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std = coded_sps_normalization_fit_transoform(
coded_sps=coded_sps_A_transposed)
coded_sps_B_norm, coded_sps_B_mean, coded_sps_B_std = coded_sps_normalization_fit_transoform(
coded_sps=coded_sps_B_transposed)
print('Saving data...')
save_pickle(os.path.join(exp_A_dir, 'cache{}.p'.format(num_mcep)),
(coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std, log_f0s_mean_A, log_f0s_std_A))
save_pickle(os.path.join(exp_B_dir, 'cache{}.p'.format(num_mcep)),
(coded_sps_B_norm, coded_sps_B_mean, coded_sps_B_std, log_f0s_mean_B, log_f0s_std_B))
end_time = time.time()
time_elapsed = end_time - start_time
print('Preprocessing Done.')
print('Time Elapsed for Data Preprocessing: %02d:%02d:%02d' % (
time_elapsed // 3600, (time_elapsed % 3600 // 60), (time_elapsed % 60 // 1)))
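# Illustrative read-back sketch (not part of this script): the caches written above
# are plain pickled tuples, so a training script can restore them in the same field
# order, e.g. with the standard pickle module (a load_pickle helper in speech_tools
# would work too, if one exists there -- that is an assumption).
#
#     import pickle
#     with open(os.path.join(exp_A_dir, 'cache{}.p'.format(num_mcep)), 'rb') as f:
#         (coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std,
#          log_f0s_mean_A, log_f0s_std_A) = pickle.load(f)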
| 34.917808 | 111 | 0.735583 | 429 | 2,549 | 3.916084 | 0.219114 | 0.114286 | 0.053571 | 0.046429 | 0.515476 | 0.463095 | 0.413095 | 0.370238 | 0.210714 | 0.210714 | 0 | 0.030429 | 0.149078 | 2,549 | 72 | 112 | 35.402778 | 0.744122 | 0 | 0 | 0.039216 | 0 | 0 | 0.126716 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.215686 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a90459948d7388e7644adef52b54dbe1e4cde7 | 1,813 | py | Python | deploy/forms.py | doordash/django-deploy | 712f8a24cffc8ea8f01ca78cbff84b1ebfc20b5c | [
"BSD-3-Clause"
] | 3 | 2019-02-14T05:13:59.000Z | 2019-05-17T08:14:13.000Z | deploy/forms.py | doordash/django-deploy | 712f8a24cffc8ea8f01ca78cbff84b1ebfc20b5c | [
"BSD-3-Clause"
] | null | null | null | deploy/forms.py | doordash/django-deploy | 712f8a24cffc8ea8f01ca78cbff84b1ebfc20b5c | [
"BSD-3-Clause"
] | 2 | 2017-01-31T08:59:08.000Z | 2019-12-31T14:16:37.000Z | from plistlib import readPlist
from django import forms
from deploy.models import App
class AppForm(forms.ModelForm):
def clean_plist(self):
if not 'plist' in self.files:
raise forms.ValidationError('No plist file attached.')
plist = self.files['plist']
extension = plist.name.split('.')[-1]
if extension != 'plist':
raise forms.ValidationError('Invalid plist file.')
return self.cleaned_data['plist']
def clean_ipa(self):
if not 'ipa' in self.files:
raise forms.ValidationError('No ipa file attached.')
ipa = self.files['ipa']
extension = ipa.name.split('.')[-1]
if extension != 'ipa':
raise forms.ValidationError('Invalid ipa file.')
return self.cleaned_data['ipa']
def clean_name(self):
identifier = self.get_key_value_from_plist('bundle-identifier')
name = identifier.split('.')[-1]
self.cleaned_data['name'] = name
return self.cleaned_data['name']
def clean_version(self):
version = self.get_key_value_from_plist('bundle-version')
self.cleaned_data['version'] = version
return self.cleaned_data['version']
def get_key_value_from_plist(self, key):
if not hasattr(self, 'plist'):
plist = self.files['plist']
self.plist = readPlist(plist)
data = self.plist['items'][0]
metadata = data['metadata']
return metadata.get(key, None)
class Meta:
model = App
# It is important that plist is validated before name and version
fields = ('plist', 'ipa', 'is_active', 'name', 'version')
widgets = {'name': forms.HiddenInput({'value': 'default'}),
'version': forms.HiddenInput({'value': 'default'})}
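# Illustrative sketch (assumed input shape, only the keys read above are actually
# validated): the uploaded plist is expected to follow Apple's OTA manifest layout,
# since get_key_value_from_plist reads plist['items'][0]['metadata'].
#
#     {
#         'items': [{
#             'assets': [{'kind': 'software-package', 'url': 'https://example.com/app.ipa'}],
#             'metadata': {
#                 'bundle-identifier': 'com.example.myapp',   # -> cleaned 'name'
#                 'bundle-version': '1.0.3',                   # -> cleaned 'version'
#                 'kind': 'software',
#                 'title': 'MyApp',
#             },
#         }],
#     }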
| 35.54902 | 73 | 0.61059 | 214 | 1,813 | 5.065421 | 0.275701 | 0.060886 | 0.083026 | 0.077491 | 0.228782 | 0.125461 | 0.125461 | 0 | 0 | 0 | 0 | 0.002978 | 0.259239 | 1,813 | 50 | 74 | 36.26 | 0.80417 | 0.034749 | 0 | 0.04878 | 0 | 0 | 0.145309 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121951 | false | 0 | 0.073171 | 0 | 0.365854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5a9c5b42cb43ae6a669a39bf1867c977d834a7b | 1,274 | py | Python | pytest/ban.py | ysc3839/vcmp-python-test | 3ccd7788cb97dc302b0a4d3d7ba833196585afde | [
"MIT"
] | 1 | 2022-01-13T18:40:11.000Z | 2022-01-13T18:40:11.000Z | pytest/ban.py | ysc3839/vcmp-python-test | 3ccd7788cb97dc302b0a4d3d7ba833196585afde | [
"MIT"
] | null | null | null | pytest/ban.py | ysc3839/vcmp-python-test | 3ccd7788cb97dc302b0a4d3d7ba833196585afde | [
"MIT"
] | null | null | null | import re
from _vcmp import functions as func
TYPE_UID = 0
TYPE_UID2 = 1
TYPE_FULLSTR = 3
TYPE_SUBSTR = 4
TYPE_REGEX = 5
ban_list = []
def load_ban_list(l):
global ban_list
for k, v in l.items():
if k == 'uid':
for i in v:
ban_list.append((i, TYPE_UID))
elif k == 'uid2':
for i in v:
ban_list.append((i, TYPE_UID2))
elif k == 'name':
for i in v:
if isinstance(i, str):
ban_list.append((i, TYPE_FULLSTR))
elif isinstance(i, list):
ban_list.append((i[0], i[1] + TYPE_FULLSTR))
def check_ban_list(player_id):
uid = func.get_player_uid(player_id)
uid2 = func.get_player_uid2(player_id)
name = func.get_player_name(player_id)
for n, t in ban_list:
if t == TYPE_UID:
if uid == n:
return True
elif t == TYPE_UID2:
if uid2 == n:
return True
elif t == TYPE_FULLSTR:
if name == n:
return True
elif t == TYPE_SUBSTR:
if name.find(n) != -1:
return True
elif t == TYPE_REGEX:
if not re.search(n, name):
return True
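# Illustrative sketch (hypothetical values): the mapping consumed by load_ban_list().
# Plain strings under 'name' are exact matches; a [value, n] pair is offset from
# TYPE_FULLSTR, so n=1 gives a substring match and n=2 a regular expression.
#
#     load_ban_list({
#         'uid': ['AAAABBBBCCCCDDDD'],
#         'uid2': ['1111222233334444'],
#         'name': [
#             'Cheater',              # full nickname match
#             ['bot', 1],             # substring match
#             [r'^\[EVIL\]', 2],      # regex match
#         ],
#     })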
| 26.541667 | 64 | 0.5 | 178 | 1,274 | 3.376404 | 0.264045 | 0.104825 | 0.086522 | 0.093178 | 0.244592 | 0.183028 | 0.083195 | 0.083195 | 0.083195 | 0 | 0 | 0.019711 | 0.402669 | 1,274 | 47 | 65 | 27.106383 | 0.770039 | 0 | 0 | 0.186047 | 0 | 0 | 0.008634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.046512 | 0 | 0.209302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5aa75fb7831ae0f43e75f1e37080e36b3e0209d | 2,769 | py | Python | spb/__main__.py | rdempsey/simple-python-blockchain | 34ba22d0e23c1949cf30dcdd399dabe2d0965a03 | [
"MIT"
] | null | null | null | spb/__main__.py | rdempsey/simple-python-blockchain | 34ba22d0e23c1949cf30dcdd399dabe2d0965a03 | [
"MIT"
] | null | null | null | spb/__main__.py | rdempsey/simple-python-blockchain | 34ba22d0e23c1949cf30dcdd399dabe2d0965a03 | [
"MIT"
] | 1 | 2021-08-12T00:56:24.000Z | 2021-08-12T00:56:24.000Z | """
Python Simple Blockchain
Usage:
__main__.py <log-name> <log-level> <num-blocks-to-create>
__main__.py <log-name> <log-level> <num-blocks-to-create> --log-dir=<dirpath> --log-file-name=<filename>
__main__.py (-h | --help)
__main__.py (-v | --version)
Options:
-h --help Show this screen.
-v --version Show version.
--log-dir=<ld> Log directory.
--log-file-name=<lfn> Log file name.
"""
import logging
import os
from datetime import datetime
from docopt import docopt
from spb.lib.block import Block
def run_spb(args):
_logger = _create_logger(args=args)
blockchain = []
_add_genesis_block_to_blockchain(blockchain=blockchain)
_add_blocks_to_blockchain(num_blocks_to_add=int(args['<num-blocks-to-create>']),
blockchain=blockchain,
logger=_logger)
def _create_logger(args):
log_name = args['<log-name>']
log_level = args['<log-level>']
logging_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=logging_format)
logger = logging.getLogger(log_name)
logger.setLevel(log_level)
if args['--log-file-name']:
log_dir = args['--log-dir']
log_file_name = args['--log-file-name']
_make_directory(log_dir)
log_file_path = os.path.join(log_dir, log_file_name)
fh = logging.FileHandler(filename=log_file_path)
fh.setLevel(log_level)
formatter = logging.Formatter(logging_format)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def _make_directory(directory_path):
if not os.path.exists(directory_path):
os.makedirs(directory_path)
return directory_path
def _add_genesis_block_to_blockchain(blockchain):
genesis_block = Block(index=0, timestamp=datetime.utcnow(), data="Genesis Block", previous_hash="0")
blockchain.append(genesis_block)
def _add_blocks_to_blockchain(num_blocks_to_add, blockchain, logger):
for i in range(0, num_blocks_to_add):
previous_block = blockchain[i]
block_to_add = _create_block(previous_block)
blockchain.append(block_to_add)
logger.info("Block #{} has been added to the blockchain!".format(block_to_add.index))
logger.info("Hash: {}\n".format(block_to_add.hash))
def _create_block(last_block):
b_index = last_block.index + 1
b_timestamp = datetime.utcnow()
b_data = "Hey! I'm block " + str(b_index)
b_hash = last_block.hash
return Block(index=b_index, timestamp=b_timestamp, data=b_data, previous_hash=b_hash)
if __name__ == '__main__':
args = docopt(__doc__, version='Simple Python Blockchain 0.1.0')
run_spb(args=args)
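# Minimal sketch of the Block interface assumed above (the real class lives in
# spb/lib/block.py and is only imported here; the SHA-256 detail below is an
# assumption for illustration): _create_block() passes index, timestamp, data and
# previous_hash, and reads back .index and .hash.
#
#     import hashlib
#
#     class Block:
#         def __init__(self, index, timestamp, data, previous_hash):
#             self.index = index
#             self.timestamp = timestamp
#             self.data = data
#             self.previous_hash = previous_hash
#             payload = '{}{}{}{}'.format(index, timestamp, data, previous_hash)
#             self.hash = hashlib.sha256(payload.encode()).hexdigest()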
| 29.147368 | 109 | 0.670639 | 372 | 2,769 | 4.669355 | 0.241935 | 0.036269 | 0.044329 | 0.025907 | 0.146229 | 0.126655 | 0.084053 | 0.084053 | 0.043754 | 0.043754 | 0 | 0.003188 | 0.206934 | 2,769 | 94 | 110 | 29.457447 | 0.787796 | 0.163958 | 0 | 0 | 0 | 0 | 0.110147 | 0.00954 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.09434 | 0 | 0.264151 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5abf57855b0878f431ddcdda9b241c56bf21ca9 | 1,799 | py | Python | 1742 Maximum Number of Balls in a Box.py | AtharvRedij/leetcode-solutions | 7194d202302989d53c241b12c9befb06923b1510 | [
"MIT"
] | null | null | null | 1742 Maximum Number of Balls in a Box.py | AtharvRedij/leetcode-solutions | 7194d202302989d53c241b12c9befb06923b1510 | [
"MIT"
] | null | null | null | 1742 Maximum Number of Balls in a Box.py | AtharvRedij/leetcode-solutions | 7194d202302989d53c241b12c9befb06923b1510 | [
"MIT"
] | 1 | 2021-03-06T06:15:48.000Z | 2021-03-06T06:15:48.000Z | '''
URL: https://leetcode.com/problems/maximum-number-of-balls-in-a-box/
Difficulty: Easy
Description: Maximum Number of Balls in a Box
You are working in a ball factory where you have n balls numbered from lowLimit up to highLimit inclusive (i.e., n == highLimit - lowLimit + 1), and an infinite number of boxes numbered from 1 to infinity.
Your job at this factory is to put each ball in the box with a number equal to the sum of digits of the ball's number. For example, the ball number 321 will be put in the box number 3 + 2 + 1 = 6 and the ball number 10 will be put in the box number 1 + 0 = 1.
Given two integers lowLimit and highLimit, return the number of balls in the box with the most balls.
Example 1:
Input: lowLimit = 1, highLimit = 10
Output: 2
Explanation:
Box Number: 1 2 3 4 5 6 7 8 9 10 11 ...
Ball Count: 2 1 1 1 1 1 1 1 1 0 0 ...
Box 1 has the most number of balls with 2 balls.
Example 2:
Input: lowLimit = 5, highLimit = 15
Output: 2
Explanation:
Box Number: 1 2 3 4 5 6 7 8 9 10 11 ...
Ball Count: 1 1 1 1 2 2 1 1 1 0 0 ...
Boxes 5 and 6 have the most number of balls with 2 balls in each.
Example 3:
Input: lowLimit = 19, highLimit = 28
Output: 2
Explanation:
Box Number: 1 2 3 4 5 6 7 8 9 10 11 12 ...
Ball Count: 0 1 1 1 1 1 1 1 1 2 0 0 ...
Box 10 has the most number of balls with 2 balls.
Constraints:
1 <= lowLimit <= highLimit <= 105
'''
from collections import defaultdict
class Solution:
def getSum(self, num):
s = 0
for d in str(num):
s += int(d)
return s
def countBalls(self, lowLimit, highLimit):
countDict = defaultdict(int)
for i in range(lowLimit, highLimit+1):
s = self.getSum(i)
countDict[s] += 1
return max(countDict.values())
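# Quick check against the examples in the problem statement above (illustrative only):
#
#     s = Solution()
#     assert s.countBalls(1, 10) == 2     # Example 1
#     assert s.countBalls(19, 28) == 2    # Example 3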
| 26.850746 | 259 | 0.662034 | 335 | 1,799 | 3.555224 | 0.298507 | 0.031906 | 0.037783 | 0.036944 | 0.303107 | 0.292191 | 0.292191 | 0.209908 | 0.171285 | 0.115869 | 0 | 0.090293 | 0.261256 | 1,799 | 66 | 260 | 27.257576 | 0.805869 | 0.772651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5aead02c4ce5e4d17e57ea044adbf8fccbe6f90 | 27,292 | py | Python | elfi/store.py | diadochos/elfi | f2932297d686403950f7f55a290cd25af10dbda6 | [
"BSD-3-Clause"
] | 166 | 2017-03-05T17:10:38.000Z | 2022-03-31T21:25:04.000Z | elfi/store.py | diadochos/elfi | f2932297d686403950f7f55a290cd25af10dbda6 | [
"BSD-3-Clause"
] | 78 | 2017-04-05T11:46:23.000Z | 2022-03-28T13:11:44.000Z | elfi/store.py | diadochos/elfi | f2932297d686403950f7f55a290cd25af10dbda6 | [
"BSD-3-Clause"
] | 56 | 2017-03-19T17:51:57.000Z | 2022-03-16T13:17:52.000Z | """This module contains implementations for storing simulated values for later use."""
import io
import logging
import os
import pickle
import shutil
import numpy as np
import numpy.lib.format as npformat
logger = logging.getLogger(__name__)
_default_prefix = 'pools'
class OutputPool:
"""Store node outputs to dictionary-like stores.
The default store is a Python dictionary.
Notes
-----
Saving the store requires that all the stores are pickleable.
Arbitrary objects that support simple array indexing can be used as stores by using
the `elfi.store.ArrayObjectStore` class.
See the `elfi.store.StoreBase` interfaces if you wish to implement your own ELFI
compatible store. Basically any object that fulfills the Pythons dictionary
api will work as a store in the pool.
"""
_pkl_name = '_outputpool.pkl'
def __init__(self, outputs=None, name=None, prefix=None):
"""Initialize OutputPool.
Depending on the algorithm, some of these values may be reused
after making some changes to `ElfiModel` thus speeding up the inference
significantly. For instance, if all the simulations are stored in Rejection
sampling, one can change the summaries and distances without having to rerun
the simulator.
Parameters
----------
outputs : list, dict, optional
List of node names which to store or a dictionary with existing stores. The
stores are created on demand.
name : str, optional
Name of the pool. Used to open a saved pool from disk.
prefix : str, optional
Path to directory under which `elfi.ArrayPool` will place its folder.
Default is a relative path ./pools.
Returns
-------
instance : OutputPool
"""
if outputs is None:
stores = {}
elif isinstance(outputs, dict):
stores = outputs
else:
stores = dict.fromkeys(outputs)
self.stores = stores
# Context information
self.batch_size = None
self.seed = None
self.name = name
self.prefix = prefix or _default_prefix
if self.path and os.path.exists(self.path):
raise ValueError("A pool with this name already exists in {}. You can use "
"OutputPool.open() to open it.".format(self.prefix))
@property
def output_names(self):
"""Return a list of stored names."""
return list(self.stores.keys())
@property
def has_context(self):
"""Check if current pool has context information."""
return self.seed is not None and self.batch_size is not None
def set_context(self, context):
"""Set the context of the pool.
The pool needs to know the batch_size and the seed.
Notes
-----
Also sets the name of the pool if not set already.
Parameters
----------
context : elfi.ComputationContext
"""
if self.has_context:
raise ValueError('Context is already set')
self.batch_size = context.batch_size
self.seed = context.seed
if self.name is None:
self.name = "{}_{}".format(self.__class__.__name__.lower(), self.seed)
def get_batch(self, batch_index, output_names=None):
"""Return a batch from the stores of the pool.
Parameters
----------
batch_index : int
output_names : list
which outputs to include to the batch
Returns
-------
batch : dict
"""
output_names = output_names or self.output_names
batch = dict()
for output in output_names:
store = self.stores[output]
if store is None:
continue
if batch_index in store:
batch[output] = store[batch_index]
return batch
def add_batch(self, batch, batch_index):
"""Add the outputs from the batch to their stores."""
for node, values in batch.items():
if node not in self.stores:
continue
store = self._get_store_for(node)
# Do not add again. The output should be the same.
if batch_index in store:
continue
store[batch_index] = values
def remove_batch(self, batch_index):
"""Remove the batch from all stores."""
for store in self.stores.values():
if batch_index in store:
del store[batch_index]
def has_store(self, node):
"""Check if `node` is in stores."""
return node in self.stores
def get_store(self, node):
"""Return the store for `node`."""
return self.stores[node]
def add_store(self, node, store=None):
"""Add a store object for the node.
Parameters
----------
node : str
store : dict, StoreBase, optional
"""
if node in self.stores and self.stores[node] is not None:
raise ValueError("Store for '{}' already exists".format(node))
store = store if store is not None else self._make_store_for(node)
self.stores[node] = store
def remove_store(self, node):
"""Remove and return a store from the pool.
Parameters
----------
node : str
Returns
-------
store
The removed store
"""
store = self.stores.pop(node)
return store
def _get_store_for(self, node):
"""Get or make a store."""
if self.stores[node] is None:
self.stores[node] = self._make_store_for(node)
return self.stores[node]
def _make_store_for(self, node):
"""Make a default store for a node.
All the default stores will be created through this method.
"""
return {}
def __len__(self):
"""Return the largest batch index in any of the stores."""
largest = 0
for output, store in self.stores.items():
if store is None:
continue
largest = max(largest, len(store))
return largest
def __getitem__(self, batch_index):
"""Return the batch."""
return self.get_batch(batch_index)
def __setitem__(self, batch_index, batch):
"""Add `batch` into location `batch_index`."""
return self.add_batch(batch, batch_index)
def __contains__(self, batch_index):
"""Check if the pool contains `batch_index`."""
return len(self) > batch_index
def clear(self):
"""Remove all data from the stores."""
for store in self.stores.values():
store.clear()
def save(self):
"""Save the pool to disk.
This will use pickle to store the pool under self.path.
"""
if not self.has_context:
raise ValueError("Pool context is not set, cannot save. Please see the "
"set_context method.")
os.makedirs(self.path, exist_ok=True)
# Change the working directory so that relative paths to the pool data folder can
# be reliably used. This allows moving and renaming of the folder.
cwd = os.getcwd()
os.chdir(self.path)
# Pickle the stores separately
for node, store in self.stores.items():
filename = node + '.pkl'
try:
pickle.dump(store, open(filename, 'wb'))
except BaseException:
raise IOError('Failed to pickle the store for node {}, please check that '
'it is pickleable or remove it before saving.'.format(node))
os.chdir(cwd)
# Save the pool itself with stores replaced with Nones
stores = self.stores
self.stores = dict.fromkeys(stores.keys())
filename = os.path.join(self.path, self._pkl_name)
pickle.dump(self, open(filename, "wb"))
# Restore the original to the object
self.stores = stores
def close(self):
"""Save and close the stores that support it.
The pool will not be usable afterwards.
"""
self.save()
for store in self.stores.values():
if hasattr(store, 'close'):
store.close()
def flush(self):
"""Flush all data from the stores.
If the store does not support flushing, do nothing.
"""
for store in self.stores.values():
if hasattr(store, 'flush'):
store.flush()
def delete(self):
"""Remove all persisted data from disk."""
for store in self.stores.values():
if hasattr(store, 'close'):
store.close()
if self.path is None:
return
elif not os.path.exists(self.path):
return
shutil.rmtree(self.path)
@classmethod
def open(cls, name, prefix=None):
"""Open a closed or saved ArrayPool from disk.
Parameters
----------
name : str
prefix : str, optional
Returns
-------
ArrayPool
"""
prefix = prefix or _default_prefix
path = cls._make_path(name, prefix)
filename = os.path.join(path, cls._pkl_name)
pool = pickle.load(open(filename, "rb"))
# Load the stores. Change the working directory temporarily so that pickled stores
# can find their data dependencies even if the folder has been renamed.
cwd = os.getcwd()
os.chdir(path)
for node in list(pool.stores.keys()):
filename = node + '.pkl'
try:
store = pickle.load(open(filename, 'rb'))
except Exception as e:
logger.warning('Failed to load the store for node {}. Reason: {}'
.format(node, str(e)))
del pool.stores[node]
continue
pool.stores[node] = store
os.chdir(cwd)
# Update the name and prefix in case the pool folder was moved
pool.name = name
pool.prefix = prefix
return pool
@classmethod
def _make_path(cls, name, prefix):
return os.path.join(prefix, name)
@property
def path(self):
"""Return the path to the pool."""
if self.name is None:
return None
return self._make_path(self.name, self.prefix)
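# Illustrative usage sketch (hypothetical node names, mirroring the dictionary API
# documented above): batches are plain dicts keyed by node name, and only the
# outputs listed at construction time are kept.
#
#     pool = OutputPool(['MA2', 'S1'])
#     pool.add_batch({'MA2': np.zeros(3), 'S1': np.ones(3), 'd': np.ones(3)}, 0)
#     pool.get_batch(0)             # {'MA2': ..., 'S1': ...}
#     pool.get_batch(0, ['S1'])     # only the requested output
#     0 in pool                     # True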
class ArrayPool(OutputPool):
"""OutputPool that uses binary .npy files as default stores.
The default store medium for output data is a NumPy binary `.npy` file for NumPy
array data. You can however also add other types of stores as well.
Notes
-----
The default store is implemented in elfi.store.NpyStore that uses NpyArrays as stores.
The NpyArray is a wrapper over NumPy .npy binary file for array data and supports
appending the .npy file. It uses the .npy format 2.0 files.
"""
def _make_store_for(self, node):
if not self.has_context:
raise ValueError('ArrayPool has no context set')
# Make the directory for the array pools
os.makedirs(self.path, exist_ok=True)
filename = os.path.join(self.path, node)
return NpyStore(filename, self.batch_size)
class StoreBase:
"""Base class for output stores for the pools.
Stores store the outputs of a single node in ElfiModel. This is a subset of the
Python dictionary api.
Notes
-----
Any dictionary like object will work directly as an ELFI store.
"""
def __getitem__(self, batch_index):
"""Return a batch from location `batch_index`."""
raise NotImplementedError
def __setitem__(self, batch_index, data):
"""Set array to `data` at location `batch_index`."""
raise NotImplementedError
def __delitem__(self, batch_index):
"""Delete data from location `batch_index`."""
raise NotImplementedError
def __contains__(self, batch_index):
"""Check if array contains `batch_index`."""
raise NotImplementedError
def __len__(self):
"""Return the number of batches in the store."""
raise NotImplementedError
def clear(self):
"""Remove all batches from the store."""
raise NotImplementedError
def close(self):
"""Close the store.
        Optional method. Useful for closing e.g. file streams.
"""
pass
def flush(self):
"""Flush the store.
Optional to implement.
"""
pass
# TODO: add mask for missing items. It should replace the use of `n_batches`.
# This should make it possible to also append further than directly to the end
# of current n_batches index.
class ArrayStore(StoreBase):
"""Convert any array object to ELFI store to be used within a pool.
This class is intended to make it easy to use objects that support array indexing
as outputs stores for nodes.
Attributes
----------
array : array-like
The array that the batches are stored to
batch_size : int
n_batches : int
How many batches are available from the underlying array.
"""
def __init__(self, array, batch_size, n_batches=-1):
"""Initialize ArrayStore.
Parameters
----------
array
Any array like object supporting Python list indexing
batch_size : int
Size of a batch of data
n_batches : int, optional
How many batches should be made available from the array. Default is -1
meaning all available batches.
"""
if n_batches == -1:
if len(array) % batch_size != 0:
logger.warning("The array length is not divisible by the batch size.")
n_batches = len(array) // batch_size
self.array = array
self.batch_size = batch_size
self.n_batches = n_batches
def __getitem__(self, batch_index):
"""Return a batch from location `batch_index`."""
sl = self._to_slice(batch_index)
return self.array[sl]
def __setitem__(self, batch_index, data):
"""Set array to `data` at location `batch_index`."""
if batch_index > self.n_batches:
raise IndexError("Appending further than to the end of the store array is "
"currently not supported.")
sl = self._to_slice(batch_index)
if sl.stop > len(self.array):
raise IndexError("There is not enough space left in the store array.")
self.array[sl] = data
if batch_index == self.n_batches:
self.n_batches += 1
def __contains__(self, batch_index):
"""Check if array contains `batch_index`."""
return batch_index < self.n_batches
def __delitem__(self, batch_index):
"""Delete data from location `batch_index`."""
if batch_index not in self:
raise IndexError("Cannot remove, batch index {} is not in the array"
.format(batch_index))
elif batch_index != self.n_batches - 1:
raise IndexError("Removing batches from the middle of the store array is "
"currently not supported.")
# Move the n_batches index down
if batch_index == self.n_batches - 1:
self.n_batches -= 1
def __len__(self):
"""Return the number of batches in store."""
return self.n_batches
def _to_slice(self, batch_index):
"""Return a slice object that covers the batch at `batch_index`."""
a = self.batch_size * batch_index
return slice(a, a + self.batch_size)
def clear(self):
"""Clear array from store."""
if hasattr(self.array, 'clear'):
self.array.clear()
self.n_batches = 0
def flush(self):
"""Flush any changes in memory to array."""
if hasattr(self.array, 'flush'):
self.array.flush()
def close(self):
"""Close array."""
if hasattr(self.array, 'close'):
self.array.close()
def __del__(self):
"""Close array."""
self.close()
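# Illustrative sketch (hypothetical sizes): any object that supports slice
# assignment can back an ArrayStore, e.g. a preallocated NumPy array with room for
# a known number of batches.
#
#     arr = np.empty(10 * 5)                      # room for 10 batches of size 5
#     store = ArrayStore(arr, batch_size=5, n_batches=0)
#     store[0] = np.arange(5)                     # fills arr[0:5]
#     store[1] = np.arange(5, 10)                 # fills arr[5:10]
#     len(store)                                  # 2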
class NpyStore(ArrayStore):
"""Store data to binary .npy files.
Uses the NpyArray objects as an array store.
"""
def __init__(self, file, batch_size, n_batches=-1):
"""Initialize NpyStore.
Parameters
----------
file : NpyArray or str
NpyArray object or path to the .npy file
batch_size
n_batches : int, optional
How many batches to make available from the file. Default -1 indicates that
all available batches.
"""
array = file if isinstance(file, NpyArray) else NpyArray(file)
super(NpyStore, self).__init__(array, batch_size, n_batches)
def __setitem__(self, batch_index, data):
"""Set array to `data` at location `batch_index`."""
sl = self._to_slice(batch_index)
# NpyArray supports appending
if batch_index == self.n_batches and sl.start == len(self.array):
self.array.append(data)
self.n_batches += 1
return
super(NpyStore, self).__setitem__(batch_index, data)
def __delitem__(self, batch_index):
"""Delete data from location `batch_index`."""
super(NpyStore, self).__delitem__(batch_index)
sl = self._to_slice(batch_index)
self.array.truncate(sl.start)
def delete(self):
"""Delete array."""
self.array.delete()
class NpyArray:
"""Extension to NumPy's .npy format.
The NpyArray is a wrapper over NumPy .npy binary file for array data and supports
appending the .npy file.
Notes
-----
- Supports only binary files.
- Supports only .npy version 2.0
- See numpy.lib.npformat for documentation of the .npy format
"""
MAX_SHAPE_LEN = 2**64
# Version 2.0 header prefix length
HEADER_DATA_OFFSET = 12
HEADER_DATA_SIZE_OFFSET = 8
def __init__(self, filename, array=None, truncate=False):
"""Initialize NpyArray.
Parameters
----------
filename : str
File name
array : ndarray, optional
Initial array
truncate : bool
Whether to truncate the file or not
"""
self.header_length = None
self.itemsize = None
# Header data fields
self.shape = None
self.fortran_order = False
self.dtype = None
# The header bytes must be prepared in advance, because there is an import in
# `numpy.lib.format._write_array_header` (1.11.3) that fails if the program is
# being closed on exception and would corrupt the .npy file.
self._header_bytes_to_write = None
if filename[-4:] != '.npy':
filename += '.npy'
self.filename = filename
if array is not None:
truncate = True
self.fs = None
if truncate is False and os.path.exists(self.filename):
self.fs = open(self.filename, 'r+b')
self._init_from_file_header()
else:
self.fs = open(self.filename, 'w+b')
# Numpy memmap for the file array data
self._memmap = None
if array is not None:
self.append(array)
self.flush()
def __getitem__(self, sl):
"""Return a slice `sl` of data."""
return self.memmap[sl]
def __setitem__(self, sl, value):
"""Set data at slice `sl` to `value`."""
self.memmap[sl] = value
def __len__(self):
"""Return the length of array."""
return self.shape[0] if self.shape else 0
@property
def size(self):
"""Return the number of items in the array."""
return np.prod(self.shape)
def append(self, array):
"""Append data from `array` to self."""
if self.closed:
raise ValueError('Array is not opened.')
if not self.initialized:
self.init_from_array(array)
if array.shape[1:] != self.shape[1:]:
raise ValueError("Appended array is of different shape.")
elif array.dtype != self.dtype:
raise ValueError("Appended array is of different dtype.")
# Append new data
pos = self.header_length + self.size * self.itemsize
self.fs.seek(pos)
self.fs.write(array.tobytes('C'))
self.shape = (self.shape[0] + len(array), ) + self.shape[1:]
# Only prepare the header bytes, need to be flushed to take effect
self._prepare_header_data()
# Invalidate the memmap
self._memmap = None
@property
def memmap(self):
"""Return a NumPy memory map to the array data."""
if not self.initialized:
raise IndexError("NpyArray is not initialized")
if self._memmap is None:
order = 'F' if self.fortran_order else 'C'
self._memmap = np.memmap(self.fs, dtype=self.dtype, shape=self.shape,
offset=self.header_length, order=order)
return self._memmap
def _init_from_file_header(self):
"""Initialize the object from an existing file."""
self.fs.seek(self.HEADER_DATA_SIZE_OFFSET)
try:
self.shape, fortran_order, self.dtype = \
npformat.read_array_header_2_0(self.fs)
except ValueError:
raise ValueError('Npy file {} header is not 2.0 format. You can make the '
                             'conversion using elfi.store.NpyArray by passing the '
'preloaded array as an argument.'.format(self.filename))
self.header_length = self.fs.tell()
if fortran_order:
raise ValueError('Column major (Fortran-style) files are not supported. Please'
'translate if first to row major (C-style).')
# Determine itemsize
shape = (0, ) + self.shape[1:]
self.itemsize = np.empty(shape=shape, dtype=self.dtype).itemsize
def init_from_array(self, array):
"""Initialize the object from an array.
Sets the the header_length so large that it is possible to append to the array.
Returns
-------
h_bytes : io.BytesIO
Contains the oversized header bytes
"""
if self.initialized:
raise ValueError("The array has been initialized already!")
self.shape = (0, ) + array.shape[1:]
self.dtype = array.dtype
self.itemsize = array.itemsize
# Read header data from array and set modify it to be large for the length
# 1_0 is the same for 2_0
d = npformat.header_data_from_array_1_0(array)
d['shape'] = (self.MAX_SHAPE_LEN, ) + d['shape'][1:]
d['fortran_order'] = False
# Write a prefix for a very long array to make it large enough for appending new
# data
h_bytes = io.BytesIO()
npformat.write_array_header_2_0(h_bytes, d)
self.header_length = h_bytes.tell()
# Write header prefix to file
self.fs.seek(0)
h_bytes.seek(0)
self.fs.write(h_bytes.read(self.HEADER_DATA_OFFSET))
# Write header data for the zero length to make it a valid file
self._prepare_header_data()
self._write_header_data()
def truncate(self, length=0):
"""Truncate the array to the specified length.
Parameters
----------
length : int
Length (=`shape[0]`) of the array to truncate to. Default 0.
"""
if not self.initialized:
raise ValueError('The array must be initialized before it can be truncated. '
'Please see init_from_array.')
if self.closed:
raise ValueError('The array has been closed.')
# Reset length
self.shape = (length, ) + self.shape[1:]
self._prepare_header_data()
self.fs.seek(self.header_length + self.size * self.itemsize)
self.fs.truncate()
# Invalidate the memmap
self._memmap = None
def close(self):
"""Close the file."""
if self.initialized:
self._write_header_data()
self.fs.close()
# Invalidate the memmap
self._memmap = None
def clear(self):
"""Truncate the array to 0."""
self.truncate(0)
def delete(self):
"""Remove the file and invalidate this array."""
if self.deleted:
return
name = self.fs.name
self.close()
os.remove(name)
self.fs = None
self.header_length = None
# Invalidate the memmap
self._memmap = None
def flush(self):
"""Flush any changes in memory to array."""
self._write_header_data()
self.fs.flush()
def __del__(self):
"""Close the array."""
self.close()
def _prepare_header_data(self):
# Make header data
d = {
'shape': self.shape,
'fortran_order': self.fortran_order,
'descr': npformat.dtype_to_descr(self.dtype)
}
h_bytes = io.BytesIO()
npformat.write_array_header_2_0(h_bytes, d)
# Pad the end of the header
fill_len = self.header_length - h_bytes.tell()
if fill_len < 0:
raise OverflowError(
"File {} cannot be appended. The header is too short.".format(self.filename))
elif fill_len > 0:
h_bytes.write(b'\x20' * fill_len)
h_bytes.seek(0)
self._header_bytes_to_write = h_bytes.read()
def _write_header_data(self):
if not self._header_bytes_to_write:
return
# Rewrite header data
self.fs.seek(self.HEADER_DATA_OFFSET)
h_bytes = self._header_bytes_to_write[self.HEADER_DATA_OFFSET:]
self.fs.write(h_bytes)
# Flag bytes off as they are now written
self._header_bytes_to_write = None
@property
def deleted(self):
"""Check whether file has been deleted."""
return self.fs is None
@property
def closed(self):
"""Check if file has been deleted or closed."""
return self.deleted or self.fs.closed
@property
def initialized(self):
"""Check if file is open."""
return (not self.closed) and (self.header_length is not None)
def __getstate__(self):
"""Return a dictionary with a key `filename`."""
if not self.fs.closed:
self.flush()
return {'filename': self.filename}
def __setstate__(self, state):
"""Initialize with `filename` from dictionary `state`."""
filename = state.pop('filename')
basename = os.path.basename(filename)
if os.path.exists(filename):
self.__init__(filename)
elif os.path.exists(basename):
self.__init__(basename)
else:
self.fs = None
raise FileNotFoundError('Could not find the file {}'.format(filename))
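# Illustrative usage sketch for NpyArray (hypothetical file name): the file remains
# a valid .npy (format 2.0) file at every step, so NumPy can read it back directly.
#
#     arr = NpyArray('batches.npy', np.zeros((2, 3)))
#     arr.append(np.ones((2, 3)))      # grows the file in place
#     arr[2:4]                         # the appended rows, via the memmap
#     arr.truncate(2)                  # drop them again
#     arr.close()                      # rewrites the header and closes the file
#     np.load('batches.npy')           # -> the original 2 x 3 zeros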
| 30.699663 | 93 | 0.589513 | 3,422 | 27,292 | 4.56955 | 0.132963 | 0.036452 | 0.01522 | 0.00761 | 0.255292 | 0.174202 | 0.130204 | 0.093432 | 0.081985 | 0.066509 | 0 | 0.003651 | 0.317566 | 27,292 | 888 | 94 | 30.734234 | 0.835919 | 0.310128 | 0 | 0.34279 | 0 | 0 | 0.082934 | 0 | 0 | 0 | 0 | 0.001126 | 0 | 1 | 0.167849 | false | 0.007092 | 0.016548 | 0.002364 | 0.288416 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5b265edde92e0233b9d674033af1f38ca23a8c4 | 2,086 | py | Python | packages/legacy/bundles/reactor_anu_freemodel_v01.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | 5 | 2019-10-14T01:06:57.000Z | 2021-02-02T16:33:06.000Z | packages/legacy/bundles/reactor_anu_freemodel_v01.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null | packages/legacy/bundles/reactor_anu_freemodel_v01.py | gnafit/gna | c1a58dac11783342c97a2da1b19c97b85bce0394 | [
"MIT"
] | null | null | null |
from load import ROOT as R
import gna.constructors as C
import numpy as N
from collections import OrderedDict
from gna.bundle import *
from scipy.interpolate import interp1d
class reactor_anu_freemodel_v01(TransformationBundleLegacy):
debug = False
def __init__(self, *args, **kwargs):
super(reactor_anu_freemodel_v01, self).__init__( *args, **kwargs )
self.edges = self.shared.reactor_anu_edges.data()
self.bundles=OrderedDict( self=self )
def build(self):
with self.common_namespace:
npar_raw_t = C.VarArray(self.variables, ns=self.common_namespace)
nsname = self.common_namespace.name
if self.cfg.varmode=='log':
npar_raw_t.vararray.setLabel('Spec pars:\nlog(n_i)')
npar_t = R.Exp(ns=self.common_namespace)
npar_t.exp.points( npar_raw_t )
npar_t.exp.setLabel('n_i')
self.objects['npar_log'] = npar_raw_t
else:
npar_raw_t.vararray.setLabel('n_i')
npar_t = npar_raw_t
for ns in self.namespaces:
"""Store data"""
self.transformations_out[ns.name] = npar_t.transformations[0]
self.outputs[ns.name] = npar_t.single()
self.objects['corrections'] = npar_t
def define_variables(self):
varmode = self.cfg.varmode
if not varmode in ['log', 'plain']:
raise Exception('Unknown varmode (should be log or plain): '+str(varmode))
self.variables=[]
for i in range(self.edges.size):
name = self.cfg.varname.format( index=i )
self.variables.append(name)
if varmode=='log':
var=self.common_namespace.reqparameter( name, central=0.0, sigma=N.inf )
var.setLabel('Average reactor spectrum correction for {} MeV [log]'.format(self.edges[i]))
else:
var=self.common_namespace.reqparameter( name, central=1.0, sigma=N.inf )
var.setLabel('Average reactor spectrum correction for {} MeV'.format(self.edges[i]))
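# Note on the two parametrizations above (informal summary, no extra behaviour):
# in 'log' mode the free parameters p_i have central value 0 and the corrections
# are n_i = exp(p_i) via the R.Exp transformation, so n_i = 1 at the central value;
# in 'plain' mode the n_i themselves are the parameters with central value 1.
# One parameter is created per entry of self.shared.reactor_anu_edges.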
| 36.596491 | 106 | 0.625599 | 268 | 2,086 | 4.69403 | 0.358209 | 0.027822 | 0.09062 | 0.034976 | 0.203498 | 0.165342 | 0.165342 | 0.0938 | 0.0938 | 0.0938 | 0 | 0.00651 | 0.263663 | 2,086 | 56 | 107 | 37.25 | 0.8125 | 0 | 0 | 0.046512 | 0 | 0 | 0.096182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.139535 | 0 | 0.255814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5b40933fbe3173bcd69180dd440ee803d48c65e | 913 | py | Python | daily_problems/problem_201_to_300/278.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | 1 | 2019-04-18T03:29:02.000Z | 2019-04-18T03:29:02.000Z | daily_problems/problem_201_to_300/278.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | null | null | null | daily_problems/problem_201_to_300/278.py | rrwt/daily-coding-challenge | b16fc365fd142ebab429e605cb146c8bb0bc97a2 | [
"MIT"
] | null | null | null | """
Given an integer N, construct all possible binary search trees with N nodes.
"""
from typing import List, Optional
from daily_problems.binary_tree_node import Node, level_order_traversal
def construct_bst(start: int, end: int) -> List[Optional[Node]]:
if start > end:
return [None]
return_list = []
for index in range(start, end + 1):
left_subtrees = construct_bst(start, index - 1)
right_subtrees = construct_bst(index + 1, end)
for left in left_subtrees:
for right in right_subtrees:
root = Node(index)
root.left = left
root.right = right
return_list.append(root)
return return_list
if __name__ == "__main__":
for n in range(1, 6):
print(f"bst of size {n}")
for bst in construct_bst(0, n - 1):
level_order_traversal(bst)
print()
| 25.361111 | 76 | 0.6046 | 121 | 913 | 4.347107 | 0.413223 | 0.091255 | 0.072243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011058 | 0.306681 | 913 | 35 | 77 | 26.085714 | 0.819905 | 0.083242 | 0 | 0 | 0 | 0 | 0.027744 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.227273 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5b5b52cfec3dc0b5aa8bae4390d3e7f434ca55b | 994 | py | Python | Python3/426.convert-binary-search-tree-to-sorted-doubly-linked-list.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/426.convert-binary-search-tree-to-sorted-doubly-linked-list.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/426.convert-binary-search-tree-to-sorted-doubly-linked-list.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=426 lang=python3
#
# [426] Convert Binary Search Tree to Sorted Doubly Linked List
#
# @lc code=start
"""
# Definition for a Node.
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
"""
class Solution:
def treeToDoublyList(self, root: 'Node'):
if not root:
return
self.vals = []
self.travel_tree(root)
head = Node(0)
prenode = head
for val in self.vals:
curnode = Node(val)
curnode.left = prenode
if prenode:
prenode.right = curnode
prenode = curnode
prenode.right = head.right
head.right.left = prenode
return head.right
def travel_tree(self, node):
if not node:
return
self.travel_tree(node.left)
self.vals.append(node.val)
self.travel_tree(node.right)
# @lc code=end
| 23.116279 | 63 | 0.557344 | 121 | 994 | 4.512397 | 0.371901 | 0.07326 | 0.076923 | 0.065934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012308 | 0.346076 | 994 | 42 | 64 | 23.666667 | 0.827692 | 0.292757 | 0 | 0.086957 | 0 | 0 | 0.005814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c5ba4fc57eee7168c52e533fb62548e53edb920e | 22,668 | py | Python | joinQuant/T_0/T_0_Moni.py | LoveYakamoz/Excalibur | 55784410a8f2e742b0dc68f9fe597098dc78a4e3 | [
"Apache-2.0"
] | 3 | 2017-08-13T15:01:49.000Z | 2017-09-05T14:06:48.000Z | joinQuant/T_0/T_0_Moni.py | LoveYakamoz/Quantitative_Trading | 55784410a8f2e742b0dc68f9fe597098dc78a4e3 | [
"Apache-2.0"
] | 13 | 2017-05-18T16:18:15.000Z | 2017-07-11T14:01:30.000Z | joinQuant/T_0/T_0_Moni.py | LoveYakamoz/Excalibur | 55784410a8f2e742b0dc68f9fe597098dc78a4e3 | [
"Apache-2.0"
] | null | null | null | from jqdata import *
import numpy as np
import pandas as pd
import talib as ta
from math import isnan, floor
from math import atan
import tushare as ts
# Standard-library imports required by the code below (the Enum subclasses,
# functools.reduce in get_stock_angle, and the math.atan call there).
import math
from functools import reduce
from enum import Enum
# Source of the stock pool
class Source(Enum):
    AUTO = 0  # pick stocks automatically from the CSI 300 by volatility and price
    CLIENT = 1  # use the stock list supplied by the user
g.stocks_source = Source.CLIENT  # how the stock pool is obtained (default here: the client-supplied list)
g.stock_id_list_from_client = ["002506.XSHE", "600703.XSHG", "300059.XSHE", "600206.XSHG",
"002281.XSHE", "600340.XSHG", "002092.XSHE", "002440.XSHE",
"600897.XSHG", "000063.XSHE"]
g.stock_position = {"002506.XSHE": 0,
"600703.XSHG": 0,
"300059.XSHE": 0,
"600206.XSHG": 0,
"002281.XSHE": 0,
"600340.XSHG": 0,
"002092.XSHE": 0,
"002440.XSHE": 0,
"600897.XSHG": 0,
"000063.XSHE": 0}
# Detailed information for the stocks held in the pool
g.basestock_pool = []
# Counters used for summarizing results
g.repeat_signal_count = 0
g.reset_order_count = 0
g.success_count = 0
# Number of bars used for the two moving averages
g.ma_4day_count = 4
g.ma_13day_count = 13
# Fraction of the position traded on each signal
g.adjust_scale = 0.25
# Expected return (used to price the delayed offsetting order)
g.expected_revenue = 0.003
# Angle threshold (degrees)
g.angle_threshold = 30
g.sampleSize = 20 # 20 or 30
g.scale = 1.0  # multiplier, 1.0 to 5.0
g.signal_buy_dict = {}
class Angle(Enum):
UP = 1  # angle > 30
MIDDLE = 0  # -30 <= angle <= 30
DOWN = -1  # angle < -30
class Status(Enum):
INIT = 0  # reset to INIT at the start of each trading day
WORKING = 1  # a buy/sell round trip is in progress
NONE = 2  # no further trading today
class Break(Enum):
UP = 0  # upward breakout
DOWN = 1  # downward breakout
NONE = 2
'''
Detailed per-stock record: prices, volumes, order ids and trade state
'''
class BaseStock(object):
def __init__(self, stock, close, min_vol, max_vol, lowest, highest, status, position, sell_order_id, buy_order_id):
self.stock = stock
self.close = close
self.min_vol = min_vol
self.max_vol = max_vol
self.lowest = lowest
self.highest = highest
self.status = status
self.position = position
self.sell_order_id = sell_order_id
self.sell_price = 0
self.buy_order_id = buy_order_id
self.buy_price = 0
self.break_throught_type = Break.NONE  # breakout type, up or down
self.break_throught_time = None  # time of the breakout
self.delay_amount = 0  # size of the delayed offsetting order
self.delay_price = 0  # limit price of the delayed offsetting order
self.operator_value_4 = 0
self.operator_value_13 = 0
self.angle = 1000
def print_stock(self):
log.info(
"stock: %s, close: %f, min_vol: %f, max_vol: %f, lowest: %f, hightest: %f, position: %f, sell_roder_id: %d, buy_order_id: %d, operator_value_4: %f, operator_value_13: %f"
, self.stock, self.close, self.min_vol, self.max_vol, self.lowest, self.highest, self.position,
self.sell_order_id, self.buy_order_id, self.operator_value_4, self.operator_value_13)
def get_stocks_by_client(context):
'''
Build the stock pool directly from the user-supplied list
'''
select_count = 0
for stock_id in g.stock_id_list_from_client:
stock_obj = BaseStock(stock_id, 0, 0, 0, 0, 0, Status.INIT, g.stock_position[stock_id], -1, -1)
stock_obj.print_stock()
g.basestock_pool.append(stock_obj)
select_count += 1
if select_count < g.position_count:
g.position_count = select_count
def get_stock_angle(context, stock):
'''angle = ATAN((today's 5-day close MA / yesterday's 5-day close MA - 1) * 100) * 57.3, in degrees'''
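# Worked example (hypothetical prices): if yesterday's 5-day MA is 10.00 and today's
# is 10.10, then (10.10 / 10.00 - 1) * 100 = 1.0 and atan(1.0) * 57.3 is roughly 45,
# i.e. a 1% rise in the 5-day MA maps to an angle of about 45 degrees.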
df_close = get_price(stock, count=6, end_date=str(context.current_dt), frequency='daily', fields=['close'])
close_list = [item for item in df_close['close']]
yesterday_5MA = (reduce(lambda x, y: x + y, close_list) - close_list[5]) / 5
today_5MA = (reduce(lambda x, y: x + y, close_list) - close_list[0]) / 5
angle = atan((today_5MA / yesterday_5MA - 1) * 100) * 57.3
log.info("股票:%s的角度为:%f", stock, angle)
return angle
def evaluate_activeVolBuy(np_close, vol):
"""
Net active (aggressive) buy-volume signal.
:param np_close: 1-minute close prices, roughly 3-4 x g.sampleSize bars
:param vol: volumes aligned with np_close
:return: (activeVolBuy, activeVolSell, netVol_buySell)
"""
diff_a1 = np.diff(np_close)
comp_vol = vol[1:]
activeVolBuy = []
activeVolSell = []
swingVol = []
accumulateNetVol = 0
netVol_buySell = []
for i in range(len(diff_a1)):
if diff_a1[i] > 0:
activeVolBuy.append(comp_vol[i])
activeVolSell.append(0)
elif diff_a1[i] < 0:
activeVolSell.append(comp_vol[i])
activeVolBuy.append(0)
else:
swingVol.append(comp_vol[i])
activeVolBuy.append(0)
activeVolSell.append(0)
for k in range(len(activeVolBuy)):
netVol = activeVolBuy[k] - activeVolSell[k]
accumulateNetVol += netVol
netVol_buySell.append(float(accumulateNetVol))
netVol_buySell_sum = np.sum(np.array(activeVolBuy)) - np.sum(np.array(activeVolSell))
print('netVol_buySell_sum=%d' % netVol_buySell_sum)
threshold_netVol = np.average(netVol_buySell[-g.sampleSize:])
if netVol_buySell[-1] > (threshold_netVol * g.scale):
g.signal_buy_dict['signal_netVol_buySell'] = 1
elif netVol_buySell[-1] < (-1) * (threshold_netVol * g.scale):
g.signal_buy_dict['signal_netVol_buySell'] = -1
return activeVolBuy, activeVolSell, netVol_buySell
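# Worked example (made-up numbers): with closes [10.00, 10.10, 10.05, 10.20] and
# volumes [100, 200, 150, 300], the price changes are [+0.10, -0.05, +0.15], so
# activeVolBuy = [200, 0, 300], activeVolSell = [0, 150, 0] and the cumulative net
# volume netVol_buySell = [200, 50, 350]. Its mean (200) is the threshold, and
# 350 > 200 * g.scale, so signal_netVol_buySell is set to 1 (a buy signal).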
def initialize(context):
log.info("---> 策略初始化 @ %s" % (str(context.current_dt)))
g.repeat_signal_count = 0
g.reset_order_count = 0
g.success_count = 0
# On the first day of the run the pool is stocked and no intraday trading is done
g.firstrun = True
# Default capacity of the stock pool
g.position_count = 30
if g.stocks_source == Source.AUTO:
log.info("程序根据波动率及股价自动从沪深300中获取股票")
pass
elif g.stocks_source == Source.CLIENT:
log.info("使用用户提供的股票")
get_stocks_by_client(context)
else:
log.error("未提供获得股票方法!!!")
# Set the benchmark
set_benchmark('000300.XSHG')
set_option('use_real_price', True)
log.info("初始化完成")
log.info("initialize over")
# At the start of each trading day, reset every stock to a tradable state
def before_trading_start(context):
log.info("初始化买卖状态为INIT")
for i in range(g.position_count):
g.basestock_pool[i].status = Status.INIT
g.basestock_pool[i].lowest = 0
g.basestock_pool[i].highest = 0
g.basestock_pool[i].status = Status.INIT
g.basestock_pool[i].sell_order_id = -1
g.basestock_pool[i].sell_price = 0
g.basestock_pool[i].buy_order_id = -1
g.basestock_pool[i].buy_price = 0
g.basestock_pool[i].break_throught_time = None
g.basestock_pool[i].delay_amount = 0
g.basestock_pool[i].delay_price = 0
angle = get_stock_angle(context, g.basestock_pool[i].stock)
if angle > 30:
g.basestock_pool[i].angle = Angle.UP
elif angle < -30:
g.basestock_pool[i].angle = Angle.DOWN
else:
g.basestock_pool[i].angle = Angle.MIDDLE
g.repeat_signal_count = 0
g.reset_order_count = 0
g.success_count = 0
# Place a buy order and record the order id so its status can be queried later
def buy_stock(context, stock, amount, limit_price, index):
buy_order = order(stock, amount, LimitOrderStyle(limit_price))
g.basestock_pool[index].buy_price = limit_price
if buy_order is not None:
g.basestock_pool[index].buy_order_id = buy_order.order_id
log.info("股票: %s, 以%f价格挂单,买入%d", stock, limit_price, amount)
# Place a sell order and record the order id so its status can be queried later
def sell_stock(context, stock, amount, limit_price, index):
sell_order = order(stock, amount, LimitOrderStyle(limit_price))
g.basestock_pool[index].sell_price = limit_price
if sell_order is not None:
g.basestock_pool[index].sell_order_id = sell_order.order_id
log.info("股票: %s, 以%f价格挂单,卖出%d", stock, limit_price, amount)
def sell_signal(context, stock, close_price, index):
# Each trade is g.adjust_scale of the held position
amount = floor(g.adjust_scale * g.basestock_pool[index].position)
log.info("sell scale: %f, src_posiont: %d, amount: %d", g.adjust_scale, g.basestock_pool[index].position, amount)
if amount <= 100:
amount = 100
else:
if amount % 100 != 0:
amount = amount - (amount % 100)
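# Worked example (hypothetical position): 0.25 * 1234 shares = 308.5, which is
# rounded down to the nearest board lot of 100 -> 300 shares; any amount at or
# below 100 is bumped up to the minimum lot of 100.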
# Place the sell limit order at close + 0.01
limit_price = close_price + 0.01
if g.basestock_pool[index].status == Status.WORKING:
log.warn("股票: %s, 收到重复卖出信号,但不做交易", stock)
elif g.basestock_pool[index].status == Status.INIT:
if g.basestock_pool[index].angle == Angle.UP:
log.warn("股票:%s, 角度大于30, 忽略卖出信号", stock)
return
sell_ret = sell_stock(context, stock, -amount, limit_price, index)
g.basestock_pool[index].break_throught_time = context.current_dt
# Queue the offsetting buy at close - yesterday's close * expected_revenue
yesterday = get_price(stock, count=1, end_date=str(context.current_dt), frequency='daily', fields=['close'])
limit_price = close_price - yesterday.iat[0, 0] * g.expected_revenue
g.basestock_pool[index].delay_amount = amount
g.basestock_pool[index].delay_price = limit_price
g.basestock_pool[index].break_throught_type = Break.DOWN
g.basestock_pool[index].status = Status.WORKING  # update trade state
else:
log.error("股票: %s, 交易状态出错", stock)
def buy_signal(context, stock, close_price, index):
# Each trade is g.adjust_scale of the held position
amount = floor(g.adjust_scale * g.basestock_pool[index].position)
log.info("buy scale: %f, src_posiont: %d, amount: %d", g.adjust_scale, g.basestock_pool[index].position, amount)
if amount <= 100:
amount = 100
else:
if amount % 100 != 0:
amount = amount - (amount % 100)
# Place the buy limit order at close - 0.01
limit_price = close_price - 0.01
# If the state is not INIT, a previous round trip is still in progress (not fully filled)
if g.basestock_pool[index].status == Status.WORKING:
log.warn("股票: %s, 收到重复买入信号,但不做交易", stock)
elif g.basestock_pool[index].status == Status.INIT:
if g.basestock_pool[index].angle == Angle.DOWN:
log.warn("股票:%s, 角度小于-30, 忽略买入信号", stock)
return
buy_stock(context, stock, amount, limit_price, index)
g.basestock_pool[index].break_throught_time = context.current_dt
# Queue the offsetting sell at close + yesterday's close * expected_revenue
yesterday = get_price(stock, count=1, end_date=str(context.current_dt), frequency='daily', fields=['close'])
limit_price = close_price + yesterday.iat[0, 0] * g.expected_revenue
g.basestock_pool[index].delay_amount = -amount
g.basestock_pool[index].delay_price = limit_price
g.basestock_pool[index].break_throught_type = Break.UP
g.basestock_pool[index].status = Status.WORKING  # update trade state
else:
log.error("股票: %s, 交易状态出错", stock)
# How many minutes the market has been open at the given time
def get_minute_count(current_dt):
'''
9:30 -- 11:30
13:00 --- 15:00
'''
current_hour = current_dt.hour
current_min = current_dt.minute
if current_hour < 12:
minute_count = (current_hour - 9) * 60 + current_min - 30
else:
minute_count = (current_hour - 13) * 60 + current_min + 120
return minute_count
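# Worked examples: 9:31 -> (9 - 9) * 60 + 31 - 30 = 1 minute after the open;
# 10:05 -> 35; 13:30 -> (13 - 13) * 60 + 30 + 120 = 150 (the 120 accounts for
# the whole morning session, and the lunch break is skipped).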
# Lowest price of the last 89 minutes (or since the open if fewer than 89 minutes have elapsed)
def update_89_lowest(context):
minute_count = get_minute_count(context.current_dt)
if minute_count > 89:
minute_count = 89
for i in range(g.position_count):
low_df = get_price(g.basestock_pool[i].stock, count=minute_count, end_date=str(context.current_dt),
frequency='1m', fields=['low'])
g.basestock_pool[i].lowest_89 = min(low_df['low'])
# Highest price of the last 233 minutes (or since the open if fewer than 233 minutes have elapsed)
def update_233_highest(context):
minute_count = get_minute_count(context.current_dt)
if minute_count > 233:
minute_count = 233
for i in range(g.position_count):
high_df = get_price(g.basestock_pool[i].stock, count=minute_count, end_date=str(context.current_dt),
frequency='1m', fields=['high'])
g.basestock_pool[i].highest_233 = max(high_df['high'])
# high_df.sort(['high'], ascending = False).iat[0,0]
# Cancel all open (not yet fully filled) orders
def cancel_open_order(context):
orders = get_open_orders()
for _order in orders.values():
cancel_order(_order)
# Bring every stock back to its original position size
def reset_position(context):
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
src_position = g.basestock_pool[i].position
cur_position = context.portfolio.positions[stock].total_amount
if src_position != cur_position:
log.info("src_position : cur_position", src_position, cur_position)
_order = order(stock, src_position - cur_position)
log.warn("reset posiont: ", _order)
g.reset_order_count += 1
def update_socket_statue(context):
orders = get_orders()
if len(orders) == 0:
return
hour = context.current_dt.hour
minute = context.current_dt.minute
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
sell_order_id = g.basestock_pool[i].sell_order_id
buy_order_id = g.basestock_pool[i].buy_order_id
status = g.basestock_pool[i].status
if (status == Status.WORKING) and ((sell_order_id != -1) and (buy_order_id != -1)):
sell_order = orders.get(sell_order_id)
buy_order = orders.get(buy_order_id)
if (sell_order is not None) and (buy_order is not None):
if sell_order.status == OrderStatus.held and buy_order.status == OrderStatus.held:
log.info("股票:%s回转交易完成 ==============> SUCCESS", stock)
g.basestock_pool[i].sell_order_id = -1
g.basestock_pool[i].buy_order_id = -1
g.basestock_pool[i].status = Status.INIT  # the buy/sell round trip is complete; a new trade may start
g.basestock_pool[i].buy_price = 0
g.basestock_pool[i].sell_price = 0
g.basestock_pool[i].delay_amount = 0
g.basestock_pool[i].delay_price = 0
g.basestock_pool[i].break_throught_time = None
g.basestock_pool[i].break_throught_type = Break.NONE
g.success_count += 1
# After 14:00 each day, no new trades are started
if hour == 14 and g.basestock_pool[i].status == Status.INIT:
g.basestock_pool[i].status = Status.NONE
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
sell_order_id = g.basestock_pool[i].sell_order_id
buy_order_id = g.basestock_pool[i].buy_order_id
status = g.basestock_pool[i].status
# Buy leg filled: place the delayed sell leg
if (status == Status.WORKING) and (sell_order_id == -1):
buy_order = orders.get(buy_order_id)
if (buy_order is not None):
if buy_order.status == OrderStatus.held:
log.info("买完再卖: stock %s, delay_amount: %d", stock, g.basestock_pool[i].delay_amount)
sell_stock(context, stock, g.basestock_pool[i].delay_amount, g.basestock_pool[i].delay_price, i)
# Sell leg filled: place the delayed buy leg
if (status == Status.WORKING) and (buy_order_id == -1):
sell_order = orders.get(sell_order_id)
if (sell_order is not None):
if sell_order.status == OrderStatus.held:
log.info("卖完再买: stock %s, delay_amount: %d", stock, g.basestock_pool[i].delay_amount)
buy_stock(context, stock, g.basestock_pool[i].delay_amount, g.basestock_pool[i].delay_price, i)
def get_delta_minute(datetime1, datetime2):
minute1 = get_minute_count(datetime1)
minute2 = get_minute_count(datetime2)
return abs(minute2 - minute1)
def price_and_volume_up(context, stock):
df = get_price(stock, end_date=context.current_dt, count=3, frequency='1m', fields=['close', 'volume'])
if (df['close'][0] < df['close'][1] < df['close'][2]) and (df['volume'][0] < df['volume'][1] < df['volume'][2]):
log.info("量价买入:%s, close: %f, %f, %f; volume: %d, %d, %d", stock, df['close'][0], df['close'][1],
df['close'][2],
df['volume'][0], df['volume'][1], df['volume'][2])
return True
else:
return False
def handle_data(context, data):
if str(context.run_params.start_date) == str(context.current_dt.strftime("%Y-%m-%d")):
if g.firstrun is True:
for i in range(g.position_count):
myorder = order_value(g.basestock_pool[i].stock, 100000)
if myorder is not None:
g.basestock_pool[i].position = myorder.amount
else:
log.error("股票: %s 买入失败", g.basestock_pool[i].stock)
log.info("====================================================================")
for i in range(g.position_count):
g.basestock_pool[i].print_stock()
g.firstrun = False
return
hour = context.current_dt.hour
minute = context.current_dt.minute
# At 14:55 each day, cancel open orders and force positions back to their original size
if hour == 14 and minute == 55:
cancel_open_order(context)
reset_position(context)
# No new trades after 14:00
if hour == 14 and minute >= 0:
return
# The moving averages need g.ma_13day_count bars, so skip the first minutes of each day
if get_minute_count(context.current_dt) < g.ma_13day_count:
# log.info("13分钟后才有交易")
return
# Update the lowest close of the last 89 minutes (or since the open if fewer have elapsed)
update_89_lowest(context)
# Update the highest close of the last 233 minutes (or since the open if fewer have elapsed)
update_233_highest(context)
# Update from order status: once both legs are filled, reset to INIT so the stock can trade again
update_socket_statue(context)
# 1. Loop over the stock pool and check the current price for buy or sell signals
for i in range(g.position_count):
stock = g.basestock_pool[i].stock
if isnan(g.basestock_pool[i].lowest_89) is True:
log.error("stock: %s's lowest_89 is None", stock)
continue
else:
lowest_89 = g.basestock_pool[i].lowest_89
if isnan(g.basestock_pool[i].highest_233) is True:
log.error("stock: %s's highest_233 is None", stock)
continue
else:
highest_233 = g.basestock_pool[i].highest_233
if g.basestock_pool[i].status == Status.NONE:
continue
# In the first minutes after the open the price may not move; highest == lowest would make the breakout line divide by zero, which simply means no breakout, so skip the stock
if lowest_89 == highest_233:
continue
# Check whether a breakout is occurring
close_m = get_price(stock, count=g.ma_13day_count, end_date=str(context.current_dt), frequency='1m',
fields=['close'])
close_4 = np.array([0.0, 0.0, 0.0, 0.0], dtype=float)
close_13 = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=float)
for j in range(g.ma_13day_count):
close_13[j] = close_m.iat[j, 0]
for j in range(g.ma_13day_count):
close_13[j] = ((close_13[j] - lowest_89) * 1.0 / (highest_233 - lowest_89)) * 4
close_4 = close_13[9:]
if close_13 is not None:
operator_line_13 = 0
operator_line_4 = 0
for item in close_13:
operator_line_13 += item
for item in close_4:
operator_line_4 += item
operator_line_13 = operator_line_13 / g.ma_13day_count
operator_line_4 = operator_line_4 / g.ma_4day_count
else:
log.warn("股票: %s 可能由于停牌等原因无法求解MA", stock)
continue
count_number = g.sampleSize * 4
df = get_price(stock, count=count_number, end_date=str(context.current_dt), frequency='1m',
fields=['close', 'volume'])
np_close = []
vol = []
print(df)
for k in range(count_number):
np_close.append(df.iat[k, 0])
vol.append(df.iat[k, 1])
evaluate_activeVolBuy(np.array(np_close), np.array(vol))
# Buy signal generated
if g.signal_buy_dict['signal_netVol_buySell'] == 1:
log.info("主动买入:%s", stock)
buy_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
g.signal_buy_dict['signal_netVol_buySell'] = 0
elif g.signal_buy_dict['signal_netVol_buySell'] == -1:
log.info("主动卖出:%s", stock)
sell_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
g.signal_buy_dict['signal_netVol_buySell'] = 0
elif ((g.basestock_pool[i].operator_value_4 < g.basestock_pool[i].operator_value_13) and (
operator_line_4 > operator_line_13) and (operator_line_13 < 0.3) and (
close_m.iat[g.ma_13day_count - 1, 0] > close_m.iat[g.ma_13day_count - 2, 0] * 0.97)):
log.info(
"金叉买入:%s, ma_4 from %f to %f, ma_13 from %f to %f, close_price: %f, yesterday_close_price: %f, lowest_89: %f, highest_233: %f",
stock, g.basestock_pool[i].operator_value_4, operator_line_4, g.basestock_pool[i].operator_value_13,
operator_line_13, close_m.iat[g.ma_4day_count - 1, 0], close_m.iat[g.ma_13day_count - 2, 0], lowest_89,
highest_233)
buy_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
# Sell signal generated
elif ((g.basestock_pool[i].operator_value_4 > g.basestock_pool[i].operator_value_13) and (
operator_line_4 < operator_line_13) and (operator_line_13 > 3.7) and (
close_m.iat[g.ma_13day_count - 1, 0] < close_m.iat[g.ma_13day_count - 2, 0] * 1.03)):
log.info(
"死叉卖出:%s, ma_4 from %f to %f, ma_13 from %f to %f, close_price: %f, yesterday_close_price: %f, lowest_89: %f, highest_233: %f",
stock, g.basestock_pool[i].operator_value_4, operator_line_4, g.basestock_pool[i].operator_value_13,
operator_line_13, close_m.iat[g.ma_4day_count - 1, 0], close_m.iat[g.ma_13day_count - 2, 0], lowest_89,
highest_233)
sell_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
# Price and volume both rising is also a buy signal
elif price_and_volume_up(context, stock):
buy_signal(context, stock, close_m.iat[g.ma_13day_count - 1, 0], i)
else:
# log.info("%s, ma_4 from %f to %f, ma_13 from %f to %f, close_price: %f, yesterday_close_price: %f, lowest_89: %f, highest_233: %f", stock, g.basestock_pool[i].operator_value_4, operator_line_4, g.basestock_pool[i].operator_value_13, operator_line_13, close_m.iat[g.ma_4day_count-1,0], close_m.iat[g.ma_13day_count-2,0], lowest_89, highest_233)
pass
g.basestock_pool[i].operator_value_4 = operator_line_4
g.basestock_pool[i].operator_value_13 = operator_line_13
def after_trading_end(context):
log.info("===========================================================================")
log.info("[统计数据]成功交易次数: %d, 重复信号交易次数: %d, 收盘前强制交易次数: %d", g.success_count, g.repeat_signal_count,
g.reset_order_count)
log.info("===========================================================================")
| 36.918567 | 357 | 0.615008 | 3,156 | 22,668 | 4.177757 | 0.117554 | 0.071293 | 0.09981 | 0.077361 | 0.583694 | 0.532499 | 0.494729 | 0.470611 | 0.447706 | 0.429503 | 0 | 0.043816 | 0.254941 | 22,668 | 613 | 358 | 36.978793 | 0.73687 | 0.069217 | 0 | 0.299539 | 0 | 0.009217 | 0.089177 | 0.020623 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0.004608 | 0.016129 | 0 | 0.129032 | 0.011521 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |