| content (string, lengths 27–928k) | path (string, lengths 4–230) | size (int64, 27–928k) | nl_text (string, lengths 21–396k) | nl_size (int64, 21–396k) | nl_language (string, lengths 2–3) | nl_language_score (float64, 0.04–1) |
|---|---|---|---|---|---|---|
"""GoldenTimes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = [
# url(r'^$', RedirectView.as_view(url='http://liujin.jios.org:8888')),
url(r'^$', RedirectView.as_view(url='/portal/')),
url(r'^admin/', admin.site.urls),
url(r'^portal/', include('portal.urls')),
url(r'^api/', include('api.urls')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
from django.conf import settings
from django.conf.urls.static import static
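# Note: Django's static() helper generates URL patterns only when
# settings.DEBUG is True; in production it returns an empty list and the web
# server (or e.g. whitenoise) must serve these files instead.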
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| GoldenTimes/urls.py | 1,414 | GoldenTimes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
url(r'^$', RedirectView.as_view(url='http://liujin.jios.org:8888')), | 708 | en | 0.618022 |
import sys
##print ("This is the name of the script: ", sys.argv[0])
##print ("Number of arguments: ", len(sys.argv))
##print ("The arguments are: " , str(sys.argv))
lemmas = []
lemmas_cleaned = []
nums = ['1','2','3','4','5','6','7','8','9','0']
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o',
'p','q','r','s','t','u','v','w','x','y','z',
'ā','â','ā',
'ê','ė', 'ē',
'ô','ō',
'ū','û',
'ī','î']
POS_tags = ['as., st. V. (1)', 'as., st. V. (2)', 'as., st. V. (3a)', 'as., st. V. (3b)', 'as., st. V. (4)', 'as., st. V. (5)', 'as., st. V. (6)', 'as., st. V. (7)', \
'as., sw. V. (1a)', 'as., sw. V. (1b)', 'as., sw. V. (2)', \
'as., red. V.',\
'as., st. M. (a)']
def dict_scrape(POS, dictionaryfile='as_freq.txt'):
"""Scrapes a dictionary for a given part of speech. POS tags in POS_tags.
POS(str), dictionaryfile(str-of-filename) -> list-of-strings
"""
if POS in POS_tags:
with open(dictionaryfile) as to_scrape:
for line in to_scrape:
if POS in line:
lemmas.append(line)
        for line in lemmas:
            # Example entry: "1, bāga, as., st. F. (ō)?, sw. F. (n)?: nhd. Streit"
            # Scrub the leading frequency figure and punctuation: count
            # non-alphabet characters and stop at the headword's first letter.
            # (Without the break, every later comma, space and capital in the
            # first 44 characters would also be counted, cutting mid-entry.)
            i = 0
            for char in line[:44]:
                if char not in alphabet:
                    i = i + 1
                else:
                    break
            lemmas_cleaned.append(line[i:].strip().replace('*', '').replace('?', '') + '\n')
## print("Found " + str(len(lemmas_cleaned)) + " lemmas matching that category")
return lemmas_cleaned
dict_scrape(sys.argv[1])
# Open the output file once; reopening it with 'w+' inside the loop would
# truncate it on every iteration, keeping only the last lemma. The output
# filename drops the leading "as., " (5 characters) from the POS tag.
with open(sys.argv[1][5:], 'w+') as to_write:
    for line in lemmas_cleaned:
        to_write.write(line)
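# Hypothetical invocation (the POS tag is quoted as a single argument); with
# sys.argv[1] = "as., st. V. (1)" the output file is named "st. V. (1)":
#   python dict_scrape.py "as., st. V. (1)"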
| dictionaries/archives/dict_scrape.py | 1,785 | Scrapes a dictionary for a given part of speech. POS tags in POS_tags.
POS(str), dictionaryfile(str-of-filename) -> list-of-strings
print ("This is the name of the script: ", sys.argv[0])print ("Number of arguments: ", len(sys.argv))print ("The arguments are: " , str(sys.argv))1, bāga, as., st. F. (ō)?, sw. F. (n)?: nhd. Streitscrub line of the frequency data, begin with headword? print("Found " + str(len(lemmas_cleaned)) + " lemmas matching that category") | 504 | en | 0.546791 |
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
import datetime
import uuid
from types import ListType
from shutil import rmtree
from os.path import join, exists, dirname, basename, abspath, normpath, splitext
from os.path import relpath
from os import linesep, remove, makedirs
from time import time
from intelhex import IntelHex
from json import load, dump
from tools.utils import mkdir, run_cmd, run_cmd_ext, NotSupportedException,\
ToolException, InvalidReleaseTargetException, intelhex_offset
from tools.paths import MBED_CMSIS_PATH, MBED_TARGETS_PATH, MBED_LIBRARIES,\
MBED_HEADER, MBED_DRIVERS, MBED_PLATFORM, MBED_HAL, MBED_CONFIG_FILE,\
MBED_LIBRARIES_DRIVERS, MBED_LIBRARIES_PLATFORM, MBED_LIBRARIES_HAL,\
BUILD_DIR
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.libraries import Library
from tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
from tools.config import Config
RELEASE_VERSIONS = ['2', '5']
def prep_report(report, target_name, toolchain_name, id_name):
"""Setup report keys
Positional arguments:
report - the report to fill
target_name - the target being used
toolchain_name - the toolchain being used
id_name - the name of the executable or library being built
"""
if not target_name in report:
report[target_name] = {}
if not toolchain_name in report[target_name]:
report[target_name][toolchain_name] = {}
if not id_name in report[target_name][toolchain_name]:
report[target_name][toolchain_name][id_name] = []
def prep_properties(properties, target_name, toolchain_name, vendor_label):
"""Setup test properties
Positional arguments:
properties - the dict to fill
target_name - the target the test is targeting
toolchain_name - the toolchain that will compile the test
vendor_label - the vendor
"""
if not target_name in properties:
properties[target_name] = {}
if not toolchain_name in properties[target_name]:
properties[target_name][toolchain_name] = {}
properties[target_name][toolchain_name]["target"] = target_name
properties[target_name][toolchain_name]["vendor"] = vendor_label
properties[target_name][toolchain_name]["toolchain"] = toolchain_name
def create_result(target_name, toolchain_name, id_name, description):
"""Create a result dictionary
Positional arguments:
target_name - the target being built for
toolchain_name - the toolchain doing the building
id_name - the name of the executable or library being built
description - a human readable description of what's going on
"""
cur_result = {}
cur_result["target_name"] = target_name
cur_result["toolchain_name"] = toolchain_name
cur_result["id"] = id_name
cur_result["description"] = description
cur_result["elapsed_time"] = 0
cur_result["output"] = ""
return cur_result
def add_result_to_report(report, result):
"""Add a single result to a report dictionary
Positional arguments:
report - the report to append to
result - the result to append
"""
result["date"] = datetime.datetime.utcnow().isoformat()
result["uuid"] = str(uuid.uuid1())
target = result["target_name"]
toolchain = result["toolchain_name"]
id_name = result['id']
result_wrap = {0: result}
report[target][toolchain][id_name].append(result_wrap)
def get_config(src_paths, target, toolchain_name):
"""Get the configuration object for a target-toolchain combination
Positional arguments:
src_paths - paths to scan for the configuration files
target - the device we are building for
toolchain_name - the string that identifies the build tools
"""
# Convert src_paths to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Pass all params to the unified prepare_resources()
toolchain = prepare_toolchain(src_paths, None, target, toolchain_name)
# Scan src_path for config files
resources = toolchain.scan_resources(src_paths[0])
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path))
# Update configuration files until added features creates no changes
prev_features = set()
while True:
# Update the configuration with any .json files found while scanning
toolchain.config.add_config_files(resources.json_files)
# Add features while we find new ones
features = set(toolchain.config.get_features())
if features == prev_features:
break
for feature in features:
if feature in resources.features:
resources += resources.features[feature]
prev_features = features
toolchain.config.validate_config()
if toolchain.config.has_regions:
_ = list(toolchain.config.regions)
cfg, macros = toolchain.config.get_config_data()
features = toolchain.config.get_features()
return cfg, macros, features
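# Typical use of get_config (target/toolchain names are illustrative):
#   cfg, macros, features = get_config(["."], "K64F", "GCC_ARM")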
def is_official_target(target_name, version):
""" Returns True, None if a target is part of the official release for the
given version. Return False, 'reason' if a target is not part of the
official release for the given version.
Positional arguments:
    target_name - Name of the target (ex. 'K64F')
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
result = True
reason = None
target = TARGET_MAP[target_name]
if hasattr(target, 'release_versions') \
and version in target.release_versions:
if version == '2':
# For version 2, either ARM or uARM toolchain support is required
required_toolchains = set(['ARM', 'uARM'])
if not len(required_toolchains.intersection(
set(target.supported_toolchains))) > 0:
result = False
reason = ("Target '%s' must support " % target.name) + \
("one of the folowing toolchains to be included in the") + \
((" mbed 2.0 official release: %s" + linesep) %
", ".join(required_toolchains)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(target.supported_toolchains))
elif version == '5':
# For version 5, ARM, GCC_ARM, and IAR toolchain support is required
required_toolchains = set(['ARM', 'GCC_ARM', 'IAR'])
required_toolchains_sorted = list(required_toolchains)
required_toolchains_sorted.sort()
supported_toolchains = set(target.supported_toolchains)
supported_toolchains_sorted = list(supported_toolchains)
supported_toolchains_sorted.sort()
if not required_toolchains.issubset(supported_toolchains):
result = False
reason = ("Target '%s' must support " % target.name) + \
("ALL of the folowing toolchains to be included in the") + \
((" mbed OS 5.0 official release: %s" + linesep) %
", ".join(required_toolchains_sorted)) + \
("Currently it is only configured to support the ") + \
("following toolchains: %s" %
", ".join(supported_toolchains_sorted))
elif not target.default_lib == 'std':
result = False
reason = ("Target '%s' must set the " % target.name) + \
("'default_lib' to 'std' to be included in the ") + \
("mbed OS 5.0 official release." + linesep) + \
("Currently it is set to '%s'" % target.default_lib)
else:
result = False
reason = ("Target '%s' has set an invalid release version of '%s'" %
version) + \
("Please choose from the following release versions: %s" %
', '.join(RELEASE_VERSIONS))
else:
result = False
if not hasattr(target, 'release_versions'):
reason = "Target '%s' " % target.name
reason += "does not have the 'release_versions' key set"
elif not version in target.release_versions:
reason = "Target '%s' does not contain the version '%s' " % \
(target.name, version)
reason += "in its 'release_versions' key"
return result, reason
def transform_release_toolchains(toolchains, version):
""" Given a list of toolchains and a release version, return a list of
only the supported toolchains for that release
Positional arguments:
toolchains - The list of toolchains
version - The release version string. Should be a string contained within
RELEASE_VERSIONS
"""
if version == '5':
return ['ARM', 'GCC_ARM', 'IAR']
else:
return toolchains
def get_mbed_official_release(version):
""" Given a release version string, return a tuple that contains a target
and the supported toolchains for that release.
Ex. Given '2', return (('LPC1768', ('ARM', 'GCC_ARM')),
('K64F', ('ARM', 'GCC_ARM')), ...)
Positional arguments:
version - The version string. Should be a string contained within
RELEASE_VERSIONS
"""
mbed_official_release = (
tuple(
tuple(
[
TARGET_MAP[target].name,
tuple(transform_release_toolchains(
TARGET_MAP[target].supported_toolchains, version))
]
) for target in TARGET_NAMES \
if (hasattr(TARGET_MAP[target], 'release_versions')
and version in TARGET_MAP[target].release_versions)
)
)
for target in mbed_official_release:
is_official, reason = is_official_target(target[0], version)
if not is_official:
raise InvalidReleaseTargetException(reason)
return mbed_official_release
def add_regions_to_profile(profile, config, toolchain_class):
"""Add regions to the build profile, if there are any.
Positional Arguments:
profile - the profile to update
config - the configuration object that owns the region
toolchain_class - the class of the toolchain being used
"""
if not profile:
return
regions = list(config.regions)
for region in regions:
for define in [(region.name.upper() + "_ADDR", region.start),
(region.name.upper() + "_SIZE", region.size)]:
profile["common"].append("-D%s=0x%x" % define)
active_region = [r for r in regions if r.active][0]
for define in [("MBED_APP_START", active_region.start),
("MBED_APP_SIZE", active_region.size)]:
profile["ld"].append(toolchain_class.make_ld_define(*define))
print("Using regions in this build:")
for region in regions:
print(" Region %s size 0x%x, offset 0x%x"
% (region.name, region.size, region.start))
def prepare_toolchain(src_paths, build_dir, target, toolchain_name,
macros=None, clean=False, jobs=1,
notify=None, silent=False, verbose=False,
extra_verbose=False, config=None,
app_config=None, build_profile=None):
""" Prepares resource related objects - toolchain, target, config
Positional arguments:
src_paths - the paths to source directories
target - ['LPC1768', 'LPC11U24', etc.]
toolchain_name - ['ARM', 'uARM', 'GCC_ARM', 'GCC_CR']
Keyword arguments:
macros - additional macros
clean - Rebuild everything if True
jobs - how many compilers we can run at once
notify - Notify function for logs
silent - suppress printing of progress indicators
verbose - Write the actual tools command lines used if True
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a list of mergeable build profiles
"""
# We need to remove all paths which are repeated to avoid
# multiple compilations and linking with the same objects
src_paths = [src_paths[0]] + list(set(src_paths[1:]))
# If the configuration object was not yet created, create it now
config = config or Config(target, src_paths, app_config=app_config)
target = config.target
try:
cur_tc = TOOLCHAIN_CLASSES[toolchain_name]
except KeyError:
raise KeyError("Toolchain %s not supported" % toolchain_name)
profile = {'c': [], 'cxx': [], 'common': [], 'asm': [], 'ld': []}
for contents in build_profile or []:
for key in profile:
profile[key].extend(contents[toolchain_name][key])
if config.has_regions:
add_regions_to_profile(profile, config, cur_tc)
toolchain = cur_tc(target, notify, macros, silent, build_dir=build_dir,
extra_verbose=extra_verbose, build_profile=profile)
toolchain.config = config
toolchain.jobs = jobs
toolchain.build_all = clean
toolchain.VERBOSE = verbose
return toolchain
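# Typical use of prepare_toolchain (target/toolchain names are illustrative):
#   toolchain = prepare_toolchain(["."], BUILD_DIR, "K64F", "GCC_ARM", jobs=4)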
def merge_region_list(region_list, destination, padding=b'\xFF'):
"""Merege the region_list into a single image
Positional Arguments:
region_list - list of regions, which should contain filenames
destination - file name to write all regions to
    padding - bytes to fill gaps with
"""
merged = IntelHex()
print("Merging Regions:")
for region in region_list:
if region.active and not region.filename:
raise ToolException("Active region has no contents: No file found.")
if region.filename:
print(" Filling region %s with %s" % (region.name, region.filename))
part = intelhex_offset(region.filename, offset=region.start)
part_size = (part.maxaddr() - part.minaddr()) + 1
if part_size > region.size:
raise ToolException("Contents of region %s does not fit"
% region.name)
merged.merge(part)
pad_size = region.size - part_size
if pad_size > 0 and region != region_list[-1]:
print(" Padding region %s with 0x%x bytes" % (region.name, pad_size))
merged.puts(merged.maxaddr() + 1, padding * pad_size)
if not exists(dirname(destination)):
makedirs(dirname(destination))
print("Space used after regions merged: 0x%x" %
(merged.maxaddr() - merged.minaddr() + 1))
with open(destination, "wb+") as output:
merged.tofile(output, format='bin')
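# Note: build_project (below) calls merge_region_list after filling the active
# region's filename with the freshly linked application image.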
def scan_resources(src_paths, toolchain, dependencies_paths=None,
inc_dirs=None, base_path=None, collect_ignores=False):
""" Scan resources using initialized toolcain
Positional arguments
src_paths - the paths to source directories
toolchain - valid toolchain object
dependencies_paths - dependency paths that we should scan for include dirs
inc_dirs - additional include directories which should be added to
the scanner resources
"""
# Scan src_path
resources = toolchain.scan_resources(src_paths[0], base_path=base_path,
collect_ignores=collect_ignores)
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path, base_path=base_path,
collect_ignores=collect_ignores))
# Scan dependency paths for include dirs
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
resources.inc_dirs.extend(lib_resources.inc_dirs)
# Add additional include directories if passed
if inc_dirs:
if type(inc_dirs) == ListType:
resources.inc_dirs.extend(inc_dirs)
else:
resources.inc_dirs.append(inc_dirs)
# Load resources into the config system which might expand/modify resources
# based on config data
resources = toolchain.config.load_resources(resources)
# Set the toolchain's configuration data
toolchain.set_config_data(toolchain.config.get_config_data())
if (hasattr(toolchain.target, "release_versions") and
"5" not in toolchain.target.release_versions and
"rtos" in toolchain.config.lib_config_data):
if "Cortex-A" in toolchain.target.core:
raise NotSupportedException(
("%s Will be supported in mbed OS 5.6. "
"To use the %s, please checkout the mbed OS 5.4 release branch. "
"See https://developer.mbed.org/platforms/Renesas-GR-PEACH/#important-notice "
"for more information") % (toolchain.target.name, toolchain.target.name))
else:
raise NotSupportedException("Target does not support mbed OS 5")
return resources
def build_project(src_paths, build_path, target, toolchain_name,
libraries_paths=None, linker_script=None,
clean=False, notify=None, verbose=False, name=None,
macros=None, inc_dirs=None, jobs=1, silent=False,
report=None, properties=None, project_id=None,
project_description=None, extra_verbose=False, config=None,
app_config=None, build_profile=None, stats_depth=None):
""" Build a project. A project may be a test or a user program.
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the project
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
libraries_paths - The location of libraries to include when linking
    linker_script - the file that drives the linker to do its job
clean - Rebuild everything if True
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
name - the name of the project
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict of build properties to fill (target, toolchain,
        vendor; see prep_properties)
project_id - the name put in the report
project_description - the human-readable version of what this thing does
extra_verbose - even more output!
config - a Config object to use instead of creating one
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
stats_depth - depth level for memap to display file/dirs
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
    # Extend src_paths with libraries_paths
    if libraries_paths is not None:
        src_paths.extend(libraries_paths)
        # Guard against the default inc_dirs=None before extending it.
        if inc_dirs is None:
            inc_dirs = []
        inc_dirs.extend(map(dirname, libraries_paths))
if clean and exists(build_path):
rmtree(build_path)
mkdir(build_path)
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent, verbose=verbose,
extra_verbose=extra_verbose, config=config, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
name = (name or toolchain.config.name or
basename(normpath(abspath(src_paths[0]))))
toolchain.info("Building project %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = project_description if project_description else name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain, inc_dirs=inc_dirs)
# Change linker script if specified
if linker_script is not None:
resources.linker_script = linker_script
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
# Link Program
if toolchain.config.has_regions:
res, _ = toolchain.link_program(resources, build_path, name + "_application")
region_list = list(toolchain.config.regions)
region_list = [r._replace(filename=res) if r.active else r
for r in region_list]
res = join(build_path, name) + ".bin"
merge_region_list(region_list, res)
else:
res, _ = toolchain.link_program(resources, build_path, name)
memap_instance = getattr(toolchain, 'memap_instance', None)
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table', stats_depth)
if not silent:
                print(memap_table)
# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', stats_depth, map_out)
# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', stats_depth, map_csv)
resources.detect_duplicates(toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output() + memap_table
cur_result["result"] = "OK"
cur_result["memory_usage"] = memap_instance.mem_report
cur_result["bin"] = res
cur_result["elf"] = splitext(res)[0] + ".elf"
cur_result.update(toolchain.report)
add_result_to_report(report, cur_result)
return res
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
else:
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
def build_library(src_paths, build_path, target, toolchain_name,
dependencies_paths=None, name=None, clean=False,
archive=True, notify=None, verbose=False, macros=None,
inc_dirs=None, jobs=1, silent=False, report=None,
properties=None, extra_verbose=False, project_id=None,
remove_config_header_file=False, app_config=None,
build_profile=None):
""" Build a library
Positional arguments:
src_paths - a path or list of paths that contain all files needed to build
the library
build_path - the directory where all of the object files will be placed
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
dependencies_paths - The location of libraries to include when linking
name - the name of the library
clean - Rebuild everything if True
archive - whether the library will create an archive file
notify - Notify function for logs
verbose - Write the actual tools command lines used if True
macros - additional macros
inc_dirs - additional directories where include files may be found
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict of build properties to fill (target, toolchain,
        vendor; see prep_properties)
extra_verbose - even more output!
project_id - the name that goes in the report
remove_config_header_file - delete config header file when done building
app_config - location of a chosen mbed_app.json file
build_profile - a dict of flags that will be passed to the compiler
"""
# Convert src_path to a list if needed
if type(src_paths) != ListType:
src_paths = [src_paths]
# Build path
if archive:
# Use temp path when building archive
tmp_path = join(build_path, '.temp')
mkdir(tmp_path)
else:
tmp_path = build_path
# Clean the build directory
if clean and exists(tmp_path):
rmtree(tmp_path)
mkdir(tmp_path)
# Pass all params to the unified prepare_toolchain()
toolchain = prepare_toolchain(
src_paths, build_path, target, toolchain_name, macros=macros,
clean=clean, jobs=jobs, notify=notify, silent=silent,
verbose=verbose, extra_verbose=extra_verbose, app_config=app_config,
build_profile=build_profile)
# The first path will give the name to the library
if name is None:
name = basename(normpath(abspath(src_paths[0])))
toolchain.info("Building library %s (%s, %s)" %
(name, toolchain.target.name, toolchain_name))
# Initialize reporting
if report != None:
start = time()
# If project_id is specified, use that over the default name
id_name = project_id.upper() if project_id else name.upper()
description = name
vendor_label = toolchain.target.extra_labels[0]
prep_report(report, toolchain.target.name, toolchain_name, id_name)
cur_result = create_result(toolchain.target.name, toolchain_name,
id_name, description)
cur_result['type'] = 'library'
if properties != None:
prep_properties(properties, toolchain.target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Call unified scan_resources
resources = scan_resources(src_paths, toolchain,
dependencies_paths=dependencies_paths,
inc_dirs=inc_dirs)
# Copy headers, objects and static libraries - all files needed for
# static lib
toolchain.copy_files(resources.headers, build_path, resources=resources)
toolchain.copy_files(resources.objects, build_path, resources=resources)
toolchain.copy_files(resources.libraries, build_path,
resources=resources)
toolchain.copy_files(resources.json_files, build_path,
resources=resources)
if resources.linker_script:
toolchain.copy_files(resources.linker_script, build_path,
resources=resources)
if resources.hex_files:
toolchain.copy_files(resources.hex_files, build_path,
resources=resources)
# Compile Sources
objects = toolchain.compile_sources(resources, resources.inc_dirs)
resources.objects.extend(objects)
if archive:
toolchain.build_library(objects, build_path, name)
if remove_config_header_file:
config_header_path = toolchain.get_config_header()
if config_header_path:
remove(config_header_path)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
if isinstance(exc, ToolException):
cur_result["result"] = "FAIL"
elif isinstance(exc, NotSupportedException):
cur_result["result"] = "NOT_SUPPORTED"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
######################
### Legacy methods ###
######################
def mbed2_obj_path(target_name, toolchain_name):
real_tc_name = TOOLCHAIN_CLASSES[toolchain_name].__name__
return join("TARGET_" + target_name, "TOOLCHAIN_" + real_tc_name)
def build_lib(lib_id, target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Legacy method for building mbed libraries
Positional arguments:
lib_id - the library's unique identifier
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
clean - Rebuild everything if True
verbose - Write the actual tools command lines used if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict of build properties to fill (target, toolchain,
        vendor; see prep_properties)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
lib = Library(lib_id)
if not lib.is_supported(target, toolchain_name):
print('Library "%s" is not yet supported on target %s with toolchain %s'
% (lib_id, target.name, toolchain_name))
return False
# We need to combine macros from parameter list with macros from library
# definition
lib_macros = lib.macros if lib.macros else []
if macros:
macros.extend(lib_macros)
else:
macros = lib_macros
src_paths = lib.source_dir
build_path = lib.build_dir
dependencies_paths = lib.dependencies
inc_dirs = lib.inc_dirs
inc_dirs_ext = lib.inc_dirs_ext
if type(src_paths) != ListType:
src_paths = [src_paths]
# The first path will give the name to the library
name = basename(src_paths[0])
if report != None:
start = time()
id_name = name.upper()
description = name
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
for src_path in src_paths:
if not exists(src_path):
error_msg = "The library source folder does not exist: %s", src_path
if report != None:
cur_result["output"] = error_msg
cur_result["result"] = "FAIL"
add_result_to_report(report, cur_result)
raise Exception(error_msg)
try:
# Toolchain instance
# Create the desired build directory structure
bin_path = join(build_path, mbed2_obj_path(target.name, toolchain_name))
mkdir(bin_path)
tmp_path = join(build_path, '.temp', mbed2_obj_path(target.name,
toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
src_paths, tmp_path, target, toolchain_name, macros=macros,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
toolchain.info("Building library %s (%s, %s)" %
(name.upper(), target.name, toolchain_name))
# Take into account the library configuration (MBED_CONFIG_FILE)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
# Scan Resources
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
# Add extra include directories / files which are required by library
        # These files are usually not in the same directory as the source
        # files, so the previous scan will not include them
if inc_dirs_ext is not None:
for inc_ext in inc_dirs_ext:
resources.append(toolchain.scan_resources(inc_ext))
# Dependencies Include Paths
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
dependencies_include_dir.extend(map(dirname, lib_resources.inc_dirs))
if inc_dirs:
dependencies_include_dir.extend(inc_dirs)
# Add other discovered configuration data to the configuration object
for res in resources:
config.load_resources(res)
toolchain.set_config_data(toolchain.config.get_config_data())
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path,
resources=resource)
dependencies_include_dir.extend(
toolchain.scan_resources(build_path).inc_dirs)
# Compile Sources
objects = []
for resource in resources:
objects.extend(toolchain.compile_sources(resource, dependencies_include_dir))
needed_update = toolchain.build_library(objects, bin_path, name)
if report != None and needed_update:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
# We do have unique legacy conventions about how we build and package the mbed
# library
def build_mbed_libs(target, toolchain_name, verbose=False,
clean=False, macros=None, notify=None, jobs=1, silent=False,
report=None, properties=None, extra_verbose=False,
build_profile=None):
""" Function returns True is library was built and false if building was
skipped
Positional arguments:
target - the MCU or board that the project will compile for
toolchain_name - the name of the build tools
Keyword arguments:
verbose - Write the actual tools command lines used if True
clean - Rebuild everything if True
macros - additional macros
notify - Notify function for logs
jobs - how many compilers we can run at once
silent - suppress printing of progress indicators
report - a dict where a result may be appended
    properties - a dict of build properties to fill (target, toolchain,
        vendor; see prep_properties)
extra_verbose - even more output!
build_profile - a dict of flags that will be passed to the compiler
"""
if report != None:
start = time()
id_name = "MBED"
description = "mbed SDK"
vendor_label = target.extra_labels[0]
cur_result = None
prep_report(report, target.name, toolchain_name, id_name)
cur_result = create_result(target.name, toolchain_name, id_name,
description)
if properties != None:
prep_properties(properties, target.name, toolchain_name,
vendor_label)
# Check toolchain support
if toolchain_name not in target.supported_toolchains:
supported_toolchains_text = ", ".join(target.supported_toolchains)
print('%s target is not yet supported by toolchain %s' %
(target.name, toolchain_name))
print('%s target supports %s toolchain%s' %
(target.name, supported_toolchains_text, 's'
if len(target.supported_toolchains) > 1 else ''))
if report != None:
cur_result["result"] = "SKIP"
add_result_to_report(report, cur_result)
return False
try:
# Source and Build Paths
build_target = join(MBED_LIBRARIES, "TARGET_" + target.name)
build_toolchain = join(MBED_LIBRARIES, mbed2_obj_path(target.name, toolchain_name))
mkdir(build_toolchain)
# Toolchain
tmp_path = join(MBED_LIBRARIES, '.temp', mbed2_obj_path(target.name, toolchain_name))
mkdir(tmp_path)
toolchain = prepare_toolchain(
[""], tmp_path, target, toolchain_name, macros=macros,verbose=verbose,
notify=notify, silent=silent, extra_verbose=extra_verbose,
build_profile=build_profile, jobs=jobs, clean=clean)
# Take into account the library configuration (MBED_CONFIG_FILE)
config = toolchain.config
config.add_config_files([MBED_CONFIG_FILE])
toolchain.set_config_data(toolchain.config.get_config_data())
# CMSIS
toolchain.info("Building library %s (%s, %s)" %
('CMSIS', target.name, toolchain_name))
cmsis_src = MBED_CMSIS_PATH
resources = toolchain.scan_resources(cmsis_src)
toolchain.copy_files(resources.headers, build_target)
toolchain.copy_files(resources.linker_script, build_toolchain)
toolchain.copy_files(resources.bin_files, build_toolchain)
objects = toolchain.compile_sources(resources, tmp_path)
toolchain.copy_files(objects, build_toolchain)
# mbed
toolchain.info("Building library %s (%s, %s)" %
('MBED', target.name, toolchain_name))
# Common Headers
toolchain.copy_files([MBED_HEADER], MBED_LIBRARIES)
library_incdirs = [dirname(MBED_LIBRARIES), MBED_LIBRARIES]
for dir, dest in [(MBED_DRIVERS, MBED_LIBRARIES_DRIVERS),
(MBED_PLATFORM, MBED_LIBRARIES_PLATFORM),
(MBED_HAL, MBED_LIBRARIES_HAL)]:
resources = toolchain.scan_resources(dir)
toolchain.copy_files(resources.headers, dest)
library_incdirs.append(dest)
# Target specific sources
hal_src = MBED_TARGETS_PATH
hal_implementation = toolchain.scan_resources(hal_src)
toolchain.copy_files(hal_implementation.headers +
hal_implementation.hex_files +
hal_implementation.libraries +
[MBED_CONFIG_FILE],
build_target, resources=hal_implementation)
toolchain.copy_files(hal_implementation.linker_script, build_toolchain)
toolchain.copy_files(hal_implementation.bin_files, build_toolchain)
incdirs = toolchain.scan_resources(build_target).inc_dirs
objects = toolchain.compile_sources(hal_implementation,
library_incdirs + incdirs)
toolchain.copy_files(objects, build_toolchain)
# Common Sources
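        # scan_resources() returns a Resources object whose __add__/__radd__
        # treat a None operand as empty (an assumption about the Resources
        # class in tools.toolchains), so accumulating onto None below works.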
mbed_resources = None
for dir in [MBED_DRIVERS, MBED_PLATFORM, MBED_HAL]:
mbed_resources += toolchain.scan_resources(dir)
objects = toolchain.compile_sources(mbed_resources,
library_incdirs + incdirs)
    # A number of compiled files need to be copied as separate objects rather
    # than archived, because of the way the linker searches for symbols in
    # archives. These are:
# - mbed_retarget.o: to make sure that the C standard lib symbols get
# overridden
# - mbed_board.o: mbed_die is weak
# - mbed_overrides.o: this contains platform overrides of various
# weak SDK functions
# - mbed_main.o: this contains main redirection
separate_names, separate_objects = ['mbed_retarget.o', 'mbed_board.o',
'mbed_overrides.o', 'mbed_main.o', 'mbed_sdk_boot.o'], []
for obj in objects:
for name in separate_names:
if obj.endswith(name):
separate_objects.append(obj)
for obj in separate_objects:
objects.remove(obj)
toolchain.build_library(objects, build_toolchain, "mbed")
for obj in separate_objects:
toolchain.copy_files(obj, build_toolchain)
if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["result"] = "OK"
add_result_to_report(report, cur_result)
return True
except Exception as exc:
if report != None:
end = time()
cur_result["result"] = "FAIL"
cur_result["elapsed_time"] = end - start
toolchain_output = toolchain.get_output()
if toolchain_output:
cur_result["output"] += toolchain_output
cur_result["output"] += str(exc)
add_result_to_report(report, cur_result)
# Let Exception propagate
raise
def get_unique_supported_toolchains(release_targets=None):
""" Get list of all unique toolchains supported by targets
Keyword arguments:
release_targets - tuple structure returned from get_mbed_official_release().
If release_targets is not specified, then it queries all
known targets
"""
unique_supported_toolchains = []
if not release_targets:
for target in TARGET_NAMES:
for toolchain in TARGET_MAP[target].supported_toolchains:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
else:
for target in release_targets:
for toolchain in target[1]:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
if "ARM" in unique_supported_toolchains:
unique_supported_toolchains.append("ARMC6")
return unique_supported_toolchains
def mcu_toolchain_list(release_version='5'):
""" Shows list of toolchains
"""
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
columns = ["mbed OS %s" % x for x in RELEASE_VERSIONS] + unique_supported_toolchains
return "\n".join(columns)
def mcu_target_list(release_version='5'):
""" Shows target list
"""
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
return "\n".join(target_names)
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None,
release_version='5'):
""" Shows target map using prettytable
Keyword arguments:
verbose_html - emit html instead of a simple table
platform_filter - remove results that match the string
release_version - get the matrix for this major version number
"""
# Only use it in this function so building works without extra modules
from prettytable import PrettyTable
if isinstance(release_version, basestring):
# Force release_version to lowercase if it is a string
release_version = release_version.lower()
else:
# Otherwise default to printing all known targets and toolchains
release_version = 'all'
version_release_targets = {}
version_release_target_names = {}
for version in RELEASE_VERSIONS:
version_release_targets[version] = get_mbed_official_release(version)
version_release_target_names[version] = [x[0] for x in
version_release_targets[
version]]
if release_version in RELEASE_VERSIONS:
release_targets = version_release_targets[release_version]
else:
release_targets = None
unique_supported_toolchains = get_unique_supported_toolchains(
release_targets)
prepend_columns = ["Target"] + ["mbed OS %s" % x for x in RELEASE_VERSIONS]
# All tests status table print
columns = prepend_columns + unique_supported_toolchains
table_printer = PrettyTable(columns)
# Align table
for col in columns:
table_printer.align[col] = "c"
table_printer.align["Target"] = "l"
perm_counter = 0
target_counter = 0
target_names = []
if release_targets:
target_names = [x[0] for x in release_targets]
else:
target_names = TARGET_NAMES
for target in sorted(target_names):
if platform_filter is not None:
            # Filter out platforms using regex
if re.search(platform_filter, target) is None:
continue
target_counter += 1
row = [target] # First column is platform name
for version in RELEASE_VERSIONS:
if target in version_release_target_names[version]:
text = "Supported"
else:
text = "-"
row.append(text)
for unique_toolchain in unique_supported_toolchains:
if (unique_toolchain in TARGET_MAP[target].supported_toolchains or
(unique_toolchain == "ARMC6" and
"ARM" in TARGET_MAP[target].supported_toolchains)):
text = "Supported"
perm_counter += 1
else:
text = "-"
row.append(text)
table_printer.add_row(row)
result = table_printer.get_html_string() if verbose_html \
else table_printer.get_string()
result += "\n"
result += "Supported targets: %d\n"% (target_counter)
if target_counter == 1:
result += "Supported toolchains: %d"% (perm_counter)
return result
def get_target_supported_toolchains(target):
""" Returns target supported toolchains list
Positional arguments:
target - the target to get the supported toolchains of
"""
return TARGET_MAP[target].supported_toolchains if target in TARGET_MAP \
else None
def print_build_results(result_list, build_name):
""" Generate result string for build results
Positional arguments:
result_list - the list of results to print
build_name - the name of the build we are printing result for
"""
result = ""
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
def print_build_memory_usage(report):
""" Generate result table with memory usage values for build results
Aggregates (puts together) reports obtained from self.get_memory_summary()
Positional arguments:
report - Report generated during build procedure.
"""
from prettytable import PrettyTable
columns_text = ['name', 'target', 'toolchain']
columns_int = ['static_ram', 'total_flash']
table = PrettyTable(columns_text + columns_int)
for col in columns_text:
table.align[col] = 'l'
for col in columns_int:
table.align[col] = 'r'
for target in report:
for toolchain in report[target]:
for name in report[target][toolchain]:
for dlist in report[target][toolchain][name]:
for dlistelem in dlist:
# Get 'memory_usage' record and build table with
# statistics
record = dlist[dlistelem]
if 'memory_usage' in record and record['memory_usage']:
# Note that summary should be in the last record of
# 'memory_usage' section. This is why we are
# grabbing last "[-1]" record.
row = [
record['description'],
record['target_name'],
record['toolchain_name'],
record['memory_usage'][-1]['summary'][
'static_ram'],
record['memory_usage'][-1]['summary'][
'total_flash'],
]
table.add_row(row)
result = "Memory map breakdown for built projects (values in Bytes):\n"
result += table.get_string(sortby='name')
return result
def write_build_report(build_report, template_filename, filename):
"""Write a build report to disk using a template file
Positional arguments:
build_report - a report generated by the build system
template_filename - a file that contains the template for the style of build
report
filename - the location on disk to write the file to
"""
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as placeholder:
placeholder.write(template.render(
failing_builds=build_report_failing,
passing_builds=build_report_passing))
def merge_build_data(filename, toolchain_report, app_type):
path_to_file = dirname(abspath(filename))
    try:
        with open(filename) as in_file:
            build_data = load(in_file)
    except (IOError, ValueError):
        build_data = {'builds': []}
for tgt in toolchain_report.values():
for tc in tgt.values():
for project in tc.values():
for build in project:
try:
build[0]['elf'] = relpath(build[0]['elf'], path_to_file)
build[0]['bin'] = relpath(build[0]['bin'], path_to_file)
except KeyError:
pass
if 'type' not in build[0]:
build[0]['type'] = app_type
build_data['builds'].append(build[0])
    with open(filename, "wb") as out_file:
        dump(build_data, out_file, indent=4, separators=(',', ': '))
| os-lib/mbed-os/tools/build_api.py | 54,457 | Convert src_paths to a list if needed Pass all params to the unified prepare_resources() Scan src_path for config files Update configuration files until added features creates no changes Update the configuration with any .json files found while scanning Add features while we find new ones For version 2, either ARM or uARM toolchain support is required For version 5, ARM, GCC_ARM, and IAR toolchain support is required We need to remove all paths which are repeated to avoid multiple compilations and linking with the same objects If the configuration object was not yet created, create it now Scan src_path Scan dependency paths for include dirs Add additional include directories if passed Load resources into the config system which might expand/modify resources based on config data Set the toolchain's configuration data Convert src_path to a list if needed Extend src_paths wiht libraries_paths The first path will give the name to the library Initialize reporting If project_id is specified, use that over the default name Call unified scan_resources Change linker script if specified Compile Sources Link Program Write output to stdout in text (pretty table) format Write output to file in JSON format Write output to file in CSV format for the CI Let Exception propagate Convert src_path to a list if needed Build path Use temp path when building archive Clean the build directory Pass all params to the unified prepare_toolchain() The first path will give the name to the library Initialize reporting If project_id is specified, use that over the default name Call unified scan_resources Copy headers, objects and static libraries - all files needed for static lib Compile Sources Let Exception propagate Legacy methods We need to combine macros from parameter list with macros from library definition The first path will give the name to the library Toolchain instance Create the desired build directory structure Take into account the library configuration (MBED_CONFIG_FILE) Scan Resources Add extra include directories / files which are required by library This files usually are not in the same directory as source files so previous scan will not include them Dependencies Include Paths Add other discovered configuration data to the configuration object Copy Headers Compile Sources Let Exception propagate We do have unique legacy conventions about how we build and package the mbed library Check toolchain support Source and Build Paths Toolchain Take into account the library configuration (MBED_CONFIG_FILE) CMSIS mbed Common Headers Target specific sources Common Sources A number of compiled files need to be copied as objects as opposed to way the linker search for symbols in archives. 
These are: - mbed_retarget.o: to make sure that the C standard lib symbols get overridden - mbed_board.o: mbed_die is weak - mbed_overrides.o: this contains platform overrides of various weak SDK functions - mbed_main.o: this contains main redirection Let Exception propagate Force release_version to lowercase if it is a string Otherwise default to printing all known targets and toolchains Force release_version to lowercase if it is a string Otherwise default to printing all known targets and toolchains Only use it in this function so building works without extra modules Force release_version to lowercase if it is a string Otherwise default to printing all known targets and toolchains All tests status table print Align table FIlter out platforms using regex First column is platform name Get 'memory_usage' record and build table with statistics Note that summary should be in the last record of 'memory_usage' section. This is why we are grabbing last "[-1]" record. | 3,735 | en | 0.811994 |
"""
Django settings for berlapan project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# for best-practices.
# SECURITY WARNING: keep the secret key used in production secret!
# Please set SECRET_KEY environment variable in your production environment
# (e.g. Heroku).
SECRET_KEY = os.getenv('SECRET_KEY', 'django-insecure-nk@v31jj#vq_xd)s9uns%nkmj^o0efdm$-bj7dm8jz=t76_q-c')
# Automatically determine the environment by detecting whether the
# DATABASE_URL variable is set.
# DATABASE_URL is provided by Heroku if a database add-on is added
# (e.g. Heroku Postgres).
PRODUCTION = os.getenv('DATABASE_URL') is not None
# SECURITY WARNING: don't run with debug turned on in production!
# If you want to enable debugging on Heroku for learning purposes,
# set this to True.
DEBUG = not PRODUCTION
HEROKU_APP_NAME = os.getenv('HEROKU_APP_NAME', '')
ALLOWED_HOSTS = [f'{HEROKU_APP_NAME}.herokuapp.com']
if not PRODUCTION:
ALLOWED_HOSTS += ['.localhost', '127.0.0.1', '[::1]']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
'salingbantu',
'users',
'daftar_vaksinasi',
'donordarah',
'relawanvaksin',
'corsheaders',
'rest_framework',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware should be placed before any middleware that can generate
    # responses, such as CommonMiddleware, so CORS headers are always added.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'berlapan.urls'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = [
'GET',
'POST',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR / 'templates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'berlapan.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Set database settings automatically using DATABASE_URL.
if PRODUCTION:
DATABASES['default'] = dj_database_url.config(
conn_max_age=600, ssl_require=True
)
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
# Feel free to change these according to your needs.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# This is the directory for storing `collectstatic` results.
# This shouldn't be included in your Git repository.
STATIC_ROOT = BASE_DIR / 'staticfiles'
# You can use this directory to store project-wide static files.
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
# Make sure the directories exist to prevent errors when doing `collectstatic`.
for directory in [*STATICFILES_DIRS, STATIC_ROOT]:
directory.mkdir(exist_ok=True)
# Enable compression and caching features of whitenoise.
# You can remove this if it causes problems on your setup.
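# A minimal sketch of the setting those comments refer to (whitenoise's
# documented storage backend for Django 3.2):
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'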
| berlapan/settings.py | 4,858 | Django settings for berlapan project.
Generated by 'django-admin startproject' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
Build paths inside the project like this: BASE_DIR / 'subdir'. Quick-start development settings See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ for best-practices. SECURITY WARNING: keep the secret key used in production secret! Please set SECRET_KEY environment variable in your production environment (e.g. Heroku). Automatically determine environment by detecting if DATABASE_URL variable. DATABASE_URL is provided by Heroku if a database add-on is added (e.g. Heroku Postgres). SECURITY WARNING: don't run with debug turned on in production! If you want to enable debugging on Heroku for learning purposes, set this to True. Application definition Database https://docs.djangoproject.com/en/3.2/ref/settings/databases Set database settings automatically using DATABASE_URL. Password validation https://docs.djangoproject.com/en/3.2/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.2/topics/i18n/ Feel free to change these according to your needs. Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.2/howto/static-files/ This is the directory for storing `collectstatic` results. This shouldn't be included in your Git repository. You can use this directory to store project-wide static files. Make sure the directories exist to prevent errors when doing `collectstatic`. Enable compression and caching features of whitenoise. You can remove this if it causes problems on your setup. | 1,780 | en | 0.739455 |
'''Write a program that reads a person's sex, but only accepts the values 'M' or 'F'. If the input is wrong,
ask again until a valid value is given.'''
sexo = str(input('Enter your sex: [M/F] ')).strip().upper()[0]
while sexo not in 'MF':
    sexo = str(input('Invalid input. Please enter your sex: [M/F] ')).strip().upper()[0]
print('Sex {} registered successfully.'.format(sexo))
| exercicios/PythonExercicios/ex057.py | 415 | Write a program that reads a person's sex, but only accepts the values 'M' or 'F'. If the input is wrong, ask
again until a valid value is given. | 156 | pt | 0.997607
""" Solver classes for domain adaptation experiments
"""
__author__ = "Steffen Schneider"
__email__ = "steffen.schneider@tum.de"
import os, time
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
import torch.utils.data
import torch.nn as nn
from .. import Solver, BaseClassSolver
from ... import layers, optim
import itertools
class DABaseSolver(BaseClassSolver):
""" Base Class for Unsupervised Domain Adaptation Approaches
"""
def __init__(self, *args, **kwargs):
super(DABaseSolver, self).__init__(*args, **kwargs)
def _init_losses(self, **kwargs):
super()._init_losses(**kwargs)
self.register_loss(layers.AccuracyScore(), name = 'acc_s', weight = None)
self.register_loss(layers.AccuracyScore(), name = 'acc_t', weight = None)
class DATeacher(Solver):
""" Base Class for Unsupervised Domain Adaptation Approaches using a teacher model
"""
def __init__(self, model, teacher, dataset, *args, **kwargs):
super().__init__(model, dataset, *args, **kwargs)
self.teacher = teacher
def _init_models(self, **kwargs):
super()._init_models(**kwargs)
self.register_model(self.teacher, 'teacher')
class DABaselineLoss(object):
def __init__(self, solver):
self.solver = solver
def _predict(self, x, y):
_ , y_ = self.solver.model(x)
if not self.solver.multiclass:
y_ = y_.squeeze()
y = y.float()
return y_, y
def __call__(self, batch):
losses = {}
(x, y) = batch[0]
losses['acc_s'] = losses['ce'] = self._predict(x,y)
with torch.no_grad():
x,y = batch[1]
losses['acc_t'] = self._predict(x,y)
return losses
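# Note: each value in the losses dict above appears to be the (prediction,
# target) pair consumed by the loss layer registered under the same name
# (e.g. 'acc_s' feeds layers.AccuracyScore()); this is an inference from the
# register_loss calls in DABaseSolver, not a documented contract.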
class BaselineDASolver(DABaseSolver):
""" A domain adaptation solver that actually does not run any adaptation algorithm
This is useful to establish baseline results for the case of no adaptation, for measurement
of the domain shift between datasets.
"""
def _init_optims(self, lr = 3e-4, **kwargs):
super()._init_optims(**kwargs)
self.register_optimizer(torch.optim.Adam(self.model.parameters(),
lr=lr, amsgrad=True),
DABaselineLoss(self)) | salad/solver/da/base.py | 2,341 | A domain adaptation solver that actually does not run any adaptation algorithm
This is useful to establish baseline results for the case of no adaptation, for measurement
of the domain shift between datasets.
Base Class for Unsupervised Domain Adaptation Approaches
Base Class for Unsupervised Domain Adaptation Approaches using a teacher model
Solver classes for domain adaptation experiments | 405 | en | 0.887664 |
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
from pathlib import Path
import pytest
from abex.plotting.expected_basenames import expected_basenames_2d
from cellsig_sim.scripts.run_cell_signalling_loop import main
from psbutils.misc import find_subrepo_directory
# @pytest.mark.timeout(1800)
@pytest.mark.skip("Can cause ADO timeout on all platforms")
def test_tutorial_wetlab_simulation():
# For real use, we would want something like --num_iter 15 --num_runs 100, but to test everything is working,
# smaller values are sufficient, and reduce the compute time from hours to minutes.
num_iter = 2
num_runs = 3
subrepo_dir = find_subrepo_directory()
main(
[
"--spec_file",
f"{subrepo_dir}/tests/data/Specs/tutorial-wetlab-sim.yml",
"--num_iter",
str(num_iter),
"--num_runs",
str(num_runs),
"--enable_multiprocessing",
"--plot_simulated_slices",
]
)
results_dir = Path("Results") / "tutorial-wetlab-sim"
assert (results_dir / "config.yml").is_file()
for i_run in range(num_runs):
run_dir = results_dir / "fixed" / f"seed{i_run}"
assert (run_dir / "init_batch.csv").is_file()
for i_iter in range(1, num_iter + 1):
iter_dir = run_dir / f"iter{i_iter}"
assert iter_dir.is_dir()
basenames = [f.name for f in sorted(iter_dir.iterdir())]
assert basenames == expected_basenames_2d(4, variant=2)
| PyStationB/projects/CellSignalling/slow_tests/simulation/test_cellsig_tutorials.py | 1,810 | ------------------------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. ------------------------------------------------------------------------------------------- @pytest.mark.timeout(1800) For real use, we would want something like --num_iter 15 --num_runs 100, but to test everything is working, smaller values are sufficient, and reduce the compute time from hours to minutes. | 550 | en | 0.605622 |
# Solution to Problem 8
# Program outputs today's date and time in the format "Monday, January 10th 2019 at 1:15pm"
# To start we import the Python datetime module as dt.
from datetime import datetime as dt
# now equals the current date and time.
now = dt.now()
# Copied verbatim initially from Stack Overflow (Reference 1 below) but amended to fit my referencing of the time as now.
# Suffix equals 'st' if the day of the month is 1, 21 or 31, else 'nd' if the day is 2 or 22, else 'rd' if the day is 3 or 23; for everything else it is 'th'.
suffix = 'st' if now.day in [1, 21, 31] else 'nd' if now.day in [2, 22] else 'rd' if now.day in [3, 23] else 'th'
# Display to the user the heading "Todays Date and Time:"
print("Todays Date and time:")
# Below displays to the user the date and time as a string in the format Day, Month Date(suffix) Year at Current Time am/pm.
# Used Reference 3 below to remove the leading 0 when displaying the hour (%#I is Windows-specific; Unix uses %-I).
print(now.strftime('%A, %B %d%%s %Y at %#I:%M %p') % suffix)
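# Example output (hypothetical; depends on when the program is run):
#   Todays Date and time:
#   Monday, January 10th 2019 at 1:15 PM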
# Reference 1: https://stackoverflow.com/a/11645978
# Reference 2: https://www.saltycrane.com/blog/2008/06/how-to-get-current-date-and-time-in/
# Reference 3: https://stackoverflow.com/questions/904928/python-strftime-date-without-leading-0
# (From Reference 3: "One problem is that '{dt.hour}' uses a 24 hour clock :(. Using the second option still brings you back to using '{%#I}' on Windows and '{%-I}' on Unix." - ubomb, May 24 '16)
# Used the lecture from week 6 as a base for the problem and also looked at the Python tutorial.
# Laura Brogan 19/03/2019 | solution-8.py | 1,563 | Solution to Problem 8 Program outputs today's date and time in the format "Monday, January 10th 2019 at 1:15pm" To start we import the Python datetime module as dt.now equals the date and time now. Copied verbatim initially from stacoverflow Reference 1 below but amended to fit my referenceing of time as now. Suffix equals 'st' if the date now is 1,21 or 23 else it is 'nd' if the date noe is 2 or 22 else it is 'rd' if date now is 3 or23 for eveything else it is 'th. Display to the user the Heading "Todays Date and Time:" Below displays to the user a the date and time in a string in inverted commas todays date and time in the format Day, Month Date year at Current Time am/pm. Used Reference 3 below to remove the leading 0 when desplaying the time. Reference 1: https://stackoverflow.com/a/11645978 Reference 2: https://www.saltycrane.com/blog/2008/06/how-to-get-current-date-and-time-in/ Reference 3: https://stackoverflow.com/questions/904928/python-strftime-date-without-leading-0One problem is that '{dt.hour}' uses a 24 hour clock :(. Using the second option still brings you back to using '{%I}' on Windows and '{%-I}' on Unix. – ubomb May 24 '16 at 22:47 Used lecture from week 6 as a base for the problem also looked at the Python tutorial. Laura Brogan 19/03/2019 | 1,282 | en | 0.845523 |
import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear
def _upsample(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
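# Note: with no pooling indices, F.unpooling_2d copies each value into a 2x2
# block, so _upsample is effectively nearest-neighbour 2x upsampling.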
def _downsample(x):
return F.average_pooling_2d(x, 2)
def upsample_conv(x, conv):
return conv(_upsample(x))
def _upsample_frq(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))
def _downsample_frq(x):
return F.average_pooling_2d(x, (1,2))
def upsample_conv_frq(x, conv):
return conv(_upsample_frq(x))
class ResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
super(ResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
self.learnable_sc = in_channels != out_channels
self.dr = dr
self.bn = bn
with self.init_scope():
self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
if bn:
self.b1 = L.BatchNormalization(out_channels)
self.b2 = L.BatchNormalization(out_channels)
if self.learnable_sc:
self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.c1(h)
if self.bn:
h = self.b1(h)
if self.activation:
h = self.activation(h)
if self.mode:
h = self.mode(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c2(h)
if self.bn:
h = self.b2(h)
if self.activation:
h = self.activation(h)
return h
def shortcut(self, x):
if self.mode:
x = self.mode(x)
if self.learnable_sc:
x = self.c_sc(x)
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class ConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class CoPSBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
super(CoPSBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
with self.init_scope():
self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
if bn:
self.b = L.BatchNormalization(out_channels)
def pixel_shuffle(self, x):
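        # Depth-to-space rearrangement: the (B, 4C, H, W) output of self.ps
        # becomes (B, C, 2H, 2W), i.e. sub-pixel (PixelShuffle) upsampling.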
out = self.ps(x)
b = out.shape[0]
c = out.shape[1]
h = out.shape[2]
w = out.shape[3]
out = F.reshape(out, (b, 2, 2, c//4, h, w))
out = F.transpose(out, (0, 3, 4, 1, 5, 2))
out = F.reshape(out, (b, c//4, h*2, w*2))
return out
def __call__(self, h):
h = self.pixel_shuffle(h)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
super(SNResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
with self.init_scope():
self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
if self.learnable_sc:
self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
if self.sample:
h = self.sample(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.sample:
return self.sample(x)
else:
return x
else:
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class SNConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(SNConvBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNLinearBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
super(SNLinearBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
if type(out_channels) is tuple:
self.out_shape = (-1,)+out_channels
else:
self.out_shape = None
with self.init_scope():
self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = self.l(x)
x = self.activation(x)
if self.out_shape:
x = F.reshape(x, self.out_shape)
return x
class SNMDBlock(chainer.Chain):
def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
super(SNMDBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.B = B
self.C = C
self.dr = dr
self.gap = gap
if gap:
in_size = 1
if type(in_size) is int:
in_size = (in_size, in_size)
with self.init_scope():
self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
        # Code below copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
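        # Minibatch discrimination: project each sample's features to B*C values,
        # take pairwise L1 distances across the batch, and sum exp(-distance) over
        # the batch (minus 1 for the self-comparison), yielding B closeness
        # statistics per sample that are concatenated onto the features.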
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
class SNL1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNL1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class L1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(L1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class CLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
super(CLBlock, self).__init__()
self.dr = dr
if out_ch - liner_out_ch <= 0:
raise Exception('out_ch <= liner_out_ch!')
with self.init_scope():
self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
self.l = L1DBlock(in_ch, liner_out_ch, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
class SNCLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNCLBlock, self).__init__()
self.dr = dr
with self.init_scope():
self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
self.l = SNL1DBlock(in_ch, 1, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
| nets/block.py | 15,042 | Below code copyed from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py | 116 | en | 0.655731 |
#
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import importlib
import os
import pkgutil
import platform
import shutil
import sys
import subprocess
import traceback
YELLOW_COLOR="\033[0;33m"
RED_COLOR="\033[0;31m"
CYAN_COLOR="\033[0;36m"
NO_COLOR="\033[0m"
SEPARATOR = "-" * 80
BUILD_GROUP_COMMON = 1
BUILD_GROUP_INSTRUMENTED = 2
BUILD_TYPE_COMMON = 'common'
BUILD_TYPE_UNINSTRUMENTED = 'uninstrumented'
BUILD_TYPE_ASAN = 'asan'
BUILD_TYPE_TSAN = 'tsan'
BUILD_TYPES = [BUILD_TYPE_COMMON, BUILD_TYPE_UNINSTRUMENTED, BUILD_TYPE_ASAN, BUILD_TYPE_TSAN]
TAR_EXTRACT = 'tar xf {}'
# -o -- force overwriting existing files
ZIP_EXTRACT = 'unzip -q -o {}'
ARCHIVE_TYPES = {
'.tar.bz2': TAR_EXTRACT,
'.tar.gz': TAR_EXTRACT,
'.tar.xz': TAR_EXTRACT,
'.tgz': TAR_EXTRACT,
'.zip': ZIP_EXTRACT,
}
def fatal(message):
log(message)
traceback.print_stack()
sys.exit(1)
def log(message=""):
sys.stderr.write(message + "\n")
def colored_log(color, message):
sys.stderr.write(color + message + NO_COLOR + "\n")
def log_output(prefix, args):
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(process.stdout.readline, ''):
log("{}{} {}{}".format(CYAN_COLOR, prefix, NO_COLOR, line.rstrip()))
process.stdout.close()
exit_code = process.wait()
if exit_code:
fatal("Execution failed with code: {}".format(exit_code))
def unset_if_set(name):
if name in os.environ:
log('Unsetting {} for third-party build (was set to "{}").'.format(name, os.environ[name]))
del os.environ[name]
def log_separator():
log("")
log(SEPARATOR)
log("")
def heading(title):
log("")
log(SEPARATOR)
log(title)
log(SEPARATOR)
log("")
def is_mac():
return platform.system().lower() == 'darwin'
def is_linux():
return platform.system().lower() == 'linux'
def is_jenkins_user():
    return os.environ.get('USER') == "jenkins"  # .get() avoids a KeyError when USER is unset
def is_jenkins():
return 'BUILD_ID' in os.environ and 'JOB_NAME' in os.environ and is_jenkins_user()
def remove_path(path):
if not os.path.exists(path):
return
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def mkdir_if_missing(path):
if os.path.exists(path):
if not os.path.isdir(path):
fatal("Trying to create dir {}, but file with the same path already exists"
.format(path))
return
os.makedirs(path)
def make_archive_name(name, version, download_url):
if download_url is None:
return '{}-{}{}'.format(name, version, '.tar.gz')
for ext in ARCHIVE_TYPES:
if download_url.endswith(ext):
return '{}-{}{}'.format(name, version, ext)
return None
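# For example (hypothetical URL), make_archive_name('zlib', '1.2.11',
# 'https://example.com/zlib-1.2.11.tar.gz') returns 'zlib-1.2.11.tar.gz'.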
def which(exe):
return subprocess.check_output(['which', exe]).rstrip()
def import_submodules(package, recursive=True):
if isinstance(package, str):
package = importlib.import_module(package)
results = {}
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if recursive and is_pkg:
results.update(import_submodules(full_name))
return results
class Dependency(object):
def __init__(self, name, version, url_pattern, build_group):
self.name = name
self.version = version
self.dir = '{}-{}'.format(name, version)
self.underscored_version = version.replace('.', '_')
if url_pattern is not None:
self.download_url = url_pattern.format(version, self.underscored_version)
else:
self.download_url = None
self.build_group = build_group
self.archive_name = make_archive_name(name, version, self.download_url)
self.patch_version = 0
def should_build(self, instrumented):
return True
class ExtraDownload(object):
def __init__(self, name, version, url_pattern, dir, post_exec=None):
self.name = name
self.version = version
self.download_url = url_pattern.format(version)
self.archive_name = make_archive_name(name, version, self.download_url)
self.dir = dir
if post_exec is not None:
self.post_exec = post_exec
class PushDir:
def __init__(self, dir):
self.dir = dir
self.prev = None
def __enter__(self):
self.prev = os.getcwd()
os.chdir(self.dir)
def __exit__(self, type, value, traceback):
os.chdir(self.prev)
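# Usage sketch: run a block with the working directory temporarily changed,
# restoring the previous directory afterwards.
#   with PushDir('/tmp'):
#       ...  # cwd is /tmp here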
| thirdparty/build_definitions/__init__.py | 5,181 | Copyright (c) YugaByte, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -o -- force overwriting existing files | 587 | en | 0.835045 |
#
# cbpro/order_book.py
# David Caseria
#
# Live order book updated from the Coinbase Websocket Feed
from sortedcontainers import SortedDict
from decimal import Decimal
import pickle
from cbpro.public_client import PublicClient
from cbpro.websocket_client import WebsocketClient
class OrderBook(WebsocketClient):
def __init__(self, product_id='BTC-USD', log_to=None):
super(OrderBook, self).__init__(products=product_id)
self._asks = SortedDict()
self._bids = SortedDict()
self._client = PublicClient()
self._sequence = -1
self._log_to = log_to
if self._log_to:
assert hasattr(self._log_to, 'write')
self._current_ticker = None
@property
def product_id(self):
''' Currently OrderBook only supports a single product even though it is stored as a list of products. '''
return self.products[0]
def on_open(self):
self._sequence = -1
print("-- Subscribed to OrderBook! --\n")
def on_close(self):
print("\n-- OrderBook Socket Closed! --")
def reset_book(self):
self._asks = SortedDict()
self._bids = SortedDict()
res = self._client.get_product_order_book(product_id=self.product_id, level=3)
for bid in res['bids']:
self.add({
'id': bid[2],
'side': 'buy',
'price': Decimal(bid[0]),
'size': Decimal(bid[1])
})
for ask in res['asks']:
self.add({
'id': ask[2],
'side': 'sell',
'price': Decimal(ask[0]),
'size': Decimal(ask[1])
})
self._sequence = res['sequence']
def on_message(self, message):
if self._log_to:
pickle.dump(message, self._log_to)
sequence = message.get('sequence', -1)
if self._sequence == -1:
self.reset_book()
return
if sequence <= self._sequence:
# ignore older messages (e.g. before order book initialization from getProductOrderBook)
return
elif sequence > self._sequence + 1:
self.on_sequence_gap(self._sequence, sequence)
return
msg_type = message['type']
if msg_type == 'open':
self.add(message)
elif msg_type == 'done' and 'price' in message:
self.remove(message)
elif msg_type == 'match':
self.match(message)
self._current_ticker = message
elif msg_type == 'change':
self.change(message)
self._sequence = sequence
def on_sequence_gap(self, gap_start, gap_end):
self.reset_book()
        print('Error: messages missing ({} - {}). Re-initializing book at sequence {}.'.format(
            gap_start, gap_end, self._sequence))
def add(self, order):
order = {
'id': order.get('order_id') or order['id'],
'side': order['side'],
'price': Decimal(order['price']),
'size': Decimal(order.get('size') or order['remaining_size'])
}
if order['side'] == 'buy':
bids = self.get_bids(order['price'])
if bids is None:
bids = [order]
else:
bids.append(order)
self.set_bids(order['price'], bids)
else:
asks = self.get_asks(order['price'])
if asks is None:
asks = [order]
else:
asks.append(order)
self.set_asks(order['price'], asks)
def remove(self, order):
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is not None:
bids = [o for o in bids if o['id'] != order['order_id']]
if len(bids) > 0:
self.set_bids(price, bids)
else:
self.remove_bids(price)
else:
asks = self.get_asks(price)
if asks is not None:
asks = [o for o in asks if o['id'] != order['order_id']]
if len(asks) > 0:
self.set_asks(price, asks)
else:
self.remove_asks(price)
def match(self, order):
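        # A 'match' message means the maker order at the head of this price level
        # traded; shrink that order by the traded size, or drop it from the level
        # once fully filled.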
size = Decimal(order['size'])
price = Decimal(order['price'])
if order['side'] == 'buy':
bids = self.get_bids(price)
if not bids:
return
assert bids[0]['id'] == order['maker_order_id']
if bids[0]['size'] == size:
self.set_bids(price, bids[1:])
else:
bids[0]['size'] -= size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if not asks:
return
assert asks[0]['id'] == order['maker_order_id']
if asks[0]['size'] == size:
self.set_asks(price, asks[1:])
else:
asks[0]['size'] -= size
self.set_asks(price, asks)
def change(self, order):
try:
new_size = Decimal(order['new_size'])
except KeyError:
return
try:
price = Decimal(order['price'])
except KeyError:
return
if order['side'] == 'buy':
bids = self.get_bids(price)
if bids is None or not any(o['id'] == order['order_id'] for o in bids):
return
index = [b['id'] for b in bids].index(order['order_id'])
bids[index]['size'] = new_size
self.set_bids(price, bids)
else:
asks = self.get_asks(price)
if asks is None or not any(o['id'] == order['order_id'] for o in asks):
return
index = [a['id'] for a in asks].index(order['order_id'])
asks[index]['size'] = new_size
self.set_asks(price, asks)
def get_current_ticker(self):
return self._current_ticker
def get_current_book(self):
result = {
'sequence': self._sequence,
'asks': [],
'bids': [],
}
for ask in self._asks:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_ask = self._asks[ask]
except KeyError:
continue
for order in this_ask:
result['asks'].append([order['price'], order['size'], order['id']])
for bid in self._bids:
try:
# There can be a race condition here, where a price point is removed
# between these two ops
this_bid = self._bids[bid]
except KeyError:
continue
for order in this_bid:
result['bids'].append([order['price'], order['size'], order['id']])
return result
def get_ask(self):
return self._asks.peekitem(0)[0]
def get_asks(self, price):
return self._asks.get(price)
def remove_asks(self, price):
del self._asks[price]
def set_asks(self, price, asks):
self._asks[price] = asks
def get_bid(self):
return self._bids.peekitem(-1)[0]
def get_bids(self, price):
return self._bids.get(price)
def remove_bids(self, price):
del self._bids[price]
def set_bids(self, price, bids):
self._bids[price] = bids
if __name__ == '__main__':
import sys
import time
import datetime as dt
class OrderBookConsole(OrderBook):
''' Logs real-time changes to the bid-ask spread to the console '''
def __init__(self, product_id=None):
super(OrderBookConsole, self).__init__(product_id=product_id)
# latest values of bid-ask spread
self._bid = None
self._ask = None
self._bid_depth = None
self._ask_depth = None
def on_message(self, message):
super(OrderBookConsole, self).on_message(message)
# Calculate newest bid-ask spread
bid = self.get_bid()
bids = self.get_bids(bid)
bid_depth = sum([b['size'] for b in bids])
ask = self.get_ask()
asks = self.get_asks(ask)
ask_depth = sum([a['size'] for a in asks])
if self._bid == bid and self._ask == ask and self._bid_depth == bid_depth and self._ask_depth == ask_depth:
# If there are no changes to the bid-ask spread since the last update, no need to print
pass
else:
# If there are differences, update the cache
self._bid = bid
self._ask = ask
self._bid_depth = bid_depth
self._ask_depth = ask_depth
print('{} {} bid: {:.3f} @ {:.2f}\task: {:.3f} @ {:.2f}'.format(
dt.datetime.now(), self.product_id, bid_depth, bid, ask_depth, ask))
order_book = OrderBookConsole()
order_book.start()
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
order_book.close()
if order_book.error:
sys.exit(1)
else:
sys.exit(0)
| cbpro/order_book.py | 9,579 | Logs real-time changes to the bid-ask spread to the console
Currently OrderBook only supports a single product even though it is stored as a list of products.
cbpro/order_book.py David Caseria Live order book updated from the Coinbase Websocket Feed ignore older messages (e.g. before order book initialization from getProductOrderBook) There can be a race condition here, where a price point is removed between these two ops There can be a race condition here, where a price point is removed between these two ops latest values of bid-ask spread Calculate newest bid-ask spread If there are no changes to the bid-ask spread since the last update, no need to print If there are differences, update the cache | 711 | en | 0.898401 |
import json
import boto3
import os
from helper import AwsHelper
import time
def startJob(bucketName, objectName, itemId, snsTopic, snsRole, apiName):
    print("Starting job with itemId: {}, bucketName: {}, objectName: {}".format(itemId, bucketName, objectName))
    client = AwsHelper().getClient('rekognition')
    # Map the API name (taken from the object key prefix) to the corresponding
    # asynchronous Rekognition start operation; all operations share the same
    # request shape.
    startMethods = {
        "labels": client.start_label_detection,
        "text": client.start_text_detection,
        "faces": client.start_face_detection,
        "moderation": client.start_content_moderation,
        "celebrities": client.start_celebrity_recognition
    }
    # Unknown API names fall back to label detection.
    startMethod = startMethods.get(apiName, client.start_label_detection)
    response = startMethod(
        Video={
            'S3Object': {
                'Bucket': bucketName,
                'Name': objectName
            }
        },
        ClientRequestToken=itemId,
        NotificationChannel={
            'SNSTopicArn': snsTopic,
            'RoleArn': snsRole
        },
        JobTag=itemId
    )
    return response["JobId"]
def processItem(message, snsTopic, snsRole):
print('message:')
print(message)
messageBody = json.loads(message['Body'])
bucketName = messageBody['bucketName']
objectName = messageBody['objectName']
itemId = messageBody['itemId']
apiName = objectName.split("/")[0]
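    # The message body is expected to look like (values are hypothetical):
    #   {"bucketName": "my-bucket", "objectName": "labels/video.mp4", "itemId": "item-0001"}
    # where the object key prefix ("labels" here) selects the Rekognition API.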
print('Bucket Name: ' + bucketName)
print('Object Name: ' + objectName)
print('Task ID: ' + itemId)
print('starting Rekognition job...')
jobId = startJob(bucketName, objectName, itemId, snsTopic, snsRole, apiName)
if(jobId):
print("Started Job with Id: {}".format(jobId))
return jobId
def changeVisibility(sqs, qUrl, receipt_handle):
try:
sqs.change_message_visibility(
QueueUrl=qUrl,
ReceiptHandle=receipt_handle,
VisibilityTimeout=0
)
except Exception as e:
print("Failed to change visibility for {} with error: {}".format(receipt_handle, e))
def getMessagesFromQueue(sqs, qUrl):
# Receive message from SQS queue
response = sqs.receive_message(
QueueUrl=qUrl,
MaxNumberOfMessages=1,
VisibilityTimeout=60 #14400
)
print('SQS Response Received:')
print(response)
if('Messages' in response):
return response['Messages']
else:
print("No messages in queue.")
return None
def processItems(qUrl, snsTopic, snsRole):
sqs = AwsHelper().getClient('sqs')
messages = getMessagesFromQueue(sqs, qUrl)
jc = 0
totalMessages = 0
hitLimit = False
limitException = None
if(messages):
totalMessages = len(messages)
print("Total messages: {}".format(totalMessages))
for message in messages:
receipt_handle = message['ReceiptHandle']
try:
if(hitLimit):
changeVisibility(sqs, qUrl, receipt_handle)
else:
print("starting job...")
processItem(message, snsTopic, snsRole)
print("started job...")
print('Deleting item from queue...')
# Delete received message from queue
sqs.delete_message(
QueueUrl=qUrl,
ReceiptHandle=receipt_handle
)
print('Deleted item from queue...')
jc += 1
except Exception as e:
print("Error while starting job or deleting from queue: {}".format(e))
changeVisibility(sqs, qUrl, receipt_handle)
if(e.__class__.__name__ == 'LimitExceededException'
or e.__class__.__name__ == "ProvisionedThroughputExceededException"):
hitLimit = True
limitException = e
if(hitLimit):
        raise limitException
return totalMessages, jc
def processRequest(request):
qUrl = request['qUrl']
snsTopic = request['snsTopic']
snsRole = request['snsRole']
i = 0
max = 100
totalJobsScheduled = 0
hitLimit = False
provisionedThroughputExceededCount = 0
while(i < max):
try:
tc, jc = processItems(qUrl, snsTopic, snsRole)
totalJobsScheduled += jc
if(tc == 0):
i = max
except Exception as e:
if(e.__class__.__name__ == 'LimitExceededException'):
print("Exception: Hit limit.")
hitLimit = True
i = max
elif(e.__class__.__name__ == "ProvisionedThroughputExceededException"):
print("ProvisionedThroughputExceededException.")
provisionedThroughputExceededCount += 1
if(provisionedThroughputExceededCount > 5):
i = max
else:
print("Waiting for few seconds...")
time.sleep(5)
print("Waking up...")
i += 1
output = "Started {} jobs.".format(totalJobsScheduled)
if(hitLimit):
output += " Hit limit."
print(output)
return {
'statusCode': 200,
'body': output
}
def lambda_handler(event, context):
print("event: {}".format(event))
request = {}
request["qUrl"] = os.environ['ASYNC_QUEUE_URL']
request["snsTopic"] = os.environ['SNS_TOPIC_ARN']
request["snsRole"] = os.environ['SNS_ROLE_ARN']
return processRequest(request)
| rekognition-pipeline/lambda/asyncprocessor/lambda_function.py | 7,540 | Receive message from SQS queue14400 Delete received message from queue | 70 | en | 0.979641 |
# coding: utf-8
"""
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href=\"https://developer.laserfiche.com\">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Watermark(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'watermark_text': 'str',
'watermark_text_size': 'int',
'watermark_position': 'OneOfWatermarkWatermarkPosition',
'watermark_rotation_angle': 'int',
'is_watermark_mandatory': 'bool',
'watermark_intensity': 'int'
}
attribute_map = {
'watermark_text': 'watermarkText',
'watermark_text_size': 'watermarkTextSize',
'watermark_position': 'watermarkPosition',
'watermark_rotation_angle': 'watermarkRotationAngle',
'is_watermark_mandatory': 'isWatermarkMandatory',
'watermark_intensity': 'watermarkIntensity'
}
def __init__(self, watermark_text=None, watermark_text_size=None, watermark_position=None, watermark_rotation_angle=None, is_watermark_mandatory=None, watermark_intensity=None): # noqa: E501
"""Watermark - a model defined in Swagger""" # noqa: E501
self._watermark_text = None
self._watermark_text_size = None
self._watermark_position = None
self._watermark_rotation_angle = None
self._is_watermark_mandatory = None
self._watermark_intensity = None
self.discriminator = None
if watermark_text is not None:
self.watermark_text = watermark_text
if watermark_text_size is not None:
self.watermark_text_size = watermark_text_size
if watermark_position is not None:
self.watermark_position = watermark_position
if watermark_rotation_angle is not None:
self.watermark_rotation_angle = watermark_rotation_angle
if is_watermark_mandatory is not None:
self.is_watermark_mandatory = is_watermark_mandatory
if watermark_intensity is not None:
self.watermark_intensity = watermark_intensity
@property
def watermark_text(self):
"""Gets the watermark_text of this Watermark. # noqa: E501
        The watermark text associated with the tag definition.  # noqa: E501
:return: The watermark_text of this Watermark. # noqa: E501
:rtype: str
"""
return self._watermark_text
@watermark_text.setter
def watermark_text(self, watermark_text):
"""Sets the watermark_text of this Watermark.
        The watermark text associated with the tag definition.  # noqa: E501
:param watermark_text: The watermark_text of this Watermark. # noqa: E501
:type: str
"""
self._watermark_text = watermark_text
@property
def watermark_text_size(self):
"""Gets the watermark_text_size of this Watermark. # noqa: E501
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:return: The watermark_text_size of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_text_size
@watermark_text_size.setter
def watermark_text_size(self, watermark_text_size):
"""Sets the watermark_text_size of this Watermark.
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:param watermark_text_size: The watermark_text_size of this Watermark. # noqa: E501
:type: int
"""
self._watermark_text_size = watermark_text_size
@property
def watermark_position(self):
"""Gets the watermark_position of this Watermark. # noqa: E501
The position of the watermark on the page. # noqa: E501
:return: The watermark_position of this Watermark. # noqa: E501
:rtype: OneOfWatermarkWatermarkPosition
"""
return self._watermark_position
@watermark_position.setter
def watermark_position(self, watermark_position):
"""Sets the watermark_position of this Watermark.
The position of the watermark on the page. # noqa: E501
:param watermark_position: The watermark_position of this Watermark. # noqa: E501
:type: OneOfWatermarkWatermarkPosition
"""
self._watermark_position = watermark_position
@property
def watermark_rotation_angle(self):
"""Gets the watermark_rotation_angle of this Watermark. # noqa: E501
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:return: The watermark_rotation_angle of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_rotation_angle
@watermark_rotation_angle.setter
def watermark_rotation_angle(self, watermark_rotation_angle):
"""Sets the watermark_rotation_angle of this Watermark.
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:param watermark_rotation_angle: The watermark_rotation_angle of this Watermark. # noqa: E501
:type: int
"""
self._watermark_rotation_angle = watermark_rotation_angle
@property
def is_watermark_mandatory(self):
"""Gets the is_watermark_mandatory of this Watermark. # noqa: E501
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:return: The is_watermark_mandatory of this Watermark. # noqa: E501
:rtype: bool
"""
return self._is_watermark_mandatory
@is_watermark_mandatory.setter
def is_watermark_mandatory(self, is_watermark_mandatory):
"""Sets the is_watermark_mandatory of this Watermark.
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:param is_watermark_mandatory: The is_watermark_mandatory of this Watermark. # noqa: E501
:type: bool
"""
self._is_watermark_mandatory = is_watermark_mandatory
@property
def watermark_intensity(self):
"""Gets the watermark_intensity of this Watermark. # noqa: E501
The intensity of the watermark associated with the tag definition. Valid value ranges from 0 to 100, with -1 as the default values. # noqa: E501
:return: The watermark_intensity of this Watermark. # noqa: E501
:rtype: int
"""
return self._watermark_intensity
@watermark_intensity.setter
def watermark_intensity(self, watermark_intensity):
"""Sets the watermark_intensity of this Watermark.
The intensity of the watermark associated with the tag definition. Valid value ranges from 0 to 100, with -1 as the default values. # noqa: E501
:param watermark_intensity: The watermark_intensity of this Watermark. # noqa: E501
:type: int
"""
self._watermark_intensity = watermark_intensity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Watermark, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
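    # Minimal usage sketch (values are hypothetical):
    #   wm = Watermark(watermark_text='CONFIDENTIAL', watermark_intensity=50)
    #   wm.to_dict()  # -> {'watermark_text': 'CONFIDENTIAL', ...}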
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Watermark):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| laserfiche_api/models/watermark.py | 9,163 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
Watermark - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the is_watermark_mandatory of this Watermark. # noqa: E501
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:return: The is_watermark_mandatory of this Watermark. # noqa: E501
:rtype: bool
Sets the is_watermark_mandatory of this Watermark.
A boolean indicating whether or not the watermark associated with the tag is mandatory. # noqa: E501
:param is_watermark_mandatory: The is_watermark_mandatory of this Watermark. # noqa: E501
:type: bool
Returns the model properties as a dict
Returns the string representation of the model
Gets the watermark_intensity of this Watermark. # noqa: E501
The intensity of the watermark associated with the tag definition. Valid value ranges from 0 to 100, with -1 as the default values. # noqa: E501
:return: The watermark_intensity of this Watermark. # noqa: E501
:rtype: int
Sets the watermark_intensity of this Watermark.
The intensity of the watermark associated with the tag definition. Valid value ranges from 0 to 100, with -1 as the default values. # noqa: E501
:param watermark_intensity: The watermark_intensity of this Watermark. # noqa: E501
:type: int
Gets the watermark_position of this Watermark. # noqa: E501
The position of the watermark on the page. # noqa: E501
:return: The watermark_position of this Watermark. # noqa: E501
:rtype: OneOfWatermarkWatermarkPosition
Sets the watermark_position of this Watermark.
The position of the watermark on the page. # noqa: E501
:param watermark_position: The watermark_position of this Watermark. # noqa: E501
:type: OneOfWatermarkWatermarkPosition
Gets the watermark_rotation_angle of this Watermark. # noqa: E501
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:return: The watermark_rotation_angle of this Watermark. # noqa: E501
:rtype: int
Sets the watermark_rotation_angle of this Watermark.
The rotation angle, in degrees, of the watermark associated with the tag definition. # noqa: E501
:param watermark_rotation_angle: The watermark_rotation_angle of this Watermark. # noqa: E501
:type: int
Gets the watermark_text of this Watermark. # noqa: E501
The watermark text associated with the tag defintion. # noqa: E501
:return: The watermark_text of this Watermark. # noqa: E501
:rtype: str
Sets the watermark_text of this Watermark.
The watermark text associated with the tag defintion. # noqa: E501
:param watermark_text: The watermark_text of this Watermark. # noqa: E501
:type: str
Gets the watermark_text_size of this Watermark. # noqa: E501
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:return: The watermark_text_size of this Watermark. # noqa: E501
:rtype: int
Sets the watermark_text_size of this Watermark.
The size of the watermark text, in points, associated with the tag definition. # noqa: E501
:param watermark_text_size: The watermark_text_size of this Watermark. # noqa: E501
:type: int
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href="https://developer.laserfiche.com">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 | 3,737 | en | 0.627806 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# pylint: disable=no-member
#
# @Author: oesteban
# @Date: 2016-02-23 19:25:39
# @Email: code@oscaresteban.es
# @Last Modified by: oesteban
# @Last Modified time: 2016-02-29 11:43:16
"""
Computation of the quality assessment measures on functional MRI
"""
import os.path as op
import numpy as np
import nibabel as nb
from nitime import algorithms as nta
import scipy
def gsr(epi_data, mask, direction="y", ref_file=None, out_file=None):
"""
Computes the :abbr:`GSR (ghost to signal ratio)` [Giannelli2010]_. The
procedure is as follows:
#. Create a Nyquist ghost mask by circle-shifting the original mask by :math:`N/2`.
#. Rotate by :math:`N/2`
#. Remove the intersection with the original mask
#. Generate a non-ghost background
#. Calculate the :abbr:`GSR (ghost to signal ratio)`
.. warning ::
This should be used with EPI images for which the phase
encoding direction is known.
:param str epi_file: path to epi file
:param str mask_file: path to brain mask
:param str direction: the direction of phase encoding (x, y, all)
:return: the computed gsr
"""
direction = direction.lower()
    if direction[-1] not in ['x', 'y'] and direction != 'all':
        raise Exception("Unknown direction %s, should be one of x, -x, y, -y, all"
                        % direction)
if direction == 'all':
result = []
for newdir in ['x', 'y']:
ofile = None
if out_file is not None:
                fname, ext = op.splitext(out_file)
if ext == '.gz':
fname, ext2 = op.splitext(fname)
ext = ext2 + ext
ofile = '%s_%s%s' % (fname, newdir, ext)
result += [gsr(epi_data, mask, newdir,
ref_file=ref_file, out_file=ofile)]
return result
# Step 1
n2_mask = np.zeros_like(mask)
# Step 2
if direction == "x":
n2lim = np.floor(mask.shape[0]/2)
n2_mask[:n2lim, :, :] = mask[n2lim:(n2lim*2), :, :]
n2_mask[n2lim:(n2lim*2), :, :] = mask[:n2lim, :, :]
elif direction == "y":
n2lim = np.floor(mask.shape[1]/2)
n2_mask[:, :n2lim, :] = mask[:, n2lim:(n2lim*2), :]
n2_mask[:, n2lim:(n2lim*2), :] = mask[:, :n2lim, :]
elif direction == "z":
n2lim = np.floor(mask.shape[2]/2)
n2_mask[:, :, :n2lim] = mask[:, :, n2lim:(n2lim*2)]
n2_mask[:, :, n2lim:(n2lim*2)] = mask[:, :, :n2lim]
# Step 3
n2_mask = n2_mask * (1-mask)
# Step 4: non-ghost background region is labeled as 2
n2_mask = n2_mask + 2 * (1 - n2_mask - mask)
# Save mask
if ref_file is not None and out_file is not None:
ref = nb.load(ref_file)
out = nb.Nifti1Image(n2_mask, ref.get_affine(), ref.get_header())
out.to_filename(out_file)
# Step 5: signal is the entire foreground image
ghost = epi_data[n2_mask == 1].mean() - epi_data[n2_mask == 2].mean()
signal = epi_data[n2_mask == 0].mean()
return float(ghost/signal)
def dvars(func, mask, output_all=False, out_file=None):
"""
Compute the mean :abbr:`DVARS (D referring to temporal
derivative of timecourses, VARS referring to RMS variance over voxels)`
[Power2012]_.
Particularly, the *standardized* :abbr:`DVARS (D referring to temporal
derivative of timecourses, VARS referring to RMS variance over voxels)`
[Nichols2013]_ are computed.
.. note:: Implementation details
Uses the implementation of the `Yule-Walker equations
from nitime
<http://nipy.org/nitime/api/generated/nitime.algorithms.autoregressive.html\
#nitime.algorithms.autoregressive.AR_est_YW>`_
for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal.
:param numpy.ndarray func: functional data, after head-motion-correction.
:param numpy.ndarray mask: a 3D mask of the brain
:param bool output_all: write out all dvars
:param str out_file: a path to which the standardized dvars should be saved.
:return: the standardized DVARS
"""
if len(func.shape) != 4:
        raise RuntimeError(
            "Input fMRI dataset should be 4-dimensional, got shape %s" % (func.shape,))
# Remove zero-variance voxels across time axis
zv_mask = zero_variance(func, mask)
idx = np.where(zv_mask > 0)
mfunc = func[idx[0], idx[1], idx[2], :]
# Robust standard deviation
func_sd = (np.percentile(mfunc, 75) -
np.percentile(mfunc, 25)) / 1.349
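    # (The interquartile range divided by 1.349 estimates the standard
    # deviation of normally distributed data.)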
# Demean
mfunc -= mfunc.mean(axis=1)[..., np.newaxis]
# AR1
ak_coeffs = np.apply_along_axis(nta.AR_est_YW, 1, mfunc, 1)
# Predicted standard deviation of temporal derivative
func_sd_pd = np.squeeze(np.sqrt((2 * (1 - ak_coeffs[:, 0])).tolist()) * func_sd)
diff_sd_mean = func_sd_pd[func_sd_pd > 0].mean()
# Compute temporal difference time series
func_diff = np.diff(mfunc, axis=1)
# DVARS (no standardization)
dvars_nstd = func_diff.std(axis=0)
# standardization
dvars_stdz = dvars_nstd / diff_sd_mean
# voxelwise standardization
diff_vx_stdz = func_diff / np.array([func_sd_pd] * func_diff.shape[-1]).T
dvars_vx_stdz = diff_vx_stdz.std(1, ddof=1)
if output_all:
gendvars = np.vstack((dvars_stdz, dvars_nstd, dvars_vx_stdz))
else:
gendvars = dvars_stdz.reshape(len(dvars_stdz), 1)
if out_file is not None:
np.savetxt(out_file, gendvars, fmt='%.12f')
return gendvars
def fd_jenkinson(in_file, rmax=80., out_file=None):
"""
Compute the :abbr:`FD (framewise displacement)` [Jenkinson2002]_
on a 4D dataset, after ``3dvolreg`` has been executed
(generally a file named ``*.affmat12.1D``).
:param str in_file: path to epi file
:param float rmax: the default radius (as in FSL) of a sphere represents
the brain in which the angular displacements are projected.
:param str out_file: a path for the output file with the FD
:return: the output file with the FD, and the average FD along
the time series
:rtype: tuple(str, float)
.. note ::
:code:`infile` should have one 3dvolreg affine matrix in one row -
NOT the motion parameters
"""
    import math
if out_file is None:
fname, ext = op.splitext(op.basename(in_file))
out_file = op.abspath('%s_fdfile%s' % (fname, ext))
# if in_file (coordinate_transformation) is actually the rel_mean output
# of the MCFLIRT command, forward that file
if 'rel.rms' in in_file:
return in_file
pm_ = np.genfromtxt(in_file)
original_shape = pm_.shape
pm = np.zeros((pm_.shape[0], pm_.shape[1] + 4))
pm[:, :original_shape[1]] = pm_
pm[:, original_shape[1]:] = [0.0, 0.0, 0.0, 1.0]
# rigid body transformation matrix
T_rb_prev = np.matrix(np.eye(4))
flag = 0
X = [0] # First timepoint
for i in range(0, pm.shape[0]):
# making use of the fact that the order of aff12 matrix is "row-by-row"
T_rb = np.matrix(pm[i].reshape(4, 4))
if flag == 0:
flag = 1
else:
M = np.dot(T_rb, T_rb_prev.I) - np.eye(4)
A = M[0:3, 0:3]
b = M[0:3, 3]
FD_J = math.sqrt(
(rmax * rmax / 5) * np.trace(np.dot(A.T, A)) + np.dot(b.T, b))
X.append(FD_J)
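            # (rmax**2 / 5) * tr(A'A) projects the rotational displacement onto
            # a sphere of radius rmax; b'b adds the squared translation.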
T_rb_prev = T_rb
np.savetxt(out_file, X)
return out_file
def gcor(func, mask):
"""
Compute the :abbr:`GCOR (global correlation)`.
:param numpy.ndarray func: input fMRI dataset, after motion correction
:param numpy.ndarray mask: 3D brain mask
:return: the computed GCOR value
"""
# Remove zero-variance voxels across time axis
tv_mask = zero_variance(func, mask)
idx = np.where(tv_mask > 0)
zscores = scipy.stats.mstats.zscore(func[idx[0], idx[1], idx[2], :], axis=1)
avg_ts = zscores.mean(axis=0)
return float(avg_ts.transpose().dot(avg_ts) / len(avg_ts))
def zero_variance(func, mask):
"""
Mask out voxels with zero variance across t-axis
:param numpy.ndarray func: input fMRI dataset, after motion correction
:param numpy.ndarray mask: 3D brain mask
:return: the 3D mask of voxels with nonzero variance across :math:`t`.
:rtype: numpy.ndarray
"""
idx = np.where(mask > 0)
func = func[idx[0], idx[1], idx[2], :]
tvariance = func.var(axis=1)
tv_mask = np.zeros_like(tvariance)
tv_mask[tvariance > 0] = 1
newmask = np.zeros_like(mask)
newmask[idx] = tv_mask
return newmask
| packages/structural_dhcp_mriqc/structural_dhcp_mriqc/qc/functional.py | 8,733 | 3,630 | en | 0.729733 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tutotrial.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| tutorial/manage.py | 665 | 103 | en | 0.725633 |
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
num_classes = 2
img_height, img_width = 64, 64  # 572, 572 in the original U-Net
out_height, out_width = 64, 64  # 388, 388 in the original U-Net
GPU = False
torch.manual_seed(0)
class Mynet(torch.nn.Module):
def __init__(self):
        super(Mynet, self).__init__()  # required so torch.nn.Module registers submodules
enc1 = []
enc1.append(torch.nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
enc1.append(torch.nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1))
enc1.append(torch.nn.BatchNorm2d(32))
enc1.append(torch.nn.ReLU())
self.enc1 = torch.nn.Sequential(*enc1)
        self.out = torch.nn.Conv2d(32, 1, kernel_size=1, padding=0, stride=1)
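    # A forward() is required for the module to be callable; the minimal pass
    # below (encoder stack, then the 1x1 output head) is a sketch assumed from
    # the layers defined above.
    def forward(self, x):
        x = self.enc1(x)
        return self.out(x)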
| Question_semaseg/my_answers/bin_loss_pytorch.py | 1,518 | 29 | es | 0.213721 |
# -*- coding: utf-8 -*-
"""
celery.result
~~~~~~~~~~~~~
Task results/state and groups of results.
"""
from __future__ import absolute_import
import time
import warnings
from collections import deque
from contextlib import contextmanager
from copy import copy
from kombu.utils import cached_property
from kombu.utils.compat import OrderedDict
from . import current_app
from . import states
from ._state import _set_task_join_will_block, task_join_will_block
from .app import app_or_default
from .datastructures import DependencyGraph, GraphFormatter
from .exceptions import IncompleteStream, TimeoutError
from .five import items, range, string_t, monotonic
from .utils import deprecated
__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult',
'EagerResult', 'result_from_tuple']
E_WOULDBLOCK = """\
Never call result.get() within a task!
See http://docs.celeryq.org/en/latest/userguide/tasks.html\
#task-synchronous-subtasks
In Celery 3.2 this will result in an exception being
raised instead of just being a warning.
"""
def assert_will_not_block():
if task_join_will_block():
warnings.warn(RuntimeWarning(E_WOULDBLOCK))
@contextmanager
def allow_join_result():
reset_value = task_join_will_block()
_set_task_join_will_block(False)
try:
yield
finally:
_set_task_join_will_block(reset_value)
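# Example (sketch): temporarily allow a synchronous join inside a task body;
# `some_result` stands in for any AsyncResult:
#     with allow_join_result():
#         value = some_result.get()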
class ResultBase(object):
"""Base class for all results"""
#: Parent result (if part of a chain)
parent = None
class AsyncResult(ResultBase):
"""Query task state.
:param id: see :attr:`id`.
:keyword backend: see :attr:`backend`.
"""
app = None
#: Error raised for timeouts.
TimeoutError = TimeoutError
#: The task's UUID.
id = None
#: The task result backend to use.
backend = None
def __init__(self, id, backend=None, task_name=None,
app=None, parent=None):
self.app = app_or_default(app or self.app)
self.id = id
self.backend = backend or self.app.backend
self.task_name = task_name
self.parent = parent
self._cache = None
def as_tuple(self):
parent = self.parent
return (self.id, parent and parent.as_tuple()), None
serializable = as_tuple # XXX compat
def forget(self):
"""Forget about (and possibly remove the result of) this task."""
self._cache = None
self.backend.forget(self.id)
def revoke(self, connection=None, terminate=False, signal=None,
wait=False, timeout=None):
"""Send revoke signal to all workers.
Any worker receiving the task, or having reserved the
task, *must* ignore it.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from workers. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.
"""
self.app.control.revoke(self.id, connection=connection,
terminate=terminate, signal=signal,
reply=wait, timeout=timeout)
def get(self, timeout=None, propagate=True, interval=0.5,
no_ack=True, follow_parents=True,
EXCEPTION_STATES=states.EXCEPTION_STATES,
PROPAGATE_STATES=states.PROPAGATE_STATES):
"""Wait until task is ready, and return its result.
.. warning::
Waiting for tasks within a task may lead to deadlocks.
Please read :ref:`task-synchronous-subtasks`.
:keyword timeout: How long to wait, in seconds, before the
operation times out.
:keyword propagate: Re-raise exception if the task failed.
:keyword interval: Time to wait (in seconds) before retrying to
retrieve the result. Note that this does not have any effect
when using the amqp result store backend, as it does not
use polling.
:keyword no_ack: Enable amqp no ack (automatically acknowledge
message). If this is :const:`False` then the message will
**not be acked**.
:keyword follow_parents: Reraise any exception raised by parent task.
:raises celery.exceptions.TimeoutError: if `timeout` is not
:const:`None` and the result does not arrive within `timeout`
seconds.
If the remote call raised an exception then that exception will
be re-raised.
"""
assert_will_not_block()
on_interval = None
if follow_parents and propagate and self.parent:
on_interval = self._maybe_reraise_parent_error
on_interval()
if self._cache:
if propagate:
self.maybe_reraise()
return self.result
meta = self.backend.wait_for(
self.id, timeout=timeout,
interval=interval,
on_interval=on_interval,
no_ack=no_ack,
)
if meta:
self._maybe_set_cache(meta)
status = meta['status']
if status in PROPAGATE_STATES and propagate:
raise meta['result']
return meta['result']
wait = get # deprecated alias to :meth:`get`.
def _maybe_reraise_parent_error(self):
for node in reversed(list(self._parents())):
node.maybe_reraise()
def _parents(self):
node = self.parent
while node:
yield node
node = node.parent
def collect(self, intermediate=False, **kwargs):
"""Iterator, like :meth:`get` will wait for the task to complete,
but will also follow :class:`AsyncResult` and :class:`ResultSet`
returned by the task, yielding ``(result, value)`` tuples for each
result in the tree.
An example would be having the following tasks:
.. code-block:: python
from celery import group
from proj.celery import app
@app.task(trail=True)
def A(how_many):
return group(B.s(i) for i in range(how_many))()
@app.task(trail=True)
def B(i):
return pow2.delay(i)
@app.task(trail=True)
def pow2(i):
return i ** 2
Note that the ``trail`` option must be enabled
so that the list of children is stored in ``result.children``.
This is the default but enabled explicitly for illustration.
Calling :meth:`collect` would return:
.. code-block:: python
>>> from celery.result import ResultBase
>>> from proj.tasks import A
>>> result = A.delay(10)
>>> [v for v in result.collect()
... if not isinstance(v, (ResultBase, tuple))]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
"""
for _, R in self.iterdeps(intermediate=intermediate):
yield R, R.get(**kwargs)
def get_leaf(self):
value = None
for _, R in self.iterdeps():
value = R.get()
return value
def iterdeps(self, intermediate=False):
stack = deque([(None, self)])
while stack:
parent, node = stack.popleft()
yield parent, node
if node.ready():
stack.extend((node, child) for child in node.children or [])
else:
if not intermediate:
raise IncompleteStream()
def ready(self):
"""Returns :const:`True` if the task has been executed.
If the task is still running, pending, or is waiting
for retry then :const:`False` is returned.
"""
return self.state in self.backend.READY_STATES
def successful(self):
"""Returns :const:`True` if the task executed successfully."""
return self.state == states.SUCCESS
def failed(self):
"""Returns :const:`True` if the task failed."""
return self.state == states.FAILURE
def maybe_reraise(self):
if self.state in states.PROPAGATE_STATES:
raise self.result
def build_graph(self, intermediate=False, formatter=None):
graph = DependencyGraph(
formatter=formatter or GraphFormatter(root=self.id, shape='oval'),
)
for parent, node in self.iterdeps(intermediate=intermediate):
graph.add_arc(node)
if parent:
graph.add_edge(parent, node)
return graph
def __str__(self):
"""`str(self) -> self.id`"""
return str(self.id)
def __hash__(self):
"""`hash(self) -> hash(self.id)`"""
return hash(self.id)
def __repr__(self):
return '<{0}: {1}>'.format(type(self).__name__, self.id)
def __eq__(self, other):
if isinstance(other, AsyncResult):
return other.id == self.id
elif isinstance(other, string_t):
return other == self.id
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __copy__(self):
return self.__class__(
self.id, self.backend, self.task_name, self.app, self.parent,
)
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return self.id, self.backend, self.task_name, None, self.parent
def __del__(self):
self._cache = None
@cached_property
def graph(self):
return self.build_graph()
@property
def supports_native_join(self):
return self.backend.supports_native_join
@property
def children(self):
return self._get_task_meta().get('children')
def _maybe_set_cache(self, meta):
if meta:
state = meta['status']
if state == states.SUCCESS or state in states.PROPAGATE_STATES:
return self._set_cache(meta)
return meta
def _get_task_meta(self):
if self._cache is None:
return self._maybe_set_cache(self.backend.get_task_meta(self.id))
return self._cache
def _set_cache(self, d):
children = d.get('children')
if children:
d['children'] = [
result_from_tuple(child, self.app) for child in children
]
self._cache = d
return d
@property
def result(self):
"""When the task has been executed, this contains the return value.
If the task raised an exception, this will be the exception
instance."""
return self._get_task_meta()['result']
info = result
@property
def traceback(self):
"""Get the traceback of a failed task."""
return self._get_task_meta().get('traceback')
@property
def state(self):
"""The tasks current state.
Possible values includes:
*PENDING*
The task is waiting for execution.
*STARTED*
The task has been started.
*RETRY*
The task is to be retried, possibly because of failure.
*FAILURE*
The task raised an exception, or has exceeded the retry limit.
The :attr:`result` attribute then contains the
exception raised by the task.
*SUCCESS*
The task executed successfully. The :attr:`result` attribute
then contains the tasks return value.
"""
return self._get_task_meta()['status']
status = state
@property
def task_id(self):
"""compat alias to :attr:`id`"""
return self.id
@task_id.setter # noqa
def task_id(self, id):
self.id = id
BaseAsyncResult = AsyncResult # for backwards compatibility.
class ResultSet(ResultBase):
"""Working with more than one result.
:param results: List of result instances.
"""
app = None
    #: List of results in the set.
results = None
def __init__(self, results, app=None, **kwargs):
self.app = app_or_default(app or self.app)
self.results = results
def add(self, result):
"""Add :class:`AsyncResult` as a new member of the set.
Does nothing if the result is already a member.
"""
if result not in self.results:
self.results.append(result)
def remove(self, result):
"""Remove result from the set; it must be a member.
:raises KeyError: if the result is not a member.
"""
if isinstance(result, string_t):
result = self.app.AsyncResult(result)
try:
self.results.remove(result)
except ValueError:
raise KeyError(result)
def discard(self, result):
"""Remove result from the set if it is a member.
If it is not a member, do nothing.
"""
try:
self.remove(result)
except KeyError:
pass
def update(self, results):
"""Update set with the union of itself and an iterable with
results."""
self.results.extend(r for r in results if r not in self.results)
def clear(self):
"""Remove all results from this set."""
self.results[:] = [] # don't create new list.
def successful(self):
"""Was all of the tasks successful?
:returns: :const:`True` if all of the tasks finished
successfully (i.e. did not raise an exception).
"""
return all(result.successful() for result in self.results)
def failed(self):
"""Did any of the tasks fail?
:returns: :const:`True` if one of the tasks failed.
(i.e., raised an exception)
"""
return any(result.failed() for result in self.results)
def maybe_reraise(self):
for result in self.results:
result.maybe_reraise()
def waiting(self):
"""Are any of the tasks incomplete?
        :returns: :const:`True` if one of the tasks is still
waiting for execution.
"""
return any(not result.ready() for result in self.results)
def ready(self):
"""Did all of the tasks complete? (either by success of failure).
:returns: :const:`True` if all of the tasks has been
executed.
"""
return all(result.ready() for result in self.results)
def completed_count(self):
"""Task completion count.
:returns: the number of tasks completed.
"""
return sum(int(result.successful()) for result in self.results)
def forget(self):
"""Forget about (and possible remove the result of) all the tasks."""
for result in self.results:
result.forget()
def revoke(self, connection=None, terminate=False, signal=None,
wait=False, timeout=None):
"""Send revoke signal to all workers for all tasks in the set.
:keyword terminate: Also terminate the process currently working
on the task (if any).
:keyword signal: Name of signal to send to process if terminate.
Default is TERM.
:keyword wait: Wait for replies from worker. Will wait for 1 second
by default or you can specify a custom ``timeout``.
:keyword timeout: Time in seconds to wait for replies if ``wait``
enabled.
"""
self.app.control.revoke([r.id for r in self.results],
connection=connection, timeout=timeout,
terminate=terminate, signal=signal, reply=wait)
def __iter__(self):
return iter(self.results)
def __getitem__(self, index):
"""`res[i] -> res.results[i]`"""
return self.results[index]
@deprecated('3.2', '3.3')
def iterate(self, timeout=None, propagate=True, interval=0.5):
"""Deprecated method, use :meth:`get` with a callback argument."""
elapsed = 0.0
results = OrderedDict((result.id, copy(result))
for result in self.results)
while results:
removed = set()
for task_id, result in items(results):
if result.ready():
yield result.get(timeout=timeout and timeout - elapsed,
propagate=propagate)
removed.add(task_id)
else:
if result.backend.subpolling_interval:
time.sleep(result.backend.subpolling_interval)
for task_id in removed:
results.pop(task_id, None)
time.sleep(interval)
elapsed += interval
if timeout and elapsed >= timeout:
raise TimeoutError('The operation timed out')
def get(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True):
"""See :meth:`join`
This is here for API compatibility with :class:`AsyncResult`,
in addition it uses :meth:`join_native` if available for the
current result backend.
"""
return (self.join_native if self.supports_native_join else self.join)(
timeout=timeout, propagate=propagate,
interval=interval, callback=callback, no_ack=no_ack)
def join(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True):
"""Gathers the results of all tasks as a list in order.
.. note::
This can be an expensive operation for result store
backends that must resort to polling (e.g. database).
You should consider using :meth:`join_native` if your backend
supports it.
.. warning::
Waiting for tasks within a task may lead to deadlocks.
Please see :ref:`task-synchronous-subtasks`.
:keyword timeout: The number of seconds to wait for results before
the operation times out.
:keyword propagate: If any of the tasks raises an exception, the
exception will be re-raised.
:keyword interval: Time to wait (in seconds) before retrying to
retrieve a result from the set. Note that this
does not have any effect when using the amqp
result store backend, as it does not use polling.
:keyword callback: Optional callback to be called for every result
received. Must have signature ``(task_id, value)``
No results will be returned by this function if
a callback is specified. The order of results
is also arbitrary when a callback is used.
To get access to the result object for a particular
id you will have to generate an index first:
``index = {r.id: r for r in gres.results.values()}``
Or you can create new result objects on the fly:
``result = app.AsyncResult(task_id)`` (both will
take advantage of the backend cache anyway).
:keyword no_ack: Automatic message acknowledgement (Note that if this
is set to :const:`False` then the messages *will not be
acknowledged*).
:raises celery.exceptions.TimeoutError: if ``timeout`` is not
:const:`None` and the operation takes longer than ``timeout``
seconds.
"""
assert_will_not_block()
time_start = monotonic()
remaining = None
results = []
for result in self.results:
remaining = None
if timeout:
remaining = timeout - (monotonic() - time_start)
if remaining <= 0.0:
raise TimeoutError('join operation timed out')
value = result.get(
timeout=remaining, propagate=propagate,
interval=interval, no_ack=no_ack,
)
if callback:
callback(result.id, value)
else:
results.append(value)
return results
def iter_native(self, timeout=None, interval=0.5, no_ack=True):
"""Backend optimized version of :meth:`iterate`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
"""
results = self.results
if not results:
return iter([])
return self.backend.get_many(
set(r.id for r in results),
timeout=timeout, interval=interval, no_ack=no_ack,
)
def join_native(self, timeout=None, propagate=True,
interval=0.5, callback=None, no_ack=True):
"""Backend optimized version of :meth:`join`.
.. versionadded:: 2.2
Note that this does not support collecting the results
for different task types using different backends.
This is currently only supported by the amqp, Redis and cache
result backends.
"""
assert_will_not_block()
order_index = None if callback else dict(
(result.id, i) for i, result in enumerate(self.results)
)
acc = None if callback else [None for _ in range(len(self))]
for task_id, meta in self.iter_native(timeout, interval, no_ack):
value = meta['result']
if propagate and meta['status'] in states.PROPAGATE_STATES:
raise value
if callback:
callback(task_id, value)
else:
acc[order_index[task_id]] = value
return acc
def _failed_join_report(self):
return (res for res in self.results
if res.backend.is_cached(res.id) and
res.state in states.PROPAGATE_STATES)
def __len__(self):
return len(self.results)
def __eq__(self, other):
if isinstance(other, ResultSet):
return other.results == self.results
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<{0}: [{1}]>'.format(type(self).__name__,
', '.join(r.id for r in self.results))
@property
def subtasks(self):
"""Deprecated alias to :attr:`results`."""
return self.results
@property
def supports_native_join(self):
try:
return self.results[0].supports_native_join
except IndexError:
pass
@property
def backend(self):
return self.app.backend if self.app else self.results[0].backend
class GroupResult(ResultSet):
"""Like :class:`ResultSet`, but with an associated id.
This type is returned by :class:`~celery.group`, and the
    deprecated TaskSet's :meth:`~celery.task.TaskSet.apply_async` method.
    It enables inspection of the tasks' states and return values as
a single entity.
:param id: The id of the group.
:param results: List of result instances.
"""
#: The UUID of the group.
id = None
#: List/iterator of results in the group
results = None
def __init__(self, id=None, results=None, **kwargs):
self.id = id
ResultSet.__init__(self, results, **kwargs)
def save(self, backend=None):
"""Save group-result for later retrieval using :meth:`restore`.
Example::
>>> def save_and_restore(result):
... result.save()
... result = GroupResult.restore(result.id)
"""
return (backend or self.app.backend).save_group(self.id, self)
def delete(self, backend=None):
"""Remove this result if it was previously saved."""
(backend or self.app.backend).delete_group(self.id)
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return self.id, self.results
def __eq__(self, other):
if isinstance(other, GroupResult):
return other.id == self.id and other.results == self.results
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id,
', '.join(r.id for r in self.results))
def as_tuple(self):
return self.id, [r.as_tuple() for r in self.results]
serializable = as_tuple # XXX compat
@property
def children(self):
return self.results
@classmethod
def restore(self, id, backend=None):
"""Restore previously saved group result."""
return (
backend or (self.app.backend if self.app else current_app.backend)
).restore_group(id)
class TaskSetResult(GroupResult):
"""Deprecated version of :class:`GroupResult`"""
def __init__(self, taskset_id, results=None, **kwargs):
# XXX supports the taskset_id kwarg.
# XXX previously the "results" arg was named "subtasks".
if 'subtasks' in kwargs:
results = kwargs['subtasks']
GroupResult.__init__(self, taskset_id, results, **kwargs)
def itersubtasks(self):
"""Deprecated. Use ``iter(self.results)`` instead."""
return iter(self.results)
@property
def total(self):
"""Deprecated: Use ``len(r)``."""
return len(self)
@property
def taskset_id(self):
"""compat alias to :attr:`self.id`"""
return self.id
@taskset_id.setter # noqa
def taskset_id(self, id):
self.id = id
class EagerResult(AsyncResult):
"""Result that we know has already been executed."""
task_name = None
def __init__(self, id, ret_value, state, traceback=None):
self.id = id
self._result = ret_value
self._state = state
self._traceback = traceback
def _get_task_meta(self):
return {'task_id': self.id, 'result': self._result, 'status':
self._state, 'traceback': self._traceback}
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return (self.id, self._result, self._state, self._traceback)
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def ready(self):
return True
def get(self, timeout=None, propagate=True, **kwargs):
if self.successful():
return self.result
elif self.state in states.PROPAGATE_STATES:
if propagate:
raise self.result
return self.result
wait = get
def forget(self):
pass
def revoke(self, *args, **kwargs):
self._state = states.REVOKED
def __repr__(self):
return '<EagerResult: {0.id}>'.format(self)
@property
def result(self):
"""The tasks return value"""
return self._result
@property
def state(self):
"""The tasks state."""
return self._state
status = state
@property
def traceback(self):
"""The traceback if the task failed."""
return self._traceback
@property
def supports_native_join(self):
return False
def result_from_tuple(r, app=None):
# earlier backends may just pickle, so check if
# result is already prepared.
app = app_or_default(app)
Result = app.AsyncResult
if not isinstance(r, ResultBase):
res, nodes = r
if nodes:
return app.GroupResult(
res, [result_from_tuple(child, app) for child in nodes],
)
# previously did not include parent
id, parent = res if isinstance(res, (list, tuple)) else (res, None)
if parent:
parent = result_from_tuple(parent, app)
return Result(id, parent=parent)
return r
from_serializable = result_from_tuple # XXX compat
| venv/lib/python2.7/site-packages/celery/result.py | 28,560 | 9,532 | en | 0.828099 |
import tensorflow as tf
from tensorflow.keras.models import Model
import pandas as pd
import matplotlib.pyplot as plt
import os
import logging
from .common import create_directories
def get_prepared_model(stage: str, no_classes: int, input_shape: list, loss: str, optimizer: str, metrics: list) -> \
Model:
"""Function creates ANN model and compile.
Args:
stage ([str]): stage of experiment
no_classes ([INT]): No of classes for classification
input_shape ([int, int]): Input shape for model's input layer
loss ([str]): Loss function for model
optimizer ([str]): Optimizer for model
metrics ([str]): Metrics to watch while training
Returns:
model: ANN demo model
"""
# Define layers
LAYERS = []
BASE_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, activation='relu', name='hidden1'),
tf.keras.layers.Dense(units=196, activation='relu', name='hidden2'),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
KERNEL_INIT_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
BN_BEFORE_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, name='hidden1', kernel_initializer='glorot_uniform'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Dense(units=196, name='hidden2', kernel_initializer='glorot_uniform'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
BN_AFTER_LAYERS = [
tf.keras.layers.Flatten(input_shape=input_shape, name='input_layer'),
tf.keras.layers.Dense(units=392, activation='relu', name='hidden1', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(units=196, activation='relu', name='hidden2', kernel_initializer='glorot_uniform',
bias_initializer='zeros'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(units=no_classes, activation='softmax', name='output_layer')
]
logging.info("Creating Model..")
if stage == 'BASE_MODEL':
LAYERS = BASE_LAYERS
elif stage == 'KERNEL_INIT_MODEL':
LAYERS = KERNEL_INIT_LAYERS
elif stage == 'BN_BEFORE_MODEL':
LAYERS = BN_BEFORE_LAYERS
    elif stage == 'BN_AFTER_MODEL':
        LAYERS = BN_AFTER_LAYERS
    else:
        raise ValueError(f"Unknown stage: {stage}")  # avoid silently building an empty model
model_ann = tf.keras.models.Sequential(LAYERS)
logging.info("Compiling Model..")
model_ann.compile(loss=loss, optimizer=optimizer, metrics=metrics)
return model_ann
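# Example usage (a sketch: the stage name comes from this module, while the
# 28x28/10-class shapes are assumed for illustration):
#     model = get_prepared_model('BASE_MODEL', no_classes=10,
#                                input_shape=[28, 28],
#                                loss='sparse_categorical_crossentropy',
#                                optimizer='adam', metrics=['accuracy'])
#     model.summary()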
def save_model(model_dir: str, model: Model, model_suffix: str) -> None:
"""
args:
model_dir: directory to save the model
model: model object to save
model_suffix: Suffix to save the model
"""
create_directories([model_dir])
model_file = os.path.join(model_dir, f"{model_suffix}.h5")
model.save(model_file)
logging.info(f"Saved model: {model_file}")
def save_history_plot(history, plot_dir: str, stage: str) -> None:
"""
Args:
history: History object for plotting loss/accuracy curves
plot_dir: Directory to save plot files
stage: Stage name for training
"""
pd.DataFrame(history.history).plot(figsize=(10, 8))
plt.grid(True)
create_directories([plot_dir])
plot_file = os.path.join(plot_dir, stage + "_loss_accuracy.png")
plt.savefig(plot_file)
logging.info(f"Loss accuracy plot saved: {plot_file}")
def get_callbacks(checkpoint_dir: str, tensorboard_logs: str, stage: str) -> list:
"""
Args:
checkpoint_dir: Directory to save the model at checkpoint
tensorboard_logs: Directory to save tensorboard logs
stage: Stage name for training
Returns:
callback_list: List of created callbacks
"""
create_directories([checkpoint_dir, tensorboard_logs])
tensorboard_cb = tf.keras.callbacks.TensorBoard(tensorboard_logs)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)
ckpt_file_path = os.path.join(checkpoint_dir, f"{stage}_ckpt_model.h5")
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_file_path, save_best_only=True)
callback_list = [tensorboard_cb, early_stopping_cb, checkpoint_cb]
logging.info(f"Callbacks created: {callback_list}")
return callback_list
| src/utils/model.py | 5,188 | 880 | en | 0.694404 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** Thomas Larsson, Jonas Hauquier
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Geometry export
"""
import math
import numpy as np
import log
from progress import Progress
#----------------------------------------------------------------------
# library_geometry
#----------------------------------------------------------------------
def writeLibraryGeometry(fp, meshes, config, shapes=None):
progress = Progress(len(meshes), None)
fp.write('\n <library_geometries>\n')
for mIdx,mesh in enumerate(meshes):
if shapes is None:
shape = None
else:
shape = shapes[mIdx]
writeGeometry(fp, mesh, config, shape)
progress.step()
fp.write(' </library_geometries>\n')
# TODO make shared function, config.getTransform() and mesh.clone(transform)
def rotateCoord(coord, config):
if config.meshOrientation == 'yUpFaceZ':
pass
elif config.meshOrientation == 'yUpFaceX':
# z,y,-x
coord = np.dstack((coord[:,2],coord[:,1],-coord[:,0]))[0]
elif config.meshOrientation == 'zUpFaceNegY':
# x,z,-y
coord = np.dstack((coord[:,0],-coord[:,2],coord[:,1]))[0]
elif config.meshOrientation == 'zUpFaceX':
# z,x,y
coord = np.dstack((coord[:,2],coord[:,0],coord[:,1]))[0]
return coord
def writeGeometry(fp, mesh, config, shapes=None):
progress = Progress()
progress(0)
coord = mesh.coord + config.offset
coord = rotateCoord(coord, config)
nVerts = len(coord)
fp.write('\n' +
' <geometry id="%sMesh" name="%s">\n' % (mesh.name,mesh.name) +
' <mesh>\n' +
' <source id="%s-Position">\n' % mesh.name +
' <float_array count="%d" id="%s-Position-array">\n' % (3*nVerts,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in coord]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-Position-array" stride="3">\n' % (nVerts,mesh.name) +
' <param type="float" name="X"></param>\n' +
' <param type="float" name="Y"></param>\n' +
' <param type="float" name="Z"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.2)
# Normals
if config.useNormals:
mesh.calcNormals()
vnorm = rotateCoord(mesh.vnorm, config)
nNormals = len(mesh.vnorm)
fp.write(
' <source id="%s-Normals">\n' % mesh.name +
' <float_array count="%d" id="%s-Normals-array">\n' % (3*nNormals,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(no)) for no in vnorm]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-Normals-array" stride="3">\n' % (nNormals,mesh.name) +
' <param type="float" name="X"></param>\n' +
' <param type="float" name="Y"></param>\n' +
' <param type="float" name="Z"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.35)
# UV coordinates
nUvVerts = len(mesh.texco)
fp.write(
' <source id="%s-UV">\n' % mesh.name +
' <float_array count="%d" id="%s-UV-array">\n' % (2*nUvVerts,mesh.name) +
' ')
fp.write( ''.join([("%.4f %.4f " % tuple(uv)) for uv in mesh.texco]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor count="%d" source="#%s-UV-array" stride="2">\n' % (nUvVerts,mesh.name) +
' <param type="float" name="S"></param>\n' +
' <param type="float" name="T"></param>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.5, 0.7)
# Faces
fp.write(
' <vertices id="%s-Vertex">\n' % mesh.name +
' <input semantic="POSITION" source="#%s-Position"/>\n' % mesh.name +
' </vertices>\n')
checkFaces(mesh, nVerts, nUvVerts)
progress(0.7, 0.9)
writePolylist(fp, mesh, config)
progress(0.9, 0.99)
fp.write(
' </mesh>\n' +
' </geometry>\n')
if shapes is not None:
shaprog = Progress(len(shapes))
for name,shape in shapes:
writeShapeKey(fp, name, shape, mesh, config)
shaprog.step()
progress(1)
def writeShapeKey(fp, name, shape, mesh, config):
if len(shape.verts) == 0:
log.debug("Shapekey %s has zero verts. Ignored" % name)
return
progress = Progress()
# Verts
progress(0)
target = mesh.coord.copy()
target[:] += config.offset
target[shape.verts] += shape.data[np.s_[...]]
target = rotateCoord(config.scale*target, config)
nVerts = len(target)
fp.write(
' <geometry id="%sMeshMorph_%s" name="%s">\n' % (mesh.name, name, name) +
' <mesh>\n' +
' <source id="%sMeshMorph_%s-positions">\n' % (mesh.name, name) +
' <float_array id="%sMeshMorph_%s-positions-array" count="%d">\n' % (mesh.name, name, 3*nVerts) +
' ')
fp.write( ''.join([("%.4f %.4f %.4f " % tuple(co)) for co in target]) )
fp.write('\n' +
' </float_array>\n' +
' <technique_common>\n' +
' <accessor source="#%sMeshMorph_%s-positions-array" count="%d" stride="3">\n' % (mesh.name, name, nVerts) +
' <param name="X" type="float"/>\n' +
' <param name="Y" type="float"/>\n' +
' <param name="Z" type="float"/>\n' +
' </accessor>\n' +
' </technique_common>\n' +
' </source>\n')
progress(0.3)
# Polylist
nFaces = len(mesh.fvert)
fp.write(
' <vertices id="%sMeshMorph_%s-vertices">\n' % (mesh.name, name) +
' <input semantic="POSITION" source="#%sMeshMorph_%s-positions"/>\n' % (mesh.name, name) +
' </vertices>\n' +
' <polylist count="%d">\n' % nFaces +
' <input semantic="VERTEX" source="#%sMeshMorph_%s-vertices" offset="0"/>\n' % (mesh.name, name) +
#' <input semantic="NORMAL" source="#%sMeshMorph_%s-normals" offset="1"/>\n' % (mesh.name, name) +
' <vcount>')
fp.write( ''.join(["4 " for fv in mesh.fvert]) )
fp.write('\n' +
' </vcount>\n' +
' <p>')
fp.write( ''.join([("%d %d %d %d " % tuple(fv)) for fv in mesh.fvert]) )
fp.write('\n' +
' </p>\n' +
' </polylist>\n' +
' </mesh>\n' +
' </geometry>\n')
progress(1)
#
# writePolylist(fp, mesh, config):
#
def writePolylist(fp, mesh, config):
progress = Progress(2)
nFaces = len(mesh.fvert)
fp.write(
' <polylist count="%d">\n' % nFaces +
' <input offset="0" semantic="VERTEX" source="#%s-Vertex"/>\n' % mesh.name)
if config.useNormals:
fp.write(
' <input offset="1" semantic="NORMAL" source="#%s-Normals"/>\n' % mesh.name +
' <input offset="2" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name)
else:
fp.write(
' <input offset="1" semantic="TEXCOORD" source="#%s-UV"/>\n' % mesh.name)
vc = ''
p = ''
# get number of vertices per face
r = mesh.vertsPerFaceForExport
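    # COLLADA's <p> array interleaves one index per <input> per face corner:
    # (vertex, normal, uv) triples with normals, (vertex, uv) pairs without.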
for fn,fv in enumerate(mesh.fvert):
fuv = mesh.fuvs[fn]
vc += str(r) + ' '
if config.useNormals:
p += ''.join([("%d %d %d " % (fv[n], fv[n], fuv[n])) for n in range(r)])
else:
p += ''.join([("%d %d " % (fv[n], fuv[n])) for n in range(r)])
fp.write(
' <vcount>' + vc + '\n' +
' </vcount>\n' +
' <p>' + p + '\n' +
' </p>\n' +
' </polylist>\n')
progress.step()
#
# checkFaces(mesh, nVerts, nUvVerts):
#
def checkFaces(mesh, nVerts, nUvVerts):
    # Sanity check: verify every face's vertex and UV indices are in range before export.
for fn,fvs in enumerate(mesh.fvert):
for n,vn in enumerate(fvs):
uv = mesh.fuvs[fn][n]
if vn > nVerts:
raise NameError("v %d > %d" % (vn, nVerts))
if uv > nUvVerts:
raise NameError("uv %d > %d" % (uv, nUvVerts))
| makehuman-master/makehuman/plugins/9_export_collada/dae_geometry.py | 9,908 | 1,692 | en | 0.707142 |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.data import CacheDataset, DataLoader, Dataset
from monai.transforms import Compose, DataStatsd, Randomizable, SimulateDelayd
from monai.utils import set_determinism
TEST_CASE_1 = [[{"image": np.asarray([1, 2, 3])}, {"image": np.asarray([4, 5])}]]
TEST_CASE_2 = [[{"label": torch.as_tensor([[3], [2]])}, {"label": np.asarray([[1], [2]])}]]
class TestDataLoader(unittest.TestCase):
def test_values(self):
datalist = [
{"image": "spleen_19.nii.gz", "label": "spleen_label_19.nii.gz"},
{"image": "spleen_31.nii.gz", "label": "spleen_label_31.nii.gz"},
]
transform = Compose(
[
DataStatsd(keys=["image", "label"], data_shape=False, value_range=False, data_value=True),
SimulateDelayd(keys=["image", "label"], delay_time=0.1),
]
)
dataset = CacheDataset(data=datalist, transform=transform, cache_rate=0.5, cache_num=1)
n_workers = 0 if sys.platform == "win32" else 2
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=n_workers)
for d in dataloader:
self.assertEqual(d["image"][0], "spleen_19.nii.gz")
self.assertEqual(d["image"][1], "spleen_31.nii.gz")
self.assertEqual(d["label"][0], "spleen_label_19.nii.gz")
self.assertEqual(d["label"][1], "spleen_label_31.nii.gz")
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_exception(self, datalist):
dataset = Dataset(data=datalist, transform=None)
dataloader = DataLoader(dataset=dataset, batch_size=2, num_workers=0)
with self.assertRaisesRegex((TypeError, RuntimeError), "Collate error on the key"):
for _ in dataloader:
pass
class _RandomDataset(torch.utils.data.Dataset, Randomizable):
def __getitem__(self, index):
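        # `R` is the numpy RandomState supplied by MONAI's Randomizable mixin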
return self.R.randint(0, 1000, (1,))
def __len__(self):
return 8
class TestLoaderRandom(unittest.TestCase):
"""
Testing data loader working with the randomizable interface
"""
def setUp(self):
set_determinism(0)
def tearDown(self):
set_determinism(None)
def test_randomize(self):
dataset = _RandomDataset()
dataloader = DataLoader(dataset, batch_size=2, num_workers=3)
output = []
for _ in range(2):
for batch in dataloader:
output.extend(batch.data.numpy().flatten().tolist())
self.assertListEqual(output, [594, 170, 524, 778, 370, 906, 292, 589, 762, 763, 156, 886, 42, 405, 221, 166])
if __name__ == "__main__":
unittest.main()
| tests/test_dataloader.py | 3,311 | 614 | en | 0.851058 |
# qubit number=5
# total number=45
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use the U1 (P) gate with \lambda = pi (i.e. 180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
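    # Worked example of the Grover iteration count: for n = 5,
    # repeat = floor(sqrt(2 ** 5) * pi / 4) = floor(5.657 * 0.7854) = floor(4.44) = 4,
    # although the loop below runs only a single iteration.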
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=36
prog.cz(input_qubit[1],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=38
prog.x(input_qubit[0]) # number=29
prog.h(input_qubit[0]) # number=42
prog.cz(input_qubit[1],input_qubit[0]) # number=43
prog.h(input_qubit[0]) # number=44
prog.cx(input_qubit[0],input_qubit[1]) # number=32
prog.cx(input_qubit[0],input_qubit[1]) # number=39
prog.x(input_qubit[1]) # number=40
prog.cx(input_qubit[0],input_qubit[1]) # number=41
prog.cx(input_qubit[0],input_qubit[1]) # number=34
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=35
prog.h(input_qubit[2]) # number=27
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.z(input_qubit[1]) # number=31
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1005.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| benchmark/startQiskit1005.py | 4,079 | qubit number=5 total number=45 implement the oracle O_f^\pm NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate or multi_control_Z_gate (issue 127) oracle.h(controls[n]) oracle.barrier() circuit begin number=3 number=4 number=5 number=6 number=21 number=1 number=2 number=7 number=8 number=36 number=37 number=38 number=29 number=42 number=43 number=44 number=32 number=39 number=40 number=41 number=34 number=25 number=26 number=35 number=27 number=23 number=24 number=12 number=13 number=14 number=15 number=16 number=31 number=17 number=18 number=19 number=20 circuit end | 580 | en | 0.222829 |
#from mq import *
import sys, time
import urllib3
#networking library
import json
try:
print("Press CTRL+C to abort.")
#mq = MQ();
    http = urllib3.PoolManager()  # create the pool once; re-creating it every iteration wastes connections
    while True:
        #perc = mq.MQPercentage()
sys.stdout.write("\r")
sys.stdout.write("\033[K")
data = {
"error":False,
"device_id":"device123",
"fuse_stat":["0","1","0","1","0"]
}
encoded_data = json.dumps(data).encode('utf-8')#create JSON object
http.request(
'POST',
            'http://192.168.43.156/smartdbbox/api/public/api/device/db/update',# IP address of the API server
body=encoded_data,
headers={'Content-Type': 'application/json'} )
sys.stdout.flush()
time.sleep(0.1)
except KeyboardInterrupt:  # a bare except here would also swallow real errors
    print("\nAbort by user") | hardware/testing/fusecontrol.py | 821 | from mq import *networking librarymq = MQ();perc = mq.MQPercentage()create JSON objectIP add server
import click
from ...runner import events
from . import default
def handle_after_execution(context: events.ExecutionContext, event: events.AfterExecution) -> None:
context.endpoints_processed += 1
default.display_execution_result(context, event)
if context.endpoints_processed == event.schema.endpoints_count:
click.echo()
def handle_event(context: events.ExecutionContext, event: events.ExecutionEvent) -> None:
"""Short output style shows single symbols in the progress bar.
Otherwise, identical to the default output style.
"""
if isinstance(event, events.Initialized):
default.handle_initialized(context, event)
if isinstance(event, events.AfterExecution):
context.hypothesis_output.extend(event.hypothesis_output)
handle_after_execution(context, event)
if isinstance(event, events.Finished):
default.handle_finished(context, event)
if isinstance(event, events.Interrupted):
default.handle_interrupted(context, event)
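# Minimal usage sketch (illustrative; `context` and the event stream are
# assumed to come from the surrounding CLI wiring, which is not shown here):
#   for event in event_stream:
#       handle_event(context, event)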
| src/schemathesis/cli/output/short.py | 1,016 | Short output style shows single symbols in the progress bar.
Otherwise, identical to the default output style. | 111 | en | 0.607232 |
from ..utils import sortkey, capitalize_first
FIGURE_TEX_TEMPLATE = r'\hwgraphic{{{path}}}{{{headword}}}{{{attribution}}}'
# change to {filename} if you want to specify full paths.
FIGURE_PATH_TEMPLATE = r'figures/ill-{filename}'
class Image(object):
type = 'img'
def sk(self):
return sortkey(self.hw)
def __init__(self, hw='', img_src='', img_attrib=''):
super().__init__()
self.hw = hw
self.img_src = img_src
self.img_attrib = img_attrib
def __repr__(self):
return "(Image of '{headword}')".format(
headword=self.hw
)
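    # Illustrative render() output (values hypothetical): an Image with
    # hw='aardvark', img_src='aardvark.png', img_attrib='J. Doe' renders to
    #   \hwgraphic{figures/ill-aardvark.png}{Aardvark}{J. Doe}
    # assuming capitalize_first() upper-cases the first letter of the headword.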
def render(self, settings={}):
figure_path = FIGURE_PATH_TEMPLATE.format(filename=self.img_src)
return FIGURE_TEX_TEMPLATE.format(
headword=capitalize_first(self.hw),
path=figure_path,
attribution=self.img_attrib
)
| sfm2latex/dictionary/Image.py | 890 | change to {filename} if you want to specify full paths. | 55 | en | 0.612925 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
def console_namespace():
import console_python
get_consoles = console_python.get_console
consoles = getattr(get_consoles, "consoles", None)
if consoles:
for console, stdout, stderr in get_consoles.consoles.values():
return console.locals
return {}
def is_display_list(listvar):
from mathutils import Vector
for var in listvar:
if type(var) is not Vector:
return False
return True
class VarStates:
@staticmethod
def store_states():
# Store the display states, called upon unregister the Add-on
# This is useful when you press F8 to reload the Addons.
# Then this function preserves the display states of the
# console variables.
state_props = bpy.context.window_manager.MathVisStatePropList
variables = get_math_data()
for key, ktype in variables.items():
if key and key not in state_props:
prop = state_props.add()
prop.name = key
prop.ktype = ktype.__name__
prop.state = [True, False]
@staticmethod
def get_index(key):
index = bpy.context.window_manager.MathVisStatePropList.find(key)
return index
@staticmethod
def delete(key):
state_props = bpy.context.window_manager.MathVisStatePropList
index = state_props.find(key)
if index != -1:
state_props.remove(index)
@staticmethod
def toggle_display_state(key):
state_props = bpy.context.window_manager.MathVisStatePropList
if key in state_props:
state_props[key].state[0] = not state_props[key].state[0]
else:
print("Odd: Can not find key %s in MathVisStateProps" % (key))
@staticmethod
def toggle_lock_state(key):
state_props = bpy.context.window_manager.MathVisStatePropList
if key in state_props:
state_props[key].state[1] = not state_props[key].state[1]
else:
print("Odd: Can not find key %s in MathVisStateProps" % (key))
def get_math_data():
from mathutils import Matrix, Vector, Quaternion, Euler
locals = console_namespace()
if not locals:
return {}
variables = {}
for key, var in locals.items():
if len(key) == 0 or key[0] == "_":
continue
type_var = type(var)
# Rules out sets/dicts.
# It's also possible the length check below is slow
# for data with underlying linked-list structure.
if not hasattr(type_var, "__getitem__"):
continue
# Don't do a truth test on the data because this causes an error with some
# array types, see T66107.
len_fn = getattr(type_var, "__len__", None)
if len_fn is None:
continue
if len_fn(var) == 0:
continue
if type_var in {Matrix, Vector, Quaternion, Euler} or \
type_var in {tuple, list} and is_display_list(var):
variables[key] = type_var
return variables
def cleanup_math_data():
locals = console_namespace()
if not locals:
return
variables = get_math_data()
for key in variables.keys():
index = VarStates.get_index(key)
if index == -1:
continue
state_prop = bpy.context.window_manager.MathVisStatePropList.get(key)
if state_prop.state[1]:
continue
del locals[key]
bpy.context.window_manager.MathVisStatePropList.remove(index)
def console_math_data():
from mathutils import Matrix, Vector, Quaternion, Euler
data_matrix = {}
data_quat = {}
data_euler = {}
data_vector = {}
data_vector_array = {}
for key, var in console_namespace().items():
if key[0] == "_":
continue
state_prop = bpy.context.window_manager.MathVisStatePropList.get(key)
if state_prop:
disp, lock = state_prop.state
if not disp:
continue
var_type = type(var)
if var_type is Matrix:
if len(var.col) != 4 or len(var.row) != 4:
if len(var.col) == len(var.row):
var = var.to_4x4()
else: # todo, support 4x3 matrix
continue
data_matrix[key] = var
elif var_type is Vector:
if len(var) < 3:
var = var.to_3d()
data_vector[key] = var
elif var_type is Quaternion:
data_quat[key] = var
elif var_type is Euler:
data_euler[key] = var
elif var_type in {list, tuple} and is_display_list(var):
data_vector_array[key] = var
return data_matrix, data_quat, data_euler, data_vector, data_vector_array
| Blender 2.91/2.91/scripts/addons/space_view3d_math_vis/utils.py | 5,621 | BEGIN GPL LICENSE BLOCK This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. END GPL LICENSE BLOCK <pep8 compliant> Store the display states, called upon unregister the Add-on This is useful when you press F8 to reload the Addons. Then this function preserves the display states of the console variables. Rules out sets/dicts. It's also possible the length check below is slow for data with underlying linked-list structure. Don't do a truth test on the data because this causes an error with some array types, see T66107. todo, support 4x3 matrix | 1,187 | en | 0.880783 |
from django_unicorn.components import QuerySetType, UnicornView
from example.coffee.models import Flavor, Taste
class AddFlavorView(UnicornView):
is_adding = False
flavors = None
flavor_qty = 1
flavor_id = None
def __init__(self, *args, **kwargs):
super().__init__(**kwargs) # calling super is required
self.flavor_id = kwargs.get('flavor_id')
self.is_adding = False
def create(self):
if int(self.flavor_qty) > 0:
for i in range(int(self.flavor_qty)):
                flavor = Flavor.objects.create(id=self.flavor_id)  # create() already saves; the extra save() was redundant
            print("created flavor")
self.is_adding = False
self.show_table()
def add_flavor(self):
self.is_adding = True
self.show_table()
def cancel(self):
self.is_adding = False
self.show_table()
def show_table(self):
self.flavors = Flavor.objects.all()
def mount(self):
self.show_table() | example/unicorn/components/add_flavor.py | 1,019 | calling super is required | 25 | en | 0.893343 |
""" Measure stent migration relative to renals
Option to visualize 2 longitudinal scans
"""
import sys, os
import numpy as np  # needed by get_stent_orientation below
import visvis as vv
from stentseg.utils.datahandling import select_dir, loadvol, loadmodel, loadmesh
from stentseg.stentdirect.stentgraph import create_mesh
from stentseg.utils.visualization import show_ctvolume
from stentseg.utils import _utils_GUI, PointSet
from stentseg.utils.picker import pick3d
from stentseg.utils.centerline import find_centerline, points_from_mesh, smooth_centerline, dist_over_centerline
from lspeas.analysis.utils_analysis import ExcelAnalysis
from stentseg.utils.utils_graphs_pointsets import point_in_pointcloud_closest_to_p
#sys.path.insert(0, os.path.abspath('..')) # parent, 2 folders further in pythonPath
#import utils_analysis
#from utils_analysis import ExcelAnalysis
#import get_anaconda_ringparts
from lspeas.utils.get_anaconda_ringparts import _get_model_hooks,get_midpoints_peaksvalleys,identify_peaks_valleys
#todo: from outline to script:
## Initialize
# select the ssdf basedir
basedir = select_dir(r'F:\LSPEAS\LSPEAS_ssdf',
                     r'F:\LSPEAS_ssdf_backup')
basedirstl = select_dir(r'D:\Profiles\koenradesma\Dropbox\UTdrive\MedDataMimics\LSPEAS_Mimics\Tests')
# select dataset
ptcode = 'LSPEAS_003'
ctcodes = ctcode1, ctcode2 = 'discharge', '12months' # ctcode2 = None if no second code
cropname = 'ring'
modelname = 'modelavgreg'
vesselname1 = 'LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_001.stl'
# LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_noRenals 7_001
vesselname2 = 'LSPEAS_003_12M_MK Smoothed_Wrapped1.0_smart 3_copy_001.stl'
sheet_renals_obs = 'renal locations obs1'
showAxis = True # True or False
showVol = 'ISO' # MIP or ISO or 2D or None
ringpart = True # True; False
clim0 = (0,2500)
# clim0 = -550,500
isoTh = 250
meshradius = 0.7
# create class object for excel analysis
foo = ExcelAnalysis() # excel locations initialized in class
## Renal origin coordinates: input by user/read excel
# coordinates, left and right most caudal renal
# ctcode1
xrenal1, yrenal1, zrenal1 = 132.7, 89.2, 85.5
renal1 = PointSet(list((xrenal1, yrenal1, zrenal1)))
# ctcode2
if ctcode2:
xrenal2, yrenal2, zrenal2 = 171, 165.1, 39.5
renal2 = PointSet(list((xrenal2, yrenal2, zrenal2)))
# renal_left, renal_right = foo.readRenalsExcel(sheet_renals_obs, ptcode, ctcode1)
# renal1 = renal_left
## Load (dynamic) stent models, vessel, ct
# Load static CT image to add as reference
s = loadvol(basedir, ptcode, ctcode1, cropname, 'avgreg')
vol1 = s.vol
if ctcode2:
s = loadvol(basedir, ptcode, ctcode2, cropname, 'avgreg')
vol2 = s.vol
# load stent model
s2 = loadmodel(basedir, ptcode, ctcode1, cropname, modelname)
model1 = s2.model
modelmesh1 = create_mesh(model1, meshradius)
if ctcode2:
s2 = loadmodel(basedir, ptcode, ctcode2, cropname, modelname)
model2 = s2.model
modelmesh2 = create_mesh(model2, meshradius)
# Load vessel mesh (output Mimics)
vessel1 = loadmesh(basedirstl,ptcode,vesselname1) #inverts Z
if ctcode2:
vessel2 = loadmesh(basedirstl,ptcode,vesselname2) #inverts Z
# get pointset from STL
ppvessel1 = points_from_mesh(vessel1, invertZ = False) # removes duplicates
if ctcode2:
ppvessel2 = points_from_mesh(vessel2, invertZ = False) # removes duplicates
## Create centerline: input start/end
# ctcode1
c1_start1 = (153, 86, 104.5) # distal end
c1_ends = [(142, 94, 64.5)] # either single point or multiple
centerline1 = find_centerline(ppvessel1, c1_start1, c1_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline1 = smooth_centerline(centerline1, 30) # 20 iterations for stepsize 0.5 is reasonable
# ctcode2
if ctcode2:
c2_start1 = (190, 165, 60) # distal end
c2_ends = [(179, 169, 17)] # either single point or multiple
centerline2 = find_centerline(ppvessel2, c2_start1, c2_ends, 0.5, ndist=20, regfactor=0.2, regsteps=10)
centerline2 = smooth_centerline(centerline2, 30)
# scipy.ndimage.interpolation.zoom
# scipy.interpolate.interpn
## Get peak and valley points
if False:
# ===== OPTION automated detection =====
# get midpoints peaks valleys
midpoints_peaks_valleys = get_midpoints_peaksvalleys(model1)
    # from peaks valley pointcloud identify peaks and valleys
R1_left,R2_left,R1_right,R2_right,R1_ant,R2_ant,R1_post,R2_post = identify_peaks_valleys(
midpoints_peaks_valleys, model1, vol1,vis=True)
# ===== OPTION excel =====
R1 = foo.readRingExcel(ptcode, ctcode1, ring='R1')
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
##
#todo: determine the aorta orientation from the 4 hooks -> average angle
# and use it to correct the z distance
R2 = foo.readRingExcel(ptcode, ctcode1, ring='R2')
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
def get_stent_orientation(R1, R2):
R1, R2 = np.asarray(R1), np.asarray(R2)
R1, R2 = PointSet(R1), PointSet(R2) # turn array ndim2 into PointSet
R1_ant, R1_post, R1_left, R1_right = R1[0], R1[1], R1[2], R1[3]
R2_ant, R2_post, R2_left, R2_right = R2[0], R2[1], R2[2], R2[3]
refvector = [0,0,10] # z-axis
    angle = (R1_ant-R2_ant).angle(refvector) # order does not matter
    return angle
## Calculate distance ring peaks and valleys to renal
# ===== in Z =====
# proximal to renal is positive; origin is proximal
z_dist_R1_ant = list(renal1.flat)[2]-R1_ant[2]
z_dist_R1_post = list(renal1.flat)[2]-R1_post[2]
z_dist_R1_left = list(renal1.flat)[2]-R1_left[2]
z_dist_R1_right = list(renal1.flat)[2]-R1_right[2]
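# Illustrative sign check (the peak z value is hypothetical): with the renal
# origin at z = 85.5 and a ring peak at z = 80.0, z_dist = 85.5 - 80.0 = +5.5,
# i.e. the peak lies 5.5 mm proximal to the renal origin (smaller z is more
# proximal here).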
# ===== along centerline =====
# point of centerline closest to renal
renal1_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, renal1)
if ctcode2:
renal2_and_cl_point = point_in_pointcloud_closest_to_p(centerline2, renal2)
# point of centerline closest to peaks valleys
R1_left_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_left)
R1_right_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_right)
R1_ant_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_ant)
R1_post_and_cl_point = point_in_pointcloud_closest_to_p(centerline1, R1_post)
# calculate distance over centerline
dist_for_R1_left = dist_over_centerline(centerline1, R1_left_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_right = dist_over_centerline(centerline1, R1_right_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_ant = dist_over_centerline(centerline1, R1_ant_and_cl_point[0], renal1_and_cl_point[0])
dist_for_R1_post = dist_over_centerline(centerline1, R1_post_and_cl_point[0], renal1_and_cl_point[0])
# Main outcome 1: distance 2nd ring valleys to renal
# Main outcome 2: migration 2nd ring valleys from discharge to 1, 6, 12 months
## Visualize
f = vv.figure(2); vv.clf()
f.position = 0.00, 22.00, 1920.00, 1018.00
alpha = 0.5
if ctcode2:
a1 = vv.subplot(121)
else:
a1 = vv.gca()
show_ctvolume(vol1, model1, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol1)
model1.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh1)
vm.faceColor = 'g'
# m = vv.mesh(vessel1)
# m.faceColor = (1,0,0, alpha) # red
# vis vessel, centerline, renal origo, peaks valleys R1
vv.plot(ppvessel1, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a1) # vessel
vv.plot(PointSet(list(c1_start1)), ms='.', ls='', mc='g', mw=18, axes = a1) # start1
vv.plot([e[0] for e in c1_ends], [e[1] for e in c1_ends], [e[2] for e in c1_ends], ms='.', ls='', mc='b', mw=18, axes = a1) # ends
vv.plot(centerline1, ms='.', ls='', mw=8, mc='y', axes = a1)
vv.plot(renal1, ms='.', ls='', mc='m', mw=18, axes = a1)
vv.plot(renal1_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a1)
# vv.plot(R1_left_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_right_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_ant_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
# vv.plot(R1_post_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode1))
a1.axis.axisColor= 1,1,1
a1.bgcolor= 0,0,0
a1.daspect= 1, 1, -1 # z-axis flipped
a1.axis.visible = showAxis
if ctcode2:
a2 = vv.subplot(122)
show_ctvolume(vol2, model2, showVol=showVol, clim=clim0, isoTh=isoTh)
pick3d(vv.gca(), vol2)
model2.Draw(mc='b', mw = 10, lc='g')
vm = vv.mesh(modelmesh2)
vm.faceColor = 'g'
# m = vv.mesh(vessel2)
# m.faceColor = (1,0,0, alpha) # red
# vis vessel, centerline, renal origo, peaks valleys R1
vv.plot(ppvessel2, ms='.', ls='', mc= 'r', alpha=0.2, mw = 7, axes = a2) # vessel
vv.plot(PointSet(list(c2_start1)), ms='.', ls='', mc='g', mw=18, axes = a2) # start1
vv.plot([e[0] for e in c2_ends], [e[1] for e in c2_ends], [e[2] for e in c2_ends], ms='.', ls='', mc='b', mw=18, axes = a2) # ends
vv.plot(centerline2, ms='.', ls='', mw=8, mc='y', axes = a2)
vv.plot(renal2, ms='.', ls='', mc='m', mw=18, axes = a2)
vv.plot(renal2_and_cl_point, ms='.', ls='-', mc='m', mw=18, axes = a2)
vv.xlabel('x (mm)');vv.ylabel('y (mm)');vv.zlabel('z (mm)')
vv.title('Analysis for model LSPEAS %s - %s' % (ptcode[7:], ctcode2))
a2.axis.axisColor= 1,1,1
a2.bgcolor= 0,0,0
a2.daspect= 1, 1, -1 # z-axis flipped
a2.axis.visible = showAxis
| lspeas/analysis/stent_migration.py | 9,297 | Measure stent migration relative to renals
Option to visualize 2 longitudinal scans
sys.path.insert(0, os.path.abspath('..')) parent, 2 folders further in pythonPathimport utils_analysisfrom utils_analysis import ExcelAnalysisimport get_anaconda_ringpartstodo: from outline to script: Initialize select the ssdf basedir select dataset ctcode2 = None if no second code LSPEAS_003_D_MK Smoothed_Wrapped1.0_edit-smart 4_copy_noRenals 7_001 True or False MIP or ISO or 2D or None True; False clim0 = -550,500 create class object for excel analysis excel locations initialized in class Renal origin coordinates: input by user/read excel coordinates, left and right most caudal renal ctcode1 ctcode2 renal_left, renal_right = foo.readRenalsExcel(sheet_renals_obs, ptcode, ctcode1) renal1 = renal_left Load (dynamic) stent models, vessel, ct Load static CT image to add as reference load stent model Load vessel mesh (output Mimics)inverts Zinverts Z get pointset from STL removes duplicates removes duplicates Create centerline: input start/end ctcode1 distal end either single point or multiple 20 iterations for stepsize 0.5 is reasonable ctcode2 distal end either single point or multiple scipy.ndimage.interpolation.zoom scipy.interpolate.interpn Get peak and valley points ===== OPTION automated detection ===== get midpoints peaks valleys from peaks valley pointcloud identiy peaks and valleys ===== OPTION excel =====todo: orientatie aorta bepalen dmv 4 hooks -> gemiddelde hoek z distance hiermee corrigeren turn array ndim2 into PointSet z-axis order does not matter Calculate distance ring peaks and valleys to renal ===== in Z ===== proximal to renal is positive; origin is proximal ===== along centerline ===== point of centerline closest to renal point of centerline closest to peaks valleys calculate distance over centerline Main outcome 1: distance 2nd ring valleys to renal Main outcome 2: migration 2nd ring valleys from discharge to 1, 6, 12 months Visualize m = vv.mesh(vessel1) m.faceColor = (1,0,0, alpha) red vis vessel, centerline, renal origo, peaks valleys R1 vessel start1 ends vv.plot(R1_left_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1) vv.plot(R1_right_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1) vv.plot(R1_ant_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1) vv.plot(R1_post_and_cl_point, ms='.', ls='-', mc='c', mw=18, axes = a1) z-axis flipped m = vv.mesh(vessel2) m.faceColor = (1,0,0, alpha) red vis vessel, centerline, renal origo, peaks valleys R1 vessel start1 ends z-axis flipped | 2,548 | en | 0.594953 |
"""
Django settings for my_site project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '98!@@ullqs8&yxhj7as31h-$lhdu691dnz@ch$(tsj@pe)ak&7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'testapp.apps.TestappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
STATICFILES_DIRS = (
    # Note: this points inside STATIC_ROOT (i.e. BASE_DIR/static/static), which
    # is unusual; STATICFILES_DIRS normally lists source dirs outside STATIC_ROOT.
    os.path.join(STATIC_ROOT, 'static/'),
)
| my_site/settings.py | 3,452 | Django settings for my_site project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.11/ref/settings/databases Password validation https://docs.djangoproject.com/en/1.11/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/1.11/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.11/howto/static-files/ | 997 | en | 0.647074 |
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
import time, math
import numpy as np
from pnc.interface import Interface
from config.manipulator_config import ManipulatorConfig
from pnc.robot_system.pinocchio_robot_system import PinocchioRobotSystem
class ManipulatorInterface(Interface):
def __init__(self):
super(ManipulatorInterface, self).__init__()
self._robot = PinocchioRobotSystem(
cwd + "/robot_model/manipulator/three_link_manipulator.urdf",
cwd + "/robot_model/manipulator", True,
ManipulatorConfig.PRINT_ROBOT_INFO)
def get_command(self, sensor_data):
# Update Robot
self._robot.update_system(
sensor_data["base_com_pos"], sensor_data["base_com_quat"],
sensor_data["base_com_lin_vel"], sensor_data["base_com_ang_vel"],
sensor_data["base_joint_pos"], sensor_data["base_joint_quat"],
sensor_data["base_joint_lin_vel"],
sensor_data["base_joint_ang_vel"], sensor_data["joint_pos"],
sensor_data["joint_vel"])
# Operational Space Control
jtrq_cmd = self._compute_osc_command()
jpos_cmd = np.zeros_like(jtrq_cmd)
jvel_cmd = np.zeros_like(jtrq_cmd)
# Compute Cmd
command = self._robot.create_cmd_ordered_dict(jpos_cmd, jvel_cmd,
jtrq_cmd)
# Increase time variables
self._count += 1
self._running_time += ManipulatorConfig.DT
return command
def _compute_osc_command(self):
## TODO : Implement Operational Space Control
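        # A hedged sketch of what could go here (illustrative only; the
        # task-space getters are assumed names, not confirmed
        # PinocchioRobotSystem API):
        #   J = task Jacobian of the end-effector
        #   f_task = Kp @ (x_des - x) - Kd @ xdot   # task-space PD force
        #   jtrq = J.T @ f_task                     # map to joint torques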
jtrq = np.zeros(self._robot.n_a)
return jtrq
| pnc/manipulator_pnc/manipulator_interface.py | 1,701 | Update Robot Operational Space Control Compute Cmd Increase time variables TODO : Implement Operational Space Control | 117 | en | 0.543263 |
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import collections
import copy
import time
from datetime import timedelta
from sklearn.cluster import DBSCAN, KMeans
from sklearn.preprocessing import normalize
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.nn.functional as F
sys.path.append(".")
from reid import datasets
from reid import models
# from reid.models.dsbn import convert_dsbn, convert_bn
# from reid.models.csbn import convert_csbn
# from reid.models.idm_dsbn import convert_dsbn_idm, convert_bn_idm
# from reid.models.xbm import XBM
from reid.trainers import RSCTrainer
from reid.evaluators import Evaluator, extract_features
from reid.utils.data import CommDataset
from reid.utils.data import IterLoader
from reid.utils.data import transforms as T
from reid.utils.data.sampler import RandomMultipleGallerySampler
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from reid.utils.rerank import compute_jaccard_distance
start_epoch = best_mAP = 0
def get_data(name, data_dir, combineall=False):
# data_dir = '/data/datasets'
root = osp.join(data_dir, name)
dataset = datasets.create(name, root, combineall=combineall)
return dataset
def get_train_loader(args, dataset, height, width, batch_size, workers,
num_instances, iters, trainset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.RandomHorizontalFlip(p=0.5),
T.Pad(10),
T.RandomCrop((height, width)),
T.ToTensor(),
normalizer,
# T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])
])
train_set = sorted(dataset.train) if trainset is None else sorted(trainset)
rmgs_flag = num_instances > 0
if rmgs_flag:
sampler = RandomMultipleGallerySampler(train_set, num_instances)
else:
sampler = None
train_loader = IterLoader(
DataLoader(Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),
batch_size=batch_size, num_workers=workers, sampler=sampler,
shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)
return train_loader
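# Note on the sampler arithmetic (using the CLI defaults defined below): with
# batch_size=64 and num_instances=4, each minibatch holds 64 // 4 = 16 distinct
# identities, each contributing 4 instances.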
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
if (testset is None):
testset = list(set(dataset.query) | set(dataset.gallery))
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def create_model(args):
model = models.create(args.arch, num_features=args.features, norm=False, dropout=args.dropout,
num_classes=args.nclass)
# use CUDA
model.cuda()
model = nn.DataParallel(model)
return model
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
start_time = time.monotonic()
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create datasets
iters = args.iters if (args.iters>0) else None
print("==> Load source-domain dataset")
train_items = []
for src in args.dataset_source.split(','):
dataset = get_data(src, args.data_dir, args.combine_all)
train_items.extend(dataset.train)
dataset_source = CommDataset(train_items)
print("==> Load target-domain dataset")
dataset_target = get_data(args.dataset_target, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
train_loader_source = get_train_loader(args, dataset_source, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters)
source_classes = dataset_source.num_train_pids
args.nclass = source_classes
# Create model
model = create_model(args)
print(model)
# Evaluator
evaluator = Evaluator(model)
# Optimizer
params = [{"params": [value]} for _, value in model.named_parameters() if value.requires_grad]
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
# Trainer
trainer = RSCTrainer(model, args.nclass, margin=args.margin)
for epoch in range(args.epochs):
train_loader_source.new_epoch()
# train_loader_target.new_epoch()
trainer.train(epoch, train_loader_source, optimizer, print_freq=args.print_freq, train_iters=args.iters)
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
print('Test on target: ', args.dataset_target)
_, mAP = evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
is_best = (mAP>best_mAP)
best_mAP = max(mAP, best_mAP)
save_checkpoint({
'state_dict': model.state_dict(),
'epoch': epoch + 1,
'best_mAP': best_mAP,
}, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
print('\n * Finished epoch {:3d} model mAP: {:5.1%} best: {:5.1%}{}\n'.
format(epoch, mAP, best_mAP, ' *' if is_best else ''))
lr_scheduler.step()
print ('==> Test with the best model on the target domain:')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model.load_state_dict(checkpoint['state_dict'])
evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
end_time = time.monotonic()
print('Total running time: ', timedelta(seconds=end_time - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Self-paced contrastive learning on UDA re-ID")
# data
parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc')
parser.add_argument('-dt', '--dataset-target', type=str, default='market1501')
parser.add_argument('--combine-all', action='store_true',
help="if True: combinall train, query, gallery for training;")
parser.add_argument('-b', '--batch-size', type=int, default=64)
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 0 (NOT USE)")
# cluster
parser.add_argument('--eps', type=float, default=0.6,
help="max neighbor distance for DBSCAN")
parser.add_argument('--k1', type=int, default=30,
help="hyperparameter for jaccard distance")
parser.add_argument('--k2', type=int, default=6,
help="hyperparameter for jaccard distance")
parser.add_argument('--nclass', type=int, default=1000,
help="number of classes (source+target)")
parser.add_argument('--s-class', type=int, default=1000,
help="number of classes (source)")
parser.add_argument('--t-class', type=int, default=1000,
help="number of classes (target)")
# loss
parser.add_argument('--margin', type=float, default=0.3,
help="margin for triplet loss")
parser.add_argument('--mu1', type=float, default=0.5,
help="weight for loss_bridge_pred")
parser.add_argument('--mu2', type=float, default=0.1,
help="weight for loss_bridge_feat")
parser.add_argument('--mu3', type=float, default=1,
help="weight for loss_div")
# model
parser.add_argument('-a', '--arch', type=str, default='resnet50_idm',
choices=models.names())
parser.add_argument('--features', type=int, default=0)
parser.add_argument('--dropout', type=float, default=0)
# xbm parameters
parser.add_argument('--memorySize', type=int, default=8192,
help='meomory bank size')
parser.add_argument('--ratio', type=float, default=1,
help='memorySize=ratio*data_size')
parser.add_argument('--featureSize', type=int, default=2048)
parser.add_argument('--use-xbm', action='store_true', help="if True: strong baseline; if False: naive baseline")
# optimizer
parser.add_argument('--lr', type=float, default=0.00035,
help="learning rate")
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--epochs', type=int, default=60)
parser.add_argument('--iters', type=int, default=200)
parser.add_argument('--step-size', type=int, default=30)
# training configs
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--eval-step', type=int, default=10)
# path
working_dir = osp.dirname(osp.abspath(__file__))
parser.add_argument('--data-dir', type=str, default='/data/datasets')
parser.add_argument('--logs-dir', type=str, metavar='PATH',
default=osp.join(working_dir, 'logs'))
# hbchen
parser.add_argument('--csdn', type=bool, default=False)
main()
| examples/rsc_baseline.py | 10,695 | from reid.models.dsbn import convert_dsbn, convert_bn from reid.models.csbn import convert_csbn from reid.models.idm_dsbn import convert_dsbn_idm, convert_bn_idm from reid.models.xbm import XBM data_dir = '/data/datasets' T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406]) use CUDA Create datasets Create model Evaluator Optimizer Trainer train_loader_target.new_epoch() data cluster loss model xbm parameters optimizer training configs path hbchen | 458 | en | 0.397495 |
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains support for various built-in output mechanisms.
Here, a base OutputToFile class is implemented to provide simple output to
a file via the pickle serialization mechanism. It can be subclassed to implement
alternative serialization schemes, see json_factory.py and mfg_inspector.py for
examples.
"""
import contextlib
try:
import cPickle as pickle
except:
import pickle
import shutil
import tempfile
from openhtf import util
from openhtf.util import data
import six
# TODO(wallacbe): Switch to util
class Atomic(object):
"""Class that does atomic write in a contextual manner."""
def __init__(self, filename):
self.filename = filename
self.temp = tempfile.NamedTemporaryFile(delete=False)
def write(self, write_data):
if hasattr(write_data, 'decode'):
return self.temp.write(write_data)
return self.temp.write(write_data.encode())
def close(self):
self.temp.close()
shutil.move(self.temp.name, self.filename)
class OutputToFile(object):
"""Output the given TestRecord to a file.
Instances of this class are intended to be used as an output callback
(see Test.add_output_callbacks) to output TestRecord results to a file.
This base implementation outputs the TestRecord by serializing it via
the pickle module. Subclasses may change this by overriding the
serialize_test_record() method. Additionally, subclasses may implement
more complex file naming mechanisms by overriding the open_file() method.
Args:
test_record: The TestRecord to write out to a file.
"""
def __init__(self, filename_pattern):
self.filename_pattern = filename_pattern
@staticmethod
def serialize_test_record(test_record):
"""Override method to alter how test records are serialized to file data."""
return pickle.dumps(test_record, -1)
@staticmethod
def open_file(filename):
"""Override method to alter file open behavior or file types."""
return Atomic(filename)
@contextlib.contextmanager
def open_output_file(self, test_record):
"""Open file based on pattern."""
# Ignore keys for the log filename to not convert larger data structures.
record_dict = data.convert_to_base_types(
test_record, ignore_keys=('code_info', 'phases', 'log_records'))
pattern = self.filename_pattern
if isinstance(pattern, six.string_types) or callable(pattern):
output_file = self.open_file(util.format_string(pattern, record_dict))
try:
yield output_file
finally:
output_file.close()
elif hasattr(self.filename_pattern, 'write'):
yield self.filename_pattern
else:
raise ValueError(
'filename_pattern must be string, callable, or File-like object')
def __call__(self, test_record):
with self.open_output_file(test_record) as outfile:
outfile.write(self.serialize_test_record(test_record))
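# Example subclass (illustrative sketch; the real JSON output callback lives in
# json_factory.py): only the serialize_test_record() hook needs overriding.
#   import json
#   class OutputToJson(OutputToFile):
#       @staticmethod
#       def serialize_test_record(test_record):
#           return json.dumps(data.convert_to_base_types(test_record))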
| openhtf/output/callbacks/__init__.py | 3,481 | Class that does atomic write in a contextual manner.
Output the given TestRecord to a file.
Instances of this class are intended to be used as an output callback
(see Test.add_output_callbacks) to output TestRecord results to a file.
This base implementation outputs the TestRecord by serializing it via
the pickle module. Subclasses may change this by overriding the
serialize_test_record() method. Additionally, subclasses may implement
more complex file naming mechanisms by overriding the open_file() method.
Args:
test_record: The TestRecord to write out to a file.
Override method to alter file open behavior or file types.
Open file based on pattern.
Override method to alter how test records are serialized to file data.
This module contains support for various built-in output mechanisms.
Here, a base OutputToFile class is implemented to provide simple output to
a file via the pickle serialization mechanism. It can be subclassed to implement
alternative serialization schemes, see json_factory.py and mfg_inspector.py for
examples.
Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO(wallacbe): Switch to util Ignore keys for the log filename to not convert larger data structures. | 1,725 | en | 0.831219 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CustomPurchaseReceiptItem(Document):
pass
| optic_store/optic_store/doctype/custom_purchase_receipt_item/custom_purchase_receipt_item.py | 267 | -*- coding: utf-8 -*- Copyright (c) 2019, 9T9IT and contributors For license information, please see license.txt | 112 | en | 0.816219 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import random
import torch
import torchvision
from torchvision.transforms import functional as F
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class Resize(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = random.choice(self.min_size)
max_size = self.max_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def __call__(self, image, target):
size = self.get_size(image.size)
image = F.resize(image, size)
target = target.resize(image.size)
return image, target
class FixedResize(object):
def __init__(self, min_size, max_size):
if not isinstance(min_size, (list, tuple)):
min_size = (min_size,)
self.min_size = min_size
self.max_size = max_size
# modified from torchvision to add support for max size
def get_size(self, image_size):
w, h = image_size
size = self.min_size[0]
if w < h:
return (self.max_size, size)
else:
return (size, self.max_size)
def __call__(self, image, target):
size = self.get_size(image.size)
image = F.resize(image, size)
target = target.resize(image.size)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob:
image = F.hflip(image)
target = target.transpose(0)
return image, target
class ToTensor(object):
def __call__(self, image, target):
return F.to_tensor(image), target
class Normalize(object):
def __init__(self, mean, std, to_bgr255=True):
self.mean = mean
self.std = std
self.to_bgr255 = to_bgr255
def __call__(self, image, target):
if self.to_bgr255:
image = image[[2, 1, 0]] * 255
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
| fcos_core/data/transforms/transforms.py | 3,275 | Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. modified from torchvision to add support for max size modified from torchvision to add support for max size | 177 | en | 0.900704 |
# -*- coding:utf-8 -*-
"""
FTX Trade module.
https://docs.ftx.com/
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import time
import zlib
import json
import copy
import hmac
import base64
from urllib.parse import urljoin
from collections import defaultdict, deque
from typing import DefaultDict, Deque, List, Dict, Tuple, Optional, Any
from itertools import zip_longest
from requests import Request
from quant.gateway import ExchangeGateway
from quant.state import State
from quant.order import Order, Fill, SymbolInfo
from quant.tasks import SingleTask, LoopRunTask
from quant.position import Position, MARGIN_MODE_CROSSED
from quant.asset import Asset
from quant.const import MARKET_TYPE_KLINE, INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION
from quant.utils import tools, logger
from quant.utils.websocket import Websocket
from quant.utils.http_client import AsyncHttpRequests
from quant.utils.decorator import async_method_locker
from quant.order import ORDER_ACTION_BUY, ORDER_ACTION_SELL
from quant.order import ORDER_TYPE_LIMIT, ORDER_TYPE_MARKET
from quant.order import LIQUIDITY_TYPE_MAKER, LIQUIDITY_TYPE_TAKER
from quant.order import ORDER_STATUS_SUBMITTED, ORDER_STATUS_PARTIAL_FILLED, ORDER_STATUS_FILLED, ORDER_STATUS_CANCELED, ORDER_STATUS_FAILED
from quant.market import Kline, Orderbook, Trade, Ticker
__all__ = ("FTXRestAPI", "FTXTrader", )
class FTXRestAPI:
"""
"""
def __init__(self, host, api_key=None, api_secret=None, subaccount_name=None) -> None:
self._host = host
self._api_key = api_key
self._api_secret = api_secret
self._subaccount_name = subaccount_name
async def _request(self, method: str, path: str, **kwargs) -> Any:
url = self._host + "/api/" + path
request = Request(method, url, **kwargs)
if self._api_key and self._api_secret:
self._sign_request(request)
_, success, error = await AsyncHttpRequests.fetch(method, url, headers=request.headers, timeout=10, **kwargs)
return success, error
def _sign_request(self, request: Request) -> None:
ts = int(time.time() * 1000)
prepared = request.prepare()
signature_payload = f'{ts}{prepared.method}{prepared.path_url}'.encode()
if prepared.body:
signature_payload += prepared.body
signature = hmac.new(self._api_secret.encode(), signature_payload, 'sha256').hexdigest()
request.headers['FTX-KEY'] = self._api_key
request.headers['FTX-SIGN'] = signature
request.headers['FTX-TS'] = str(ts)
if self._subaccount_name:
request.headers['FTX-SUBACCOUNT'] = self._subaccount_name
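    # Illustrative signature payload (timestamp hypothetical): a GET of
    # /api/account at ts=1588591511721 signs b'1588591511721GET/api/account'
    # with HMAC-SHA256 under the API secret.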
async def list_futures(self) -> List[dict]:
return await self._request('GET', 'futures')
async def get_future(self, market: str) -> dict:
return await self._request('GET', f'futures/{market}')
async def list_markets(self) -> List[dict]:
return await self._request('GET', 'markets')
async def get_orderbook(self, market: str, depth: int = None) -> dict:
return await self._request('GET', f'markets/{market}/orderbook', params={'depth': depth})
async def get_trades(self, market: str) -> dict:
return await self._request('GET', f'markets/{market}/trades')
async def get_account_info(self) -> dict:
return await self._request('GET', 'account')
async def get_open_orders(self, market: str = None) -> List[dict]:
#return await self._request('GET', 'orders', params={'market': market})
return await self._request('GET', 'orders?market={}'.format(market))
async def get_conditional_orders(self, market: str = None) -> List[dict]:
#return await self._request('GET', 'conditional_orders', params={'market': market})
return await self._request('GET', 'conditional_orders?market={}'.format(market))
async def place_order(self, market: str, side: str, price: float, size: float, type: str = 'limit',
reduce_only: bool = False, ioc: bool = False, post_only: bool = False,
client_id: str = None) -> dict:
return await self._request('POST', 'orders', json={'market': market,
'side': side,
'price': price,
'size': size,
'type': type,
'reduceOnly': reduce_only,
'ioc': ioc,
'postOnly': post_only,
'clientId': client_id})
async def place_conditional_order(
self, market: str, side: str, size: float, type: str = 'stop',
limit_price: float = None, reduce_only: bool = False, cancel: bool = True,
trigger_price: float = None, trail_value: float = None) -> dict:
"""
To send a Stop Market order, set type='stop' and supply a trigger_price
To send a Stop Limit order, also supply a limit_price
        To send a Take Profit Market order, set type='take_profit' and supply a trigger_price
To send a Trailing Stop order, set type='trailing_stop' and supply a trail_value
"""
assert type in ('stop', 'take_profit', 'trailing_stop')
assert type not in ('stop', 'take_profit') or trigger_price is not None, 'Need trigger prices for stop losses and take profits'
        assert type not in ('trailing_stop',) or (trigger_price is None and trail_value is not None), 'Trailing stops need a trail value and cannot take a trigger price'  # ('trailing_stop') without a comma is a plain string, not a tuple
return await self._request('POST', 'conditional_orders', json={'market': market,
'side': side,
'triggerPrice': trigger_price,
'size': size,
'reduceOnly': reduce_only,
                                                                       'type': type,  # was hardcoded to 'stop'; assumption: pass the requested type through
'cancelLimitOnTrigger': cancel,
'orderPrice': limit_price})
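    # Illustrative call (values hypothetical): a stop-market sell of 0.02
    # ETH-PERP triggered at 150.0:
    #   await api.place_conditional_order('ETH-PERP', 'sell', 0.02,
    #                                     type='stop', trigger_price=150.0)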
async def cancel_order(self, order_id: str) -> dict:
return await self._request('DELETE', f'orders/{order_id}')
async def cancel_orders(self, market_name: str = None, conditional_orders: bool = False, limit_orders: bool = False) -> dict:
return await self._request('DELETE', 'orders', json={'market': market_name,
'conditionalOrdersOnly': conditional_orders,
'limitOrdersOnly': limit_orders})
async def get_fills(self) -> List[dict]:
return await self._request('GET', 'fills')
async def get_balances(self) -> List[dict]:
return await self._request('GET', 'wallet/balances')
async def get_deposit_address(self, ticker: str) -> dict:
return await self._request('GET', f'wallet/deposit_address/{ticker}')
async def get_positions(self, show_avg_price: bool = False) -> List[dict]:
return await self._request('GET', 'positions', params={'showAvgPrice': str(show_avg_price)})
async def get_kline(self, market_name: str, resolution: int, limit: int = None, start_time: int = None, end_time: int = None) -> dict:
#GET /markets/{market_name}/candles?resolution={resolution}&limit={limit}&start_time={start_time}&end_time={end_time}
params = {'resolution': resolution}
if limit:
params["limit"] = limit
if start_time:
params["start_time"] = start_time
if end_time:
params["end_time"] = end_time
return await self._request('GET', f'markets/{market_name}/candles', params=params)
class FTXTrader(Websocket, ExchangeGateway):
""" FTX Trade module. You can initialize trader object with some attributes in kwargs.
"""
def __init__(self, **kwargs):
"""Initialize."""
self.cb = kwargs["cb"]
state = None
self._platform = kwargs.get("platform")
self._symbols = kwargs.get("symbols")
self._strategy = kwargs.get("strategy")
self._account = kwargs.get("account")
self._access_key = kwargs.get("access_key")
self._secret_key = kwargs.get("secret_key")
self._subaccount_name = kwargs.get("subaccount_name")
if not self._platform:
state = State(self._platform, self._account, "param platform miss")
elif self._account and (not self._access_key or not self._secret_key):
state = State(self._platform, self._account, "param access_key or secret_key miss")
elif not self._strategy:
state = State(self._platform, self._account, "param strategy miss")
elif not self._symbols:
state = State(self._platform, self._account, "param symbols miss")
if state:
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
return
self._host = "https://ftx.com"
self._wss = "wss://ftx.com"
url = self._wss + "/ws"
super(FTXTrader, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {"op": "ping"}
# Initializing our REST API client.
self._rest_api = FTXRestAPI(self._host, self._access_key, self._secret_key, self._subaccount_name)
        # Order book depth data
self._orderbooks: DefaultDict[str, Dict[str, DefaultDict[float, float]]] = defaultdict(lambda: {side: defaultdict(float) for side in {'bids', 'asks'}})
self._assets: DefaultDict[str: Dict[str, float]] = defaultdict(lambda: {k: 0.0 for k in {'free', 'locked', 'total'}})
self._syminfo:DefaultDict[str: Dict[str, Any]] = defaultdict(dict)
if self._account != None:
self.initialize()
        # If all four market-data callbacks are None, none of the market-data code needs to run
if (self.cb.on_kline_update_callback or
self.cb.on_orderbook_update_callback or
self.cb.on_trade_update_callback or
self.cb.on_ticker_update_callback):
            # Market data feed
FTXMarket(**kwargs)
@property
def rest_api(self):
return self._rest_api
async def create_order(self, symbol, action, price, quantity, order_type=ORDER_TYPE_LIMIT, *args, **kwargs):
""" Create an order.
Args:
symbol: Trade target
action: Trade direction, `BUY` or `SELL`.
price: Price of each contract.
quantity: The buying or selling quantity.
order_type: Order type, `MARKET` or `LIMIT`.
Returns:
order_no: Order ID if created successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": {"avgFillPrice": null, "clientId": null, "createdAt": "2019-11-16T11:08:37.726313+00:00", "filledSize": 0.0, "future": "ETH-PERP", "id": 871282987, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 251.0, "reduceOnly": false, "remainingSize": 0.02, "side": "sell", "size": 0.02, "status": "new", "type": "limit"}, "success": true}
if action == ORDER_ACTION_BUY:
side = "buy"
else:
side = "sell"
size = abs(float(quantity))
price = float(price)
if order_type == ORDER_TYPE_LIMIT:
ot = "limit"
elif order_type == ORDER_TYPE_MARKET:
ot = "market"
price = None
else:
raise NotImplementedError
success, error = await self._rest_api.place_order(symbol, side, price, size, ot)
if error:
return None, error
if not success["success"]:
return None, "place_order error"
result = success["result"]
return str(result["id"]), None
async def revoke_order(self, symbol, *order_nos):
""" Revoke (an) order(s).
Args:
symbol: Trade target
            order_nos: Order id list; you may pass zero, one, or multiple ids. With no ids, all open
                orders for this symbol are cancelled. With one or more ids, those orders are cancelled.
        Returns:
            When cancelling all orders: success = (True, None), failure = (False, error information).
            When cancelling one or multiple orders: (ids of successfully cancelled orders[], ids of failed
                orders with error info[]); e.g. if all three cancels succeed the result is ([1xx, 2xx, 3xx], []).
"""
# If len(order_nos) == 0, you will cancel all orders for this symbol.
if len(order_nos) == 0:
success, error = await self._rest_api.cancel_orders(symbol)
if error:
return False, error
if not success["success"]:
return False, "cancel_orders error"
return True, None
        # If len(order_nos) > 0, you will cancel one or multiple orders.
else:
result = []
for order_no in order_nos:
_, e = await self._rest_api.cancel_order(order_no)
if e:
result.append((order_no, e))
else:
result.append((order_no, None))
return tuple(result), None
async def get_assets(self):
""" 获取交易账户资产信息
Args:
None
Returns:
assets: Asset if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": {"backstopProvider": false, "collateral": 110.094266926, "freeCollateral": 109.734306926, "initialMarginRequirement": 0.2, "leverage": 5.0, "liquidating": false, "maintenanceMarginRequirement": 0.03, "makerFee": 0.0002, "marginFraction": 61.1703338848761, "openMarginFraction": 61.170278323147016, "positionLimit": null, "positionLimitUsed": 2.15976, "positions": [{"collateralUsed": 0.35996, "cost": -1.7999, "entryPrice": 179.99, "estimatedLiquidationPrice": 11184.0172926, "future": "ETH-PERP", "initialMarginRequirement": 0.2, "longOrderSize": 0.0, "maintenanceMarginRequirement": 0.03, "netSize": -0.01, "openSize": 0.01, "realizedPnl": 0.01723393, "shortOrderSize": 0.0, "side": "sell", "size": 0.01, "unrealizedPnl": 0.0001}], "takerFee": 0.0007, "totalAccountValue": 110.094366926, "totalPositionSize": 1.7998, "useFttCollateral": true, "username": "8342537@qq.com"}, "success": true}
success, error = await self._rest_api.get_account_info()
if error:
return None, error
if not success["success"]:
return None, "get_account_info error"
data = success["result"]
assets = {}
total = float(data["collateral"])
free = float(data["freeCollateral"])
locked = total - free
assets["USD"] = {
"total": total,
"free": free,
"locked": locked
}
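        # Only flag an update when the snapshot differs from the cached assets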
if assets == self._assets:
update = False
else:
update = True
self._assets = assets
timestamp = tools.get_cur_timestamp_ms()
ast = Asset(self._platform, self._account, self._assets, timestamp, update)
return ast, None
def _convert_order_format(self, o):
"""将交易所订单结构转换为本交易系统标准订单结构格式
"""
order_no = str(o["id"])
state = o["status"]
remain = float(o["remainingSize"])
filled = float(o["filledSize"])
size = float(o["size"])
price = None if o["price"]==None else float(o["price"])
avg_price = None if o["avgFillPrice"]==None else float(o["avgFillPrice"])
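        # Map FTX order states ("new"/"open"/"closed") to internal statuses; a "closed"
        # order counts as filled only when filledSize reached size, otherwise as cancelled.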
if state == "new":
status = ORDER_STATUS_SUBMITTED
elif state == "open":
if remain < size:
status = ORDER_STATUS_PARTIAL_FILLED
else:
status = ORDER_STATUS_SUBMITTED
elif state == "closed":
if filled < size:
status = ORDER_STATUS_CANCELED
else:
status = ORDER_STATUS_FILLED
else:
return None
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"order_no": order_no,
"action": ORDER_ACTION_BUY if o["side"] == "buy" else ORDER_ACTION_SELL,
"symbol": o["market"],
"price": price,
"quantity": size,
"order_type": ORDER_TYPE_LIMIT if o["type"] == "limit" else ORDER_TYPE_MARKET,
"remain": remain, #size-filled会更好
"status": status,
"avg_price": avg_price
}
order = Order(**info)
return order
async def get_orders(self, symbol):
""" 获取当前挂单列表
Args:
symbol: Trade target
Returns:
orders: Order list if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": [{"avgFillPrice": null, "clientId": null, "createdAt": null, "filledSize": 0.0, "future": "ETH-PERP", "id": 769622011, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 152.0, "reduceOnly": false, "remainingSize": 0.002, "side": "buy", "size": 0.002, "status": "open", "type": "limit"}, {"avgFillPrice": null, "clientId": null, "createdAt": null, "filledSize": 0.0, "future": "ETH-PERP", "id": 769620713, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 150.0, "reduceOnly": false, "remainingSize": 0.001, "side": "buy", "size": 0.001, "status": "open", "type": "limit"}], "success": true}
orders:List[Order] = []
success, error = await self._rest_api.get_open_orders(symbol)
if error:
return None, error
if not success["success"]:
return None, "get_open_orders error"
data = success["result"]
for o in data:
order = self._convert_order_format(o)
if order == None:
return None, "get_open_orders error"
orders.append(order)
return orders, None
async def get_position(self, symbol):
""" 获取当前持仓
Args:
symbol: Trade target
Returns:
position: Position if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
#{"result": [{"collateralUsed": 0.35986, "cost": -1.7984, "entryPrice": 179.84, "estimatedLiquidationPrice": 11184.0123266, "future": "ETH-PERP", "initialMarginRequirement": 0.2, "longOrderSize": 0.0, "maintenanceMarginRequirement": 0.03, "netSize": -0.01, "openSize": 0.01, "realizedPnl": 0.01866927, "recentAverageOpenPrice": 179.84, "recentPnl": -0.0009, "shortOrderSize": 0.0, "side": "sell", "size": 0.01, "unrealizedPnl": -0.0009}], "success": true}
success, error = await self._rest_api.get_positions(True)
if error:
return None, error
if not success["success"]:
return None, "get_position error"
p = next(filter(lambda x: x['future'] == symbol, success["result"]), None)
if p == None:
return Position(self._platform, self._account, self._strategy, symbol), None
if p["netSize"] == 0:
return Position(self._platform, self._account, self._strategy, symbol), None
pos = Position(self._platform, self._account, self._strategy, symbol)
        pos.margin_mode = MARGIN_MODE_CROSSED  # FTX only supports cross margin; use sub-accounts if isolated-margin behavior is needed
pos.utime = tools.get_cur_timestamp_ms()
if p["netSize"] < 0: #空头仓位
pos.long_quantity = 0
pos.long_avail_qty = 0
pos.long_open_price = 0
pos.long_hold_price = 0
pos.long_liquid_price = 0
pos.long_unrealised_pnl = 0
pos.long_leverage = 0
pos.long_margin = 0
#
pos.short_quantity = abs(p["netSize"])
pos.short_avail_qty = pos.short_quantity-p["longOrderSize"] if p["longOrderSize"]<pos.short_quantity else 0
pos.short_open_price = p["recentAverageOpenPrice"]
pos.short_hold_price = p["entryPrice"]
pos.short_liquid_price = p["estimatedLiquidationPrice"]
pos.short_unrealised_pnl = p["unrealizedPnl"]
pos.short_leverage = int(1/p["initialMarginRequirement"])
pos.short_margin = p["collateralUsed"]
        else:  # long position
pos.long_quantity = abs(p["netSize"])
pos.long_avail_qty = pos.long_quantity-p["shortOrderSize"] if p["shortOrderSize"]<pos.long_quantity else 0
pos.long_open_price = p["recentAverageOpenPrice"]
pos.long_hold_price = p["entryPrice"]
pos.long_liquid_price = p["estimatedLiquidationPrice"]
pos.long_unrealised_pnl = p["unrealizedPnl"]
pos.long_leverage = int(1/p["initialMarginRequirement"])
pos.long_margin = p["collateralUsed"]
#
pos.short_quantity = 0
pos.short_avail_qty = 0
pos.short_open_price = 0
pos.short_hold_price = 0
pos.short_liquid_price = 0
pos.short_unrealised_pnl = 0
pos.short_leverage = 0
pos.short_margin = 0
return pos, None
async def get_symbol_info(self, symbol):
""" 获取指定符号相关信息
Args:
symbol: Trade target
Returns:
symbol_info: SymbolInfo if successfully, otherwise it's None.
error: Error information, otherwise it's None.
"""
"""
{
"success": true,
"result": [
{
"name": "BTC-0628",
"baseCurrency": null,
"quoteCurrency": null,
"type": "future",
"underlying": "BTC",
"enabled": true,
"ask": 3949.25,
"bid": 3949,
"last": 10579.52,
"priceIncrement": 0.25,
"sizeIncrement": 0.001
}
]
}
"""
info = self._syminfo[symbol]
if not info:
return None, "Symbol not exist"
price_tick = float(info["priceIncrement"])
size_tick = float(info["sizeIncrement"])
        size_limit = None   # not present in the raw data
        value_tick = None   # not present in the raw data
        value_limit = None  # not present in the raw data
if info["type"] == "future":
base_currency = info["underlying"]
quote_currency = "USD"
settlement_currency = "USD"
else: #"spot"
base_currency = info["baseCurrency"]
quote_currency = info["quoteCurrency"]
settlement_currency = info["quoteCurrency"]
symbol_type = info["type"]
is_inverse = False
multiplier = 1
syminfo = SymbolInfo(self._platform, symbol, price_tick, size_tick, size_limit, value_tick, value_limit, base_currency, quote_currency, settlement_currency, symbol_type, is_inverse, multiplier)
return syminfo, None
async def invalid_indicate(self, symbol, indicate_type):
""" update (an) callback function.
Args:
symbol: Trade target
indicate_type: INDICATE_ORDER, INDICATE_ASSET, INDICATE_POSITION
Returns:
success: If execute successfully, return True, otherwise it's False.
error: If execute failed, return error information, otherwise it's None.
"""
async def _task():
if indicate_type == INDICATE_ORDER and self.cb.on_order_update_callback:
success, error = await self.get_orders(symbol)
if error:
state = State(self._platform, self._account, "get_orders error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
for order in success:
SingleTask.run(self.cb.on_order_update_callback, order)
elif indicate_type == INDICATE_ASSET and self.cb.on_asset_update_callback:
success, error = await self.get_assets()
if error:
state = State(self._platform, self._account, "get_assets error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_asset_update_callback, success)
elif indicate_type == INDICATE_POSITION and self.cb.on_position_update_callback:
success, error = await self.get_position(symbol)
if error:
state = State(self._platform, self._account, "get_position error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
return
SingleTask.run(self.cb.on_position_update_callback, success)
if indicate_type == INDICATE_ORDER or indicate_type == INDICATE_ASSET or indicate_type == INDICATE_POSITION:
SingleTask.run(_task)
return True, None
else:
logger.error("indicate_type error! indicate_type:", indicate_type, caller=self)
return False, "indicate_type error"
async def _login(self):
"""FTX的websocket接口真是逗逼,验证成功的情况下居然不会返回任何消息"""
ts = int(time.time() * 1000)
signature = hmac.new(self._secret_key.encode(), f'{ts}websocket_login'.encode(), 'sha256').hexdigest()
args = {
'key': self._access_key,
'sign': signature,
'time': ts
}
        # If this is a sub-account, add the corresponding field
if self._subaccount_name:
args["subaccount"] = self._subaccount_name
data = {'op': 'login', 'args': args}
await self.send_json(data)
async def connected_callback(self):
"""网络链接成功回调
"""
if self._account != None:
            # With an account configured, authenticate first, then subscribe to the two private channels that
            # require login: order updates and fill updates (the only two private channels FTX supports)
            await self._login()  # authenticate
success, error = await self._rest_api.list_markets()
if error:
state = State(self._platform, self._account, "list_markets error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                # An error occurred during initialization: close the connection to trigger the reconnect mechanism
await self.socket_close()
return
for info in success["result"]:
self._syminfo[info["name"]] = info #符号信息一般不变,获取一次保存好,其他地方要用直接从这个变量获取就可以了
if self.cb.on_order_update_callback != None:
for sym in self._symbols:
orders, error = await self.get_orders(sym)
if error:
state = State(self._platform, self._account, "get_orders error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                        # An error occurred during initialization: close the connection to trigger the reconnect mechanism
await self.socket_close()
return
for o in orders:
SingleTask.run(self.cb.on_order_update_callback, o)
if self.cb.on_position_update_callback != None:
for sym in self._symbols:
pos, error = await self.get_position(sym)
if error:
state = State(self._platform, self._account, "get_position error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                        # An error occurred during initialization: close the connection to trigger the reconnect mechanism
await self.socket_close()
return
SingleTask.run(self.cb.on_position_update_callback, pos)
if self.cb.on_asset_update_callback != None:
ast, error = await self.get_assets()
if error:
state = State(self._platform, self._account, "get_assets error: {}".format(error), State.STATE_CODE_GENERAL_ERROR)
SingleTask.run(self.cb.on_state_update_callback, state)
                    # An error occurred during initialization: close the connection to trigger the reconnect mechanism
await self.socket_close()
return
SingleTask.run(self.cb.on_asset_update_callback, ast)
            # Subscribe if the order-update callback is set
if self.cb.on_order_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'orders'})
            # Subscribe if the fill-update callback is set
if self.cb.on_fill_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'fills'})
            # Initialize the subscription-response counter to 0
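            # NOTE: STATE_CODE_READY is only reported after two subscription acks arrive
            # (see process()), i.e. after both the 'orders' and 'fills' channels respond.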
self._subscribe_response_count = 0
async def process(self, msg):
""" Process message that received from websocket.
Args:
msg: message received from websocket.
Returns:
None.
"""
if not isinstance(msg, dict):
return
logger.debug("msg:", json.dumps(msg), caller=self)
#{"type": "error", "code": 400, "msg": "Invalid login credentials"}
if msg["type"] == "error":
state = State(self._platform, self._account, "Websocket connection failed: {}".format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif msg["type"] == "pong":
return
elif msg["type"] == "info":
if msg["code"] == 20001:
                # The exchange restarted; disconnect and the websocket will reconnect automatically
@async_method_locker("FTXTrader._ws_close.locker")
async def _ws_close():
await self.socket_close()
SingleTask.run(_ws_close)
elif msg["type"] == "unsubscribed":
return
#{'type': 'subscribed', 'channel': 'trades', 'market': 'BTC-PERP'}
elif msg["type"] == "subscribed":
            self._subscribe_response_count = self._subscribe_response_count + 1  # increment once per subscription ack
            if self._subscribe_response_count == 2:  # all subscriptions succeeded, so tell the upper layer everything is ready
state = State(self._platform, self._account, "Environment ready", State.STATE_CODE_READY)
SingleTask.run(self.cb.on_state_update_callback, state)
elif msg["type"] == "update":
channel = msg['channel']
if channel == 'orders':
self._update_order(msg)
elif channel == 'fills':
self._update_fill(msg)
def _update_order(self, order_info):
""" Order update.
Args:
order_info: Order information.
Returns:
None.
"""
#new (accepted but not processed yet), open, or closed (filled or cancelled)
        # Opening a position
#{"id": 742849571, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 150.0, "size": 0.003, "status": "new", "filledSize": 0.0, "remainingSize": 0.003, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
        # price 150 -> amended -> 151
#{"id": 742849571, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 150.0, "size": 0.003, "status": "closed", "filledSize": 0.0, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
#{"id": 742853455, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 151.0, "size": 0.003, "status": "new", "filledSize": 0.0, "remainingSize": 0.003, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
        # price 151 -> amended -> 187 -> filled
#{"id": 742853455, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 151.0, "size": 0.003, "status": "closed", "filledSize": 0.0, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}
#{"id": 742862380, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 187.0, "size": 0.003, "status": "closed", "filledSize": 0.003, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": 186.96, "postOnly": false, "ioc": false}
        # Close the whole position at market price
#{"id": 742875876, "clientId": null, "market": "ETH-PERP", "type": "market", "side": "sell", "price": null, "size": 0.003, "status": "closed", "filledSize": 0.003, "remainingSize": 0.0, "reduceOnly": true, "avgFillPrice": 186.79, "postOnly": false, "ioc": true}
o = order_info["data"]
order = self._convert_order_format(o)
if order == None:
return
SingleTask.run(self.cb.on_order_update_callback, order)
def _update_fill(self, fill_info):
""" Fill update.
Args:
fill_info: Fill information.
Returns:
None.
"""
#{"channel": "orders", "type": "update", "data": {"id": 751733812, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 187.93, "size": 0.001, "status": "closed", "filledSize": 0.001, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": 184.25, "postOnly": false, "ioc": false}}
#{"channel": "fills", "type": "update", "data": {"id": 5741311, "market": "ETH-PERP", "future": "ETH-PERP", "baseCurrency": null, "quoteCurrency": null, "type": "order", "side": "buy", "price": 184.25, "size": 0.001, "orderId": 751733812, "time": "2019-11-08T09:52:27.366467+00:00", "feeRate": 0.0007, "fee": 0.000128975, "liquidity": "taker"}}
data = fill_info["data"]
fill_no = str(data["id"])
order_no = str(data["orderId"])
price = float(data["price"])
size = float(data["size"])
fee = float(data["fee"])
ts = tools.utctime_str_to_mts(data["time"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
liquidity = LIQUIDITY_TYPE_TAKER if data["liquidity"]=="taker" else LIQUIDITY_TYPE_MAKER
info = {
"platform": self._platform,
"account": self._account,
"strategy": self._strategy,
"fill_no": fill_no,
"order_no": order_no,
"side": ORDER_ACTION_BUY if data["side"] == "buy" else ORDER_ACTION_SELL,
"symbol": data["market"],
"price": price,
"quantity": size,
"liquidity": liquidity,
"fee": fee,
"ctime": ts
}
fill = Fill(**info)
SingleTask.run(self.cb.on_fill_update_callback, fill)
@staticmethod
def mapping_layer():
""" 获取符号映射关系.
Returns:
layer: 符号映射关系
"""
return None #FTX不需要符号映射
class FTXMarket(Websocket):
""" FTX Trade module. You can initialize trader object with some attributes in kwargs.
"""
def __init__(self, **kwargs):
"""Initialize."""
self._platform = kwargs["platform"]
self._symbols = kwargs["symbols"]
self._host = "https://ftx.com"
self._wss = "wss://ftx.com"
url = self._wss + "/ws"
super(FTXMarket, self).__init__(url, send_hb_interval=15, **kwargs)
self.heartbeat_msg = {"op": "ping"}
self._rest_api = FTXRestAPI(self._host, None, None, None)
        # Order book depth data
self._orderbooks: DefaultDict[str, Dict[str, DefaultDict[float, float]]] = defaultdict(lambda: {side: defaultdict(float) for side in {'bids', 'asks'}})
self.initialize()
async def _kline_loop_query(self, symbol, *args, **kwargs):
#{"result": [{"close": 7088.5, "high": 7090.0, "low": 7085.75, "open": 7090.0, "startTime": "2019-11-26T16:44:00+00:00", "time": 1574786640000.0, "volume": 0.70885}, {"close": 7088.0, "high": 7088.75, "low": 7088.0, "open": 7088.5, "startTime": "2019-11-26T16:45:00+00:00", "time": 1574786700000.0, "volume": 0.708875}], "success": true}
        success, error = await self._rest_api.get_kline(symbol, 60, 2)  # fetch two time windows of data
if error:
return None, error
if not success["success"]:
return None, "_kline_loop_query error"
result = success["result"]
        k = result[0]  # this holds the complete data for the previous minute
self._update_kline(k, symbol)
async def connected_callback(self):
"""网络链接成功回调
"""
        # Subscribe to public channels; no authentication required
for sym in self._symbols:
if self.cb.on_trade_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'trades', 'market': sym})
if self.cb.on_orderbook_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': sym})
if self.cb.on_ticker_update_callback != None:
await self.send_json({'op': 'subscribe', 'channel': 'ticker', 'market': sym})
if self.cb.on_kline_update_callback != None:
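                # FTX's websocket offers no kline channel, so poll the REST candles endpoint every 60 seconds instead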
LoopRunTask.register(self._kline_loop_query, 60, sym)
async def process(self, msg):
""" Process message that received from websocket.
Args:
msg: message received from websocket.
Returns:
None.
"""
if not isinstance(msg, dict):
return
logger.debug("msg:", json.dumps(msg), caller=self)
#{"type": "pong"}
if msg.get("type") == "pong":
return
#{"type": "error", "code": 400, "msg": "Invalid login credentials"}
elif msg["type"] == "error":
state = State(self._platform, self._account, "Websocket connection failed: {}".format(msg), State.STATE_CODE_GENERAL_ERROR)
logger.error(state, caller=self)
SingleTask.run(self.cb.on_state_update_callback, state)
elif msg["type"] == "info":
if msg["code"] == 20001:
                # The exchange restarted; disconnect and the websocket will reconnect automatically
@async_method_locker("FTXMarket._ws_close.locker")
async def _ws_close():
await self.socket_close()
SingleTask.run(_ws_close)
elif msg["type"] == "unsubscribed":
return
#{'type': 'subscribed', 'channel': 'trades', 'market': 'BTC-PERP'}
elif msg["type"] == "subscribed":
return
elif msg["type"] == "update" or msg["type"] == "partial":
channel = msg['channel']
if channel == 'orderbook':
self._update_orderbook(msg)
elif channel == 'trades':
self._update_trades(msg)
elif channel == 'ticker':
self._update_ticker(msg)
def _update_ticker(self, ticker_info):
""" ticker update.
Args:
ticker_info: ticker information.
Returns:
"""
#{"channel": "ticker", "market": "BTC-PERP", "type": "update", "data": {"bid": 9320.0, "ask": 9323.0, "bidSize": 78.506, "askSize": 101.2467, "last": 9333.5, "time": 1573014477.9969265}}
ts = int(float(ticker_info["data"]["time"])*1000) #转变为毫秒
p = {
"platform": self._platform,
"symbol": ticker_info["market"],
"ask": ticker_info["data"]["ask"],
"bid": ticker_info["data"]["bid"],
"last": ticker_info["data"]["last"],
"timestamp": ts
}
ticker = Ticker(**p)
SingleTask.run(self.cb.on_ticker_update_callback, ticker)
def _update_trades(self, trades_info):
""" trades update.
Args:
trades_info: trades information.
Returns:
"""
#{"channel": "trades", "market": "BTC-PERP", "type": "update", "data": [{"id": 2616562, "price": 9333.25, "size": 0.2143, "side": "sell", "liquidation": false, "time": "2019-11-06T05:19:51.187372+00:00"}]}
for t in trades_info["data"]:
ts = tools.utctime_str_to_mts(t["time"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
p = {
"platform": self._platform,
"symbol": trades_info["market"],
"action": ORDER_ACTION_BUY if t["side"] == "buy" else ORDER_ACTION_SELL,
"price": t["price"],
"quantity": t["size"],
"timestamp": ts
}
trade = Trade(**p)
SingleTask.run(self.cb.on_trade_update_callback, trade)
def _reset_orderbook(self, market: str) -> None:
if market in self._orderbooks:
del self._orderbooks[market]
def _get_orderbook(self, market: str) -> Dict[str, List[Tuple[float, float]]]:
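        # Return both sides as price-sorted lists: bids descending, asks ascending; zero-size levels are dropped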
return {
side: sorted(
[(price, quantity) for price, quantity in list(self._orderbooks[market][side].items()) if quantity],
key=lambda order: order[0] * (-1 if side == 'bids' else 1)
)
for side in {'bids', 'asks'}
}
def _update_orderbook(self, orderbook_info):
""" orderbook update.
Args:
orderbook_info: orderbook information.
Returns:
"""
market = orderbook_info['market']
data = orderbook_info['data']
if data['action'] == 'partial':
self._reset_orderbook(market)
for side in {'bids', 'asks'}:
book = self._orderbooks[market][side]
for price, size in data[side]:
if size:
book[price] = size
else:
del book[price]
#end for
checksum = data['checksum']
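        # FTX orderbook integrity check: crc32 over the string
        # "bid1_price:bid1_size:ask1_price:ask1_size:..." built from the top 100
        # levels of each side, interleaving bids and asks (zip_longest skips a
        # side that runs out of levels).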
orderbook = self._get_orderbook(market)
checksum_data = [
':'.join([f'{float(order[0])}:{float(order[1])}' for order in (bid, offer) if order])
for (bid, offer) in zip_longest(orderbook['bids'][:100], orderbook['asks'][:100])
]
computed_result = int(zlib.crc32(':'.join(checksum_data).encode()))
if computed_result != checksum:
            # On checksum mismatch, re-subscribe to the order book channel
@async_method_locker("FTXMarket._re_subscribe.locker")
async def _re_subscribe():
await self.send_json({'op': 'unsubscribe', 'channel': 'orderbook', 'market': market})
await self.send_json({'op': 'subscribe', 'channel': 'orderbook', 'market': market})
SingleTask.run(_re_subscribe)
            # Bail out on checksum mismatch
return
logger.debug("orderbook:", json.dumps(orderbook), caller=self)
        ts = int(float(data['time'])*1000)  # convert to milliseconds
p = {
"platform": self._platform,
"symbol": market,
"asks": orderbook['asks'],
"bids": orderbook['bids'],
"timestamp": ts
}
ob = Orderbook(**p)
SingleTask.run(self.cb.on_orderbook_update_callback, ob)
def _update_kline(self, kline_info, symbol):
""" kline update.
Args:
kline_info: kline information.
Returns:
None.
"""
info = {
"platform": self._platform,
"symbol": symbol,
"open": kline_info["open"],
"high": kline_info["high"],
"low": kline_info["low"],
"close": kline_info["close"],
"volume": kline_info["volume"],
"timestamp": tools.utctime_str_to_mts(kline_info["startTime"], "%Y-%m-%dT%H:%M:%S+00:00"),
"kline_type": MARKET_TYPE_KLINE
}
kline = Kline(**info)
SingleTask.run(self.cb.on_kline_update_callback, kline)
| quant/platform/ftx.py | 45,427 | FTX Trade module. You can initialize trader object with some attributes in kwargs.
FTX Trade module. You can initialize trader object with some attributes in kwargs.
Initialize.
Initialize.
将交易所订单结构转换为本交易系统标准订单结构格式
Fill update.
Args:
fill_info: Fill information.
Returns:
None.
kline update.
Args:
kline_info: kline information.
Returns:
None.
Order update.
Args:
order_info: Order information.
Returns:
None.
orderbook update.
Args:
orderbook_info: orderbook information.
Returns:
ticker update.
Args:
ticker_info: ticker information.
Returns:
trades update.
Args:
trades_info: trades information.
Returns:
获取符号映射关系.
Returns:
layer: 符号映射关系
FTX Trade module.
https://docs.ftx.com/
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
-*- coding:utf-8 -*-return await self._request('GET', 'orders', params={'market': market})return await self._request('GET', 'conditional_orders', params={'market': market})GET /markets/{market_name}/candles?resolution={resolution}&limit={limit}&start_time={start_time}&end_time={end_time} Initializing our REST API client.订单簿深度数据如果四个行情回调函数都为空的话,就根本不需要执行市场行情相关代码市场行情数据{"result": {"avgFillPrice": null, "clientId": null, "createdAt": "2019-11-16T11:08:37.726313+00:00", "filledSize": 0.0, "future": "ETH-PERP", "id": 871282987, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 251.0, "reduceOnly": false, "remainingSize": 0.02, "side": "sell", "size": 0.02, "status": "new", "type": "limit"}, "success": true} If len(order_nos) == 0, you will cancel all orders for this symbol. If len(order_nos) > 0, you will cancel an or multiple orders.{"result": {"backstopProvider": false, "collateral": 110.094266926, "freeCollateral": 109.734306926, "initialMarginRequirement": 0.2, "leverage": 5.0, "liquidating": false, "maintenanceMarginRequirement": 0.03, "makerFee": 0.0002, "marginFraction": 61.1703338848761, "openMarginFraction": 61.170278323147016, "positionLimit": null, "positionLimitUsed": 2.15976, "positions": [{"collateralUsed": 0.35996, "cost": -1.7999, "entryPrice": 179.99, "estimatedLiquidationPrice": 11184.0172926, "future": "ETH-PERP", "initialMarginRequirement": 0.2, "longOrderSize": 0.0, "maintenanceMarginRequirement": 0.03, "netSize": -0.01, "openSize": 0.01, "realizedPnl": 0.01723393, "shortOrderSize": 0.0, "side": "sell", "size": 0.01, "unrealizedPnl": 0.0001}], "takerFee": 0.0007, "totalAccountValue": 110.094366926, "totalPositionSize": 1.7998, "useFttCollateral": true, "username": "8342537@qq.com"}, "success": true}size-filled会更好{"result": [{"avgFillPrice": null, "clientId": null, "createdAt": null, "filledSize": 0.0, "future": "ETH-PERP", "id": 769622011, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 152.0, "reduceOnly": false, "remainingSize": 0.002, "side": "buy", "size": 0.002, "status": "open", "type": "limit"}, {"avgFillPrice": null, "clientId": null, "createdAt": null, "filledSize": 0.0, "future": "ETH-PERP", "id": 769620713, "ioc": false, "market": "ETH-PERP", "postOnly": false, "price": 150.0, "reduceOnly": false, "remainingSize": 0.001, "side": "buy", "size": 0.001, "status": "open", "type": "limit"}], "success": true}{"result": [{"collateralUsed": 0.35986, "cost": -1.7984, "entryPrice": 179.84, "estimatedLiquidationPrice": 11184.0123266, "future": "ETH-PERP", "initialMarginRequirement": 0.2, "longOrderSize": 0.0, "maintenanceMarginRequirement": 0.03, "netSize": -0.01, "openSize": 0.01, "realizedPnl": 0.01866927, "recentAverageOpenPrice": 179.84, "recentPnl": -0.0009, "shortOrderSize": 0.0, "side": "sell", "size": 0.01, "unrealizedPnl": -0.0009}], "success": true}ftx只有全仓模式,如果想要逐仓模式的话就用子账户的方式来实现空头仓位多头仓位原始数据中没有原始数据中没有原始数据中没有"spot"如果是子账户,就添加相应字段账号不为空就要进行登录认证,然后订阅2个需要登录后才能订阅的私有频道:用户挂单通知和挂单成交通知(FTX只支持这2个私有频道)登录认证初始化过程中发生错误,关闭网络连接,触发重连机制符号信息一般不变,获取一次保存好,其他地方要用直接从这个变量获取就可以了初始化过程中发生错误,关闭网络连接,触发重连机制初始化过程中发生错误,关闭网络连接,触发重连机制初始化过程中发生错误,关闭网络连接,触发重连机制`用户挂单通知回调`不为空,就进行订阅`用户挂单成交通知回调`不为空,就进行订阅计数初始化0{"type": "error", "code": 400, "msg": "Invalid login credentials"}交易所重启了,我们就断开连接,websocket会自动重连{'type': 'subscribed', 'channel': 'trades', 'market': 'BTC-PERP'}每来一次订阅响应计数就加一所有的订阅都成功了,通知上层接口都准备好了new (accepted but not processed yet), open, or closed (filled or cancelled)开仓{"id": 742849571, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 150.0, 
"size": 0.003, "status": "new", "filledSize": 0.0, "remainingSize": 0.003, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}150->修改->151{"id": 742849571, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 150.0, "size": 0.003, "status": "closed", "filledSize": 0.0, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}{"id": 742853455, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 151.0, "size": 0.003, "status": "new", "filledSize": 0.0, "remainingSize": 0.003, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}151->修改->187->成交{"id": 742853455, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 151.0, "size": 0.003, "status": "closed", "filledSize": 0.0, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": null, "postOnly": false, "ioc": false}{"id": 742862380, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 187.0, "size": 0.003, "status": "closed", "filledSize": 0.003, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": 186.96, "postOnly": false, "ioc": false}市价全平仓位{"id": 742875876, "clientId": null, "market": "ETH-PERP", "type": "market", "side": "sell", "price": null, "size": 0.003, "status": "closed", "filledSize": 0.003, "remainingSize": 0.0, "reduceOnly": true, "avgFillPrice": 186.79, "postOnly": false, "ioc": true}{"channel": "orders", "type": "update", "data": {"id": 751733812, "clientId": null, "market": "ETH-PERP", "type": "limit", "side": "buy", "price": 187.93, "size": 0.001, "status": "closed", "filledSize": 0.001, "remainingSize": 0.0, "reduceOnly": false, "avgFillPrice": 184.25, "postOnly": false, "ioc": false}} {"channel": "fills", "type": "update", "data": {"id": 5741311, "market": "ETH-PERP", "future": "ETH-PERP", "baseCurrency": null, "quoteCurrency": null, "type": "order", "side": "buy", "price": 184.25, "size": 0.001, "orderId": 751733812, "time": "2019-11-08T09:52:27.366467+00:00", "feeRate": 0.0007, "fee": 0.000128975, "liquidity": "taker"}} FTX不需要符号映射订单簿深度数据{"result": [{"close": 7088.5, "high": 7090.0, "low": 7085.75, "open": 7090.0, "startTime": "2019-11-26T16:44:00+00:00", "time": 1574786640000.0, "volume": 0.70885}, {"close": 7088.0, "high": 7088.75, "low": 7088.0, "open": 7088.5, "startTime": "2019-11-26T16:45:00+00:00", "time": 1574786700000.0, "volume": 0.708875}], "success": true}取2个时间窗口的数据这里保存的是上一分钟完整的数据订阅公共频道,无需登录认证{"type": "pong"}{"type": "error", "code": 400, "msg": "Invalid login credentials"}交易所重启了,我们就断开连接,websocket会自动重连{'type': 'subscribed', 'channel': 'trades', 'market': 'BTC-PERP'}{"channel": "ticker", "market": "BTC-PERP", "type": "update", "data": {"bid": 9320.0, "ask": 9323.0, "bidSize": 78.506, "askSize": 101.2467, "last": 9333.5, "time": 1573014477.9969265}}转变为毫秒{"channel": "trades", "market": "BTC-PERP", "type": "update", "data": [{"id": 2616562, "price": 9333.25, "size": 0.2143, "side": "sell", "liquidation": false, "time": "2019-11-06T05:19:51.187372+00:00"}]} end for校验和不对就需要重新订阅深度信息校验和不对就退出转变为毫秒 | 7,544 | en | 0.3317 |
import os
import matplotlib.pyplot as plt
plt.style.use("seaborn")
import numpy as np
from lib.utils import read_csv, find_cargo_root
from lib.blocking import block
data_folder = os.path.join(find_cargo_root(), "data")
save_folder = os.path.join(os.path.dirname(find_cargo_root()), "report", "assets")
if not os.path.isdir(save_folder):
os.mkdir(save_folder)
N = 10
true_val = 15
bruteforce = read_csv(os.path.join(data_folder, "E_vs_MCs_BruteForceMetropolis.csv"))
importance = read_csv(os.path.join(data_folder, "E_vs_MCs_ImportanceMetropolis.csv"))
x = [100, 1000, 3000, 5000, 7000, 10000]
#bruteforce_std = [np.sqrt(block(np.array(vals))[1]) for vals in [bruteforce["energy[au]"][1:up_to] for up_to in x]]
#importance_std = [np.sqrt(block(np.array(vals))[1]) for vals in [importance["energy[au]"][1:up_to] for up_to in x]]
#plt.plot(x, bruteforce_std, "-o", label="Brute-force")
#plt.plot(x, importance_std, "-o", label="Importance")
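# The commented-out block above would instead plot the standard error of the mean,
# estimated with the blocking method (block()[1] appears to be the blocked variance),
# against the number of Monte Carlo cycles.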
plt.plot(range(len(bruteforce["energy[au]"][1:])), bruteforce["energy[au]"][1:], "-o", label="Brute-force")
plt.plot(range(len(importance["energy[au]"][1:])), importance["energy[au]"][1:], "-o", label="Importance")
plt.xlabel("Monte Carlo cycles")
plt.ylabel(r"Energy")
plt.legend()
plt.savefig(os.path.join(save_folder, "E_vs_MCs_all.png"))
plt.show() | vmc/result_analysis/E_vs_MCs.py | 1,298 | bruteforce_std = [np.sqrt(block(np.array(vals))[1]) for vals in [bruteforce["energy[au]"][1:up_to] for up_to in x]]importance_std = [np.sqrt(block(np.array(vals))[1]) for vals in [importance["energy[au]"][1:up_to] for up_to in x]]plt.plot(x, bruteforce_std, "-o", label="Brute-force")plt.plot(x, importance_std, "-o", label="Importance") | 337 | en | 0.391776 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-22 11:53
from __future__ import unicode_literals
import company.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Company name', max_length=100, unique=True)),
('description', models.CharField(max_length=500)),
('website', models.URLField(blank=True, help_text='Company website URL')),
('address', models.CharField(blank=True, help_text='Company address', max_length=200)),
('phone', models.CharField(blank=True, max_length=50)),
('email', models.EmailField(blank=True, max_length=254)),
('contact', models.CharField(blank=True, max_length=100)),
('image', models.ImageField(blank=True, max_length=255, null=True, upload_to=company.models.rename_company_image)),
('notes', models.TextField(blank=True)),
],
),
]
| InvenTree/company/migrations/0001_initial.py | 1,286 | -*- coding: utf-8 -*- Generated by Django 1.11.12 on 2018-04-22 11:53 | 69 | en | 0.59464 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
sys.path.append(os.path.dirname(__file__))
# -- Project information -----------------------------------------------------
project = "DoubletDetection"
copyright = "2019, Adam Gayoso and Jonathan Shor"
author = "Adam Gayoso and Jonathan Shor"
# The full version, including alpha/beta/rc tags
release = "2.5.2"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "m2r"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
master_doc = "index"
| docs/conf.py | 2,088 | Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/master/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". | 1,512 | en | 0.692223 |
"""
This module lets you experience the POWER of FUNCTIONS and PARAMETERS.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Morgan Brown.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
"""
Calls the other functions in this module to test and/or demonstrate them.
"""
drawing_speed = 10 # Bigger numbers mean faster drawing
window = rg.TurtleWindow()
window.tracer(drawing_speed)
# -------------------------------------------------------------------------
# When the _TODO_s ask you to test YOUR code,
# comment-out the following two statements and replace them
# by calls to better_draw_circles et al as needed.
# -------------------------------------------------------------------------
    # draw_circles(rg.Point(100, 50))
    # draw_circles(rg.Point(-200, 0))
    better_draw_circles(rg.Point(100, 50), 15)
    better_draw_circles(rg.Point(-200, 0), 3)
window.update()
window.close_on_mouse_click()
###############################################################################
# DONE: 2.
# First, RUN this program. You will see that it draws concentric circles
# whose radii vary by 15.
#
# Next, READ:
# -- main.
# Note that it constructs a TurtleWindow and then calls the function
# draw_circles
# twice, sending draw_circles one Point the first time
# and another Point the second time.
# -- The function draw_circles is defined immediately below this _TODO_.
# Be sure that you understand its code! Ask questions as needed!
#
# After you have done the above, change the above _TODO_ to DONE
# and continue to the next _TODO_ below.
#
###############################################################################
def draw_circles(point):
"""
Constructs a SimpleTurtle, then uses the SimpleTurtle to draw 10 circles
such that:
-- Each is centered at the given Point, and
-- They have radii: 15 30 45 60 75 ..., respectively.
"""
turtle = rg.SimpleTurtle()
# -------------------------------------------------------------------------
# Draw circles centered at the given Point, by telling the SimpleTurtle to:
# Step 1: Go to the given Point and point east (towards the right).
# Step 2: Go 15 pixels DOWN, with its Pen up.
# Then draw a radius R circle.
# Note: The circle will be centered at the given Point,
# because of the way that the SimpleTurtle draw_circle method works.
# Step 3: Repeat Step 2, but using 30 pixels instead of 15, in both places
# Step 4: Repeat Step 2, but using 45 pixels instead of 15
# Step 5: Repeat Step 2, but using 60 pixels instead of 15
# etc.
# -------------------------------------------------------------------------
turtle.pen_up()
turtle.go_to(point)
turtle.set_heading(0) # Point "east" (towards the right)
for k in range(1, 11): # k becomes 1, 2, 3, ... 10
turtle.pen_up()
# Go DOWN 15 pixels, ending up pointing east again
turtle.right(90)
turtle.forward(15)
turtle.left(90)
turtle.pen_down()
turtle.draw_circle(15 * k) # Radius 15, 30, 45, 60, ...
###############################################################################
# DONE: 3a.
# The function
# better_draw_circles
# defined below this _TODO_ starts out exactly the same as the code for
# draw_circles
# that you read above.
#
# Your job is to make
# better_draw_circles
# "better" than draw_circles by adding a PARAMETER for the amount
# by which the radii of the concentric circles increase, as described below.
#
# The new better_draw_circles function can do the same thing as
# the draw_circles function, but additionally allows for the radii to
# vary by ANY desired amount. Hence, the new version will be MORE POWERFUL.
#
# So, modify the better_draw_circles function defined BELOW so that
# it has a single ADDITIONAL PARAMETER that is the amount
# by which the radii of the circles increase.
#
# For example, if that new parameter is given the value 15,
# then the circles should have radii: 15 30 45 60 75 ..., respectively,
# just as in draw_circles. But if that new parameter is given the value 3,
# then the circles should have radii: 3 6 9 12 15 18 ..., respectively.
#
# DONE: 3b.
# In main at the place indicated, comment-out the two existing calls
# to draw_circles and add at least two calls to the improved
# better_draw_circles function, to TEST that your modified code is correct
# and does indeed allow for different amounts by which the radii can vary.
#
# #############################################################################
def better_draw_circles(point, radius_increase):
    """
    Draws 10 concentric circles centered at the given Point, whose radii are:
    radius_increase, 2 * radius_increase, ... 10 * radius_increase.
    """
    turtle = rg.SimpleTurtle()
    turtle.pen_up()
    turtle.go_to(point)
    turtle.set_heading(0)  # Point "east" (towards the right)
    for k in range(1, 11):  # k becomes 1, 2, 3, ... 10
        turtle.pen_up()
        # Go DOWN radius_increase pixels, ending up pointing east again
        turtle.right(90)
        turtle.forward(radius_increase)
        turtle.left(90)
        turtle.pen_down()
        turtle.draw_circle(radius_increase * k)  # Radius: radius_increase, 2 * radius_increase, ...
###############################################################################
# TODO: 4a.
# In the previous _TODO_, you made a MORE POWERFUL version
# of draw_circles by introducing a new PARAMETER for the amount
# by which the radii of the concentric circles increase.
#
# In this _TODO_, you will implement a function called
# even_better_draw_circles
# that has FIVE parameters, for:
# -- The center of the concentric circles (as it started with)
# -- The amount by which the radii vary (as you did above)
# -- The number of concentric circles drawn
# -- The pen color of each of the concentric circles
# -- The pen thickness of each of the concentric circles
#
# Hence, this even_better_draw_circles function will be
# even more POWERFUL than the previous functions,
# in that it can draw LOTS of different kinds of circles.
#
# Start by copy-and-pasting the code from better_draw_circles above
# to the body of the even_better_draw_circles function defined below.
# Then add parameters and modify the code to make them work!
#
# TODO: 4b.
# In main at the place indicated, comment-out the existing calls
# to better_draw_circles and add at least two calls to the improved
# even_better_draw_circles function, to TEST that your modified code is
# correct and does indeed use its parameters per their descriptions above.
#
###############################################################################
def even_better_draw_circles(point):
""" An improved version of draw_circles, per the _TODO_ above. """
# READ the above _TODO_ and then copy-paste code from better_circles here:
###############################################################################
# TODO: 5.
#
# Finally, comment-out the existing calls to even_better_draw_circles and
# add code in main to draw various circles that form a BEAUTIFUL picture!
###############################################################################
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| src/m5_why_parameters_are_powerful.py | 7,659 | Starts out the same as the draw_circles function defined ABOVE.
You Will make it an IMPROVED, MORE POWERFUL function per the above _TODO_.
Constructs a SimpleTurtle, then uses the SimpleTurtle to draw 10 circles
such that:
-- Each is centered at the given Point, and
-- They have radii: 15 30 45 60 75 ..., respectively.
An improved version of draw_circles, per the _TODO_ above.
Calls the other functions in this module to test and/or demonstrate them.
This module lets you experience the POWER of FUNCTIONS and PARAMETERS.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Morgan Brown.
DONE: 1. PUT YOUR NAME IN THE ABOVE LINE. Bigger numbers mean faster drawing ------------------------------------------------------------------------- When the _TODO_s ask you to test YOUR code, comment-out the following two statements and replace them by calls to better_draw_circles et al as needed. ------------------------------------------------------------------------- DONE: 2. First, RUN this program. You will see that it draws concentric circles whose radii vary by 15. Next, READ: -- main. Note that it constructs a TurtleWindow and then calls the function draw_circles twice, sending draw_circles one Point the first time and another Point the second time. -- The function draw_circles is defined immediately below this _TODO_. Be sure that you understand its code! Ask questions as needed! After you have done the above, change the above _TODO_ to DONE and continue to the next _TODO_ below. ------------------------------------------------------------------------- Draw circles centered at the given Point, by telling the SimpleTurtle to: Step 1: Go to the given Point and point east (towards the right). Step 2: Go 15 pixels DOWN, with its Pen up. Then draw a radius R circle. Note: The circle will be centered at the given Point, because of the way that the SimpleTurtle draw_circle method works. Step 3: Repeat Step 2, but using 30 pixels instead of 15, in both places Step 4: Repeat Step 2, but using 45 pixels instead of 15 Step 5: Repeat Step 2, but using 60 pixels instead of 15 etc. ------------------------------------------------------------------------- Point "east" (towards the right) k becomes 1, 2, 3, ... 10 Go DOWN 15 pixels, ending up pointing east again Radius 15, 30, 45, 60, ... DONE: 3a. The function better_draw_circles defined below this _TODO_ starts out exactly the same as the code for draw_circles that you read above. Your job is to make better_draw_circles "better" than draw_circles by adding a PARAMETER for the amount by which the radii of the concentric circles increase, as described below. The new better_draw_circles function can do the same thing as the draw_circles function, but additionally allows for the radii to vary by ANY desired amount. Hence, the new version will be MORE POWERFUL. So, modify the better_draw_circles function defined BELOW so that it has a single ADDITIONAL PARAMETER that is the amount by which the radii of the circles increase. For example, if that new parameter is given the value 15, then the circles should have radii: 15 30 45 60 75 ..., respectively, just as in draw_circles. But if that new parameter is given the value 3, then the circles should have radii: 3 6 9 12 15 18 ..., respectively. DONE: 3b. In main at the place indicated, comment-out the two existing calls to draw_circles and add at least two calls to the improved better_draw_circles function, to TEST that your modified code is correct and does indeed allow for different amounts by which the radii can vary. Point "east" (towards the right) k becomes 1, 2, 3, ... 10 Go DOWN 15 pixels, ending up pointing east again Radius 15, 30, 45, 60, ... DO: 4a. In the previous _TODO_, you made a MORE POWERFUL version of draw_circles by introducing a new PARAMETER for the amount by which the radii of the concentric circles increase. 
In this _TODO_, you will implement a function called even_better_draw_circles that has FIVE parameters, for: -- The center of the concentric circles (as it started with) -- The amount by which the radii vary (as you did above) -- The number of concentric circles drawn -- The pen color of each of the concentric circles -- The pen thickness of each of the concentric circles Hence, this even_better_draw_circles function will be even more POWERFUL than the previous functions, in that it can draw LOTS of different kinds of circles. Start by copy-and-pasting the code from better_draw_circles above to the body of the even_better_draw_circles function defined below. Then add parameters and modify the code to make them work! TODO: 4b. In main at the place indicated, comment-out the existing calls to better_draw_circles and add at least two calls to the improved even_better_draw_circles function, to TEST that your modified code is correct and does indeed use its parameters per their descriptions above. READ the above _TODO_ and then copy-paste code from better_circles here: TODO: 5. Finally, comment-out the existing calls to even_better_draw_circles and add code in main to draw various circles that form a BEAUTIFUL picture! ----------------------------------------------------------------------------- Calls main to start the ball rolling. ----------------------------------------------------------------------------- | 5,660 | en | 0.839812 |
"""
ETNA School API Wrapper
~~~~~~~~~~~~~~~~~~~~~~~
A Python wrapper to help build Python 3 apps/bots using the ETNA API.
:copyright: (c) 2019 Yohann MARTIN
:license: MIT, see LICENSE for more details.
"""
__title__ = 'etnapy'
__author__ = 'Yohann MARTIN'
__license__ = 'MIT'
__version__ = "1.0.0"
from .user import User
from .promo import Promo
from .trophy import Trophy
from .etnapy import Intra
| etnapy/__init__.py | 401 | ETNA School API Wrapper
~~~~~~~~~~~~~~~~~~~~~~~
A python wrapper to help make python3 apps/bots using the ETNA API.
:copyright: (c) 2019 Yohann MARTIN
:license: MIT, see LICENSE for more details. | 197 | en | 0.383838 |
""" compatibility OpenTimelineIO 0.12.0 and older
"""
import os
import re
import sys
import json
import opentimelineio as otio
from . import utils
import clique
self = sys.modules[__name__]
self.track_types = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
self.project_fps = None
def create_otio_rational_time(frame, fps):
return otio.opentime.RationalTime(
float(frame),
float(fps)
)
def create_otio_time_range(start_frame, frame_duration, fps):
return otio.opentime.TimeRange(
start_time=create_otio_rational_time(start_frame, fps),
duration=create_otio_rational_time(frame_duration, fps)
)
def create_otio_reference(media_pool_item):
metadata = _get_metadata_media_pool_item(media_pool_item)
mp_clip_property = media_pool_item.GetClipProperty()
path = mp_clip_property["File Path"]
reformat_path = utils.get_reformated_path(path, padded=True)
padding = utils.get_padding_from_path(path)
if padding:
metadata.update({
"isSequence": True,
"padding": padding
})
# get clip property regarding to type
mp_clip_property = media_pool_item.GetClipProperty()
fps = float(mp_clip_property["FPS"])
if mp_clip_property["Type"] == "Video":
frame_start = int(mp_clip_property["Start"])
frame_duration = int(mp_clip_property["Frames"])
else:
audio_duration = str(mp_clip_property["Duration"])
frame_start = 0
frame_duration = int(utils.timecode_to_frames(
audio_duration, float(fps)))
otio_ex_ref_item = None
if padding:
# if it is file sequence try to create `ImageSequenceReference`
# the OTIO might not be compatible so return nothing and do it old way
try:
dirname, filename = os.path.split(path)
collection = clique.parse(filename, '{head}[{ranges}]{tail}')
padding_num = len(re.findall("(\\d+)(?=-)", filename).pop())
otio_ex_ref_item = otio.schema.ImageSequenceReference(
target_url_base=dirname + os.sep,
name_prefix=collection.format("{head}"),
name_suffix=collection.format("{tail}"),
start_frame=frame_start,
frame_zero_padding=padding_num,
rate=fps,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
except AttributeError:
pass
if not otio_ex_ref_item:
# in case old OTIO or video file create `ExternalReference`
otio_ex_ref_item = otio.schema.ExternalReference(
target_url=reformat_path,
available_range=create_otio_time_range(
frame_start,
frame_duration,
fps
)
)
# add metadata to otio item
add_otio_metadata(otio_ex_ref_item, media_pool_item, **metadata)
return otio_ex_ref_item
def create_otio_markers(track_item, fps):
track_item_markers = track_item.GetMarkers()
markers = []
for marker_frame in track_item_markers:
note = track_item_markers[marker_frame]["note"]
if "{" in note and "}" in note:
metadata = json.loads(note)
else:
metadata = {"note": note}
markers.append(
otio.schema.Marker(
name=track_item_markers[marker_frame]["name"],
marked_range=create_otio_time_range(
marker_frame,
track_item_markers[marker_frame]["duration"],
fps
),
color=track_item_markers[marker_frame]["color"].upper(),
metadata=metadata
)
)
return markers
def create_otio_clip(track_item):
media_pool_item = track_item.GetMediaPoolItem()
mp_clip_property = media_pool_item.GetClipProperty()
if not self.project_fps:
fps = mp_clip_property["FPS"]
else:
fps = self.project_fps
name = track_item.GetName()
media_reference = create_otio_reference(media_pool_item)
source_range = create_otio_time_range(
int(track_item.GetLeftOffset()),
int(track_item.GetDuration()),
fps
)
if mp_clip_property["Type"] == "Audio":
return_clips = list()
audio_chanels = mp_clip_property["Audio Ch"]
for channel in range(0, int(audio_chanels)):
clip = otio.schema.Clip(
name=f"{name}_{channel}",
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return_clips.append(clip)
return return_clips
else:
clip = otio.schema.Clip(
name=name,
source_range=source_range,
media_reference=media_reference
)
for marker in create_otio_markers(track_item, fps):
clip.markers.append(marker)
return clip
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
return otio.schema.Gap(
source_range=create_otio_time_range(
gap_start,
(clip_start - tl_start_frame) - gap_start,
fps
)
)
def _create_otio_timeline(project, timeline, fps):
metadata = _get_timeline_metadata(project, timeline)
start_time = create_otio_rational_time(
timeline.GetStartFrame(), fps)
otio_timeline = otio.schema.Timeline(
name=timeline.GetName(),
global_start_time=start_time,
metadata=metadata
)
return otio_timeline
def _get_timeline_metadata(project, timeline):
media_pool = project.GetMediaPool()
root_folder = media_pool.GetRootFolder()
ls_folder = root_folder.GetClipList()
    timeline_name = timeline.GetName()
for tl in ls_folder:
if tl.GetName() not in timeline_name:
continue
return _get_metadata_media_pool_item(tl)
def _get_metadata_media_pool_item(media_pool_item):
data = dict()
data.update({k: v for k, v in media_pool_item.GetMetadata().items()})
    clip_property = media_pool_item.GetClipProperty() or {}
    for name, value in clip_property.items():
if "Resolution" in name and "" != value:
width, height = value.split("x")
data.update({
"width": int(width),
"height": int(height)
})
if "PAR" in name and "" != value:
            try:
                data.update({"pixelAspect": float(value)})
            except ValueError:
                # non-numeric PAR values (e.g. "Square") default to 1.0
                data.update({"pixelAspect": 1.0})
return data
def create_otio_track(track_type, track_name):
return otio.schema.Track(
name=track_name,
kind=self.track_types[track_type]
)
def add_otio_gap(clip_start, otio_track, track_item, timeline):
# if gap between track start and clip start
if clip_start > otio_track.available_range().duration.value:
# create gap and add it to track
otio_track.append(
create_otio_gap(
otio_track.available_range().duration.value,
track_item.GetStart(),
timeline.GetStartFrame(),
self.project_fps
)
)
def add_otio_metadata(otio_item, media_pool_item, **kwargs):
mp_metadata = media_pool_item.GetMetadata()
# add additional metadata from kwargs
if kwargs:
mp_metadata.update(kwargs)
# add metadata to otio item metadata
for key, value in mp_metadata.items():
otio_item.metadata.update({key: value})
def create_otio_timeline(resolve_project):
# get current timeline
self.project_fps = resolve_project.GetSetting("timelineFrameRate")
timeline = resolve_project.GetCurrentTimeline()
# convert timeline to otio
otio_timeline = _create_otio_timeline(
resolve_project, timeline, self.project_fps)
# loop all defined track types
for track_type in list(self.track_types.keys()):
# get total track count
track_count = timeline.GetTrackCount(track_type)
# loop all tracks by track indexes
for track_index in range(1, int(track_count) + 1):
# get current track name
track_name = timeline.GetTrackName(track_type, track_index)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
# get all track items in current track
current_track_items = timeline.GetItemListInTrack(
track_type, track_index)
# loop available track items in current track items
for track_item in current_track_items:
# skip offline track items
if track_item.GetMediaPoolItem() is None:
continue
# calculate real clip start
clip_start = track_item.GetStart() - timeline.GetStartFrame()
add_otio_gap(
clip_start, otio_track, track_item, timeline)
# create otio clip and add it to track
otio_clip = create_otio_clip(track_item)
if not isinstance(otio_clip, list):
otio_track.append(otio_clip)
else:
for index, clip in enumerate(otio_clip):
if index == 0:
otio_track.append(clip)
else:
                            # add previous otio track to timeline
otio_timeline.tracks.append(otio_track)
# convert track to otio
otio_track = create_otio_track(
track_type, track_name)
add_otio_gap(
clip_start, otio_track,
track_item, timeline)
otio_track.append(clip)
# add track to otio timeline
otio_timeline.tracks.append(otio_track)
return otio_timeline
def write_to_file(otio_timeline, path):
otio.adapters.write_to_file(otio_timeline, path)
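

# --- illustrative sketch (not part of the original module) ---
# How these helpers would typically be driven from a Resolve scripting
# session; `resolve_project` comes from DaVinci Resolve's scripting API and
# the output path is a placeholder.
def _demo_export(resolve_project):
    otio_timeline = create_otio_timeline(resolve_project)
    write_to_file(otio_timeline, "/tmp/timeline.otio")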
| openpype/hosts/resolve/otio/davinci_export.py | 10,559 |
""" generators for the neuron project """
# general imports
import sys
import os
import zipfile
# third party imports
import numpy as np
import nibabel as nib
import scipy.misc
import keras
from keras.utils import np_utils
from keras.models import Model
# local packages
import pynd.ndutils as nd
import pytools.patchlib as pl
import pytools.timer as timer
# reload patchlib (it's often updated right now...)
from imp import reload
reload(pl)
# other neuron (this project) packages
from . import dataproc as nrn_proc
from . import models as nrn_models
class Vol(object):
def __init__(self,
volpath,
ext='.npz',
nb_restart_cycle=None, # number of files to restart after
name='single_vol', # name
                 fixed_vol_size=True,  # assumes each volume is fixed size
                 data_proc_fn=None,  # processing function that takes in one arg (the volume)
                 patch_size=None,  # split the volume in patches? if so, get patch_size
                 patch_stride=1,  # split the volume in patches? if so, get patch_stride
                 vol_rand_seed=None,  # seed for shuffling the file list
                 ):
# get filenames at given paths
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
assert nb_files > 0, "Could not find any files at %s with extension %s" % (volpath, ext)
# set up restart cycle for volume files --
# i.e. after how many volumes do we restart
if nb_restart_cycle is None:
nb_restart_cycle = nb_files
# compute subvolume split
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)
# process volume
if data_proc_fn is not None:
vol_data = data_proc_fn(vol_data)
        # note: the stored volume shape could also be read cheaply from the
        # npz header, e.g. via _npz_headers(npz_file, namelist=['vol_data.npy'])
nb_patches_per_vol = 1
if fixed_vol_size and (patch_size is not None) and all(f is not None for f in patch_size):
nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))
assert nb_restart_cycle <= (nb_files * nb_patches_per_vol), \
'%s restart cycle (%s) too big (%s) in %s' % \
(name, nb_restart_cycle, nb_files * nb_patches_per_vol, volpath)
def vol(volpath,
ext='.npz',
batch_size=1,
expected_nb_files=-1,
expected_files=None,
data_proc_fn=None, # processing function that takes in one arg (the volume)
relabel=None, # relabeling array
nb_labels_reshape=0, # reshape to categorial format for keras, need # labels
keep_vol_size=False, # whether to keep the volume size on categorical resizing
name='single_vol', # name, optional
nb_restart_cycle=None, # number of files to restart after
patch_size=None, # split the volume in patches? if so, get patch_size
patch_stride=1, # split the volume in patches? if so, get patch_stride
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_feats=1,
patch_rand=False,
patch_rand_seed=None,
vol_rand_seed=None,
binary=False,
yield_incomplete_final_batch=True,
verbose=False):
"""
generator for single volume (or volume patches) from a list of files
simple volume generator that loads a volume (via npy/mgz/nii/niigz), processes it,
and prepares it for keras model formats
if a patch size is passed, breaks the volume into patches and generates those
"""
# get filenames at given paths
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
assert nb_files > 0, "Could not find any files at %s with extension %s" % (volpath, ext)
# compute subvolume split
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[0]), ext)
# process volume
if data_proc_fn is not None:
vol_data = data_proc_fn(vol_data)
nb_patches_per_vol = 1
if patch_size is not None and all(f is not None for f in patch_size):
if relabel is None and len(patch_size) == (len(vol_data.shape) - 1):
            patch_size = [*patch_size, vol_data.shape[-1]]
            if not isinstance(patch_stride, (list, tuple)):
                patch_stride = [patch_stride for _ in patch_size[:-1]]
            patch_stride = [*patch_stride, vol_data.shape[-1]]
assert len(vol_data.shape) == len(patch_size), "Vol dims %d are not equal to patch dims %d" % (len(vol_data.shape), len(patch_size))
nb_patches_per_vol = np.prod(pl.gridsize(vol_data.shape, patch_size, patch_stride))
if nb_restart_cycle is None:
print("setting restart cycle to", nb_files)
nb_restart_cycle = nb_files
assert nb_restart_cycle <= (nb_files * nb_patches_per_vol), \
'%s restart cycle (%s) too big (%s) in %s' % \
(name, nb_restart_cycle, nb_files * nb_patches_per_vol, volpath)
# check the number of files matches expected (if passed)
if expected_nb_files >= 0:
assert nb_files == expected_nb_files, \
"number of files do not match: %d, %d" % (nb_files, expected_nb_files)
if expected_files is not None:
if not (volfiles == expected_files):
print('file lists did not match. You should probably stop execution.', file=sys.stderr)
print(len(volfiles), len(expected_files))
if verbose:
print('nb_restart_cycle:', nb_restart_cycle)
# iterate through files
fileidx = -1
batch_idx = -1
feat_idx = 0
batch_shape = None
while 1:
fileidx = np.mod(fileidx + 1, nb_restart_cycle)
if verbose and fileidx == 0:
print('starting %s cycle' % name)
# read next file (circular)
try:
if verbose:
print('opening %s' % os.path.join(volpath, volfiles[fileidx]))
file_name = os.path.join(volpath, volfiles[fileidx])
vol_data = _load_medical_volume(file_name, ext, verbose)
# print(file_name, " was loaded", vol_data.shape)
        except Exception:
debug_error_msg = "#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s"
print(debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0]))
raise
# process volume
if data_proc_fn is not None:
vol_data = data_proc_fn(vol_data)
# the original segmentation files have non-sequential relabel (i.e. some relabel are
# missing to avoid exploding our model, we only care about the relabel that exist.
if relabel is not None:
vol_data = _relabel(vol_data, relabel)
# split volume into patches if necessary and yield
if patch_size is None:
this_patch_size = vol_data.shape
patch_stride = [1 for f in this_patch_size]
else:
this_patch_size = [f for f in patch_size]
for pi, p in enumerate(this_patch_size):
if p is None:
this_patch_size[pi] = vol_data.shape[pi]
patch_stride[pi] = 1
assert ~np.any(np.isnan(vol_data)), "Found a nan for %s" % volfiles[fileidx]
assert np.all(np.isfinite(vol_data)), "Found a inf for %s" % volfiles[fileidx]
patch_gen = patch(vol_data, this_patch_size,
patch_stride=patch_stride,
nb_labels_reshape=nb_labels_reshape,
batch_size=1,
infinite=False,
collapse_2d=collapse_2d,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
keep_vol_size=keep_vol_size)
empty_gen = True
patch_idx = -1
for lpatch in patch_gen:
empty_gen = False
patch_idx += 1
# add to feature
if np.mod(feat_idx, nb_feats) == 0:
vol_data_feats = lpatch
else:
vol_data_feats = np.concatenate([vol_data_feats, lpatch], np.ndim(lpatch)-1)
feat_idx += 1
if binary:
vol_data_feats = vol_data_feats.astype(bool)
if np.mod(feat_idx, nb_feats) == 0:
                feats_shape = vol_data_feats.shape[1:]
# yield previous batch if the new volume has different patch sizes
if batch_shape is not None and (feats_shape != batch_shape):
batch_idx = -1
batch_shape = None
print('switching patch sizes')
yield np.vstack(vol_data_batch)
# add to batch of volume data, unless the batch is currently empty
if batch_idx == -1:
vol_data_batch = [vol_data_feats]
                    batch_shape = vol_data_feats.shape[1:]
else:
vol_data_batch = [*vol_data_batch, vol_data_feats]
# yield patch
batch_idx += 1
batch_done = batch_idx == batch_size - 1
files_done = np.mod(fileidx + 1, nb_restart_cycle) == 0
final_batch = yield_incomplete_final_batch and files_done and patch_idx == (nb_patches_per_vol-1)
if final_batch: # verbose and
print('last batch in %s cycle %d. nb_batch:%d' % (name, fileidx, len(vol_data_batch)))
if batch_done or final_batch:
batch_idx = -1
q = np.vstack(vol_data_batch)
yield q
if empty_gen:
            raise ValueError('Patch generator was empty for file %s' % volfiles[fileidx])
def patch(vol_data, # the volume
patch_size, # patch size
patch_stride=1, # patch stride (spacing)
nb_labels_reshape=1, # number of labels for categorical resizing. 0 if no resizing
keep_vol_size=False, # whether to keep the volume size on categorical resizing
batch_size=1, # batch size
collapse_2d=None,
patch_rand=False,
patch_rand_seed=None,
variable_batch_size=False,
infinite=False): # whether the generator should continue (re)-generating patches
"""
generate patches from volume for keras package
Yields:
patch: nd array of shape [batch_size, *patch_size], unless resized via nb_labels_reshape
"""
# some parameter setup
assert batch_size >= 1, "batch_size should be at least 1"
if patch_size is None:
patch_size = vol_data.shape
for pi,p in enumerate(patch_size):
if p is None:
patch_size[pi] = vol_data.shape[pi]
batch_idx = -1
if variable_batch_size:
batch_size = yield
# do while. if not infinite, will break at the end
while True:
# create patch generator
gen = pl.patch_gen(vol_data, patch_size,
stride=patch_stride,
rand=patch_rand,
rand_seed=patch_rand_seed)
# go through the patch generator
empty_gen = True
for lpatch in gen:
empty_gen = False
# reshape output layer as categorical and prep proper size
# print(lpatch.shape, nb_labels_reshape, keep_vol_size, patch_size)
lpatch = _categorical_prep(lpatch, nb_labels_reshape, keep_vol_size, patch_size)
if collapse_2d is not None:
lpatch = np.squeeze(lpatch, collapse_2d + 1) # +1 due to batch in first dim
# add this patch to the stack
if batch_idx == -1:
if batch_size == 1:
patch_data_batch = lpatch
else:
patch_data_batch = np.zeros([batch_size, *lpatch.shape[1:]])
patch_data_batch[0, :] = lpatch
else:
patch_data_batch[batch_idx+1, :] = lpatch
# yield patch
batch_idx += 1
if batch_idx == batch_size - 1:
batch_idx = -1
batch_size_y = yield patch_data_batch
if variable_batch_size:
batch_size = batch_size_y
assert not empty_gen, 'generator was empty. vol size was %s' % ''.join(['%d '%d for d in vol_data.shape])
# if not infinite generation, yield the last batch and break the while
if not infinite:
if batch_idx >= 0:
patch_data_batch = patch_data_batch[:(batch_idx+1), :]
yield patch_data_batch
break
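

# --- illustrative sketch (not part of the original module) ---
# Drives the `patch` generator above on a random volume; the shapes are made
# up purely for demonstration.
def _demo_patch_generator():
    vol_data = np.random.rand(32, 32, 32)
    gen = patch(vol_data, [16, 16, 16], patch_stride=16,
                nb_labels_reshape=1, batch_size=2)
    batch = next(gen)
    # two patches per batch, each 16^3 with a trailing feature axis
    assert batch.shape == (2, 16, 16, 16, 1)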
def vol_seg(volpath,
segpath,
proc_vol_fn=None,
proc_seg_fn=None,
verbose=False,
name='vol_seg', # name, optional
ext='.npz',
nb_restart_cycle=None, # number of files to restart after
nb_labels_reshape=-1,
collapse_2d=None,
force_binary=False,
nb_input_feats=1,
relabel=None,
vol_rand_seed=None,
seg_binary=False,
vol_subname='norm', # subname of volume
seg_subname='aseg', # subname of segmentation
**kwargs):
"""
generator with (volume, segmentation)
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
** kwargs are any named arguments for vol(...),
except verbose, data_proc_fn, ext, nb_labels_reshape and name
(which this function will control when calling vol())
"""
# get vol generator
vol_gen = vol(volpath, **kwargs, ext=ext,
nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False,
relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=name+' vol',
verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
# get seg generator, matching nb_files
# vol_files = [f.replace('norm', 'aseg') for f in _get_file_list(volpath, ext)]
# vol_files = [f.replace('orig', 'aseg') for f in vol_files]
vol_files = [f.replace(vol_subname, seg_subname) for f in _get_file_list(volpath, ext, vol_rand_seed)]
seg_gen = vol(segpath, **kwargs, ext=ext, nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d,
force_binary=force_binary, relabel=relabel, vol_rand_seed=vol_rand_seed,
data_proc_fn=proc_seg_fn, nb_labels_reshape=nb_labels_reshape, keep_vol_size=True,
expected_files=vol_files, name=name+' seg', binary=seg_binary, verbose=False)
# on next (while):
while 1:
# get input and output (seg) vols
input_vol = next(vol_gen).astype('float16')
output_vol = next(seg_gen).astype('float16') # was int8. Why? need float possibility...
# output input and output
yield (input_vol, output_vol)
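

# --- illustrative sketch (not part of the original module) ---
# Typical wiring of `vol_seg` into (older) Keras training; the paths, label
# count and model are placeholders.
def _demo_vol_seg_training(model, train_vol_path, train_seg_path):
    gen = vol_seg(train_vol_path, train_seg_path,
                  batch_size=4, nb_labels_reshape=30)  # hypothetical 30 labels
    model.fit_generator(gen, steps_per_epoch=100, epochs=10)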
def vol_cat(volpaths, # expect two folders in here
crop=None, resize_shape=None, rescale=None, # processing parameters
verbose=False,
name='vol_cat', # name, optional
ext='.npz',
nb_labels_reshape=-1,
vol_rand_seed=None,
**kwargs): # named arguments for vol(...), except verbose, data_proc_fn, ext, nb_labels_reshape and name (which this function will control when calling vol())
"""
generator with (volume, binary_bit) (random order)
    ONLY works with batch size of 1 for now
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
"""
folders = [f for f in sorted(os.listdir(volpaths))]
# compute processing function
proc_vol_fn = lambda x: nrn_proc.vol_proc(x, crop=crop, resize_shape=resize_shape,
interp_order=2, rescale=rescale)
# get vol generators
generators = ()
generators_len = ()
for folder in folders:
vol_gen = vol(os.path.join(volpaths, folder), **kwargs, ext=ext, vol_rand_seed=vol_rand_seed,
data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=folder, verbose=False)
generators_len += (len(_get_file_list(os.path.join(volpaths, folder), '.npz')), )
generators += (vol_gen, )
bake_data_test = False
if bake_data_test:
print('fake_data_test', file=sys.stderr)
# on next (while):
while 1:
# build the random order stack
order = np.hstack((np.zeros(generators_len[0]), np.ones(generators_len[1]))).astype('int')
np.random.shuffle(order) # shuffle
for idx in order:
gen = generators[idx]
# for idx, gen in enumerate(generators):
z = np.zeros([1, 2]) #1,1,2 for categorical binary style
z[0,idx] = 1 #
# z[0,0,0] = idx
data = next(gen).astype('float32')
if bake_data_test and idx == 0:
# data = data*idx
data = -data
yield (data, z)
def add_prior(gen,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
verbose=False,
patch_rand=False,
patch_rand_seed=None):
"""
#
# add a prior generator to a given generator
# with the number of patches in batch matching output of gen
"""
# get prior
if prior_type == 'location':
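        # note: `vol_size` is not defined in this scope in the original code;
        # the location prior needs the volume shape to be supplied externally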
prior_vol = nd.volsize2ndgrid(vol_size)
prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])
prior_vol = np.expand_dims(prior_vol, axis=0) # reshape for model
elif prior_type == 'file': # assumes a npz filename passed in prior_file
with timer.Timer('loading prior', True):
data = np.load(prior_file)
prior_vol = data['prior'].astype('float16')
else: # assumes a volume
with timer.Timer('loading prior', True):
prior_vol = prior_file.astype('float16')
if force_binary:
nb_labels = prior_vol.shape[-1]
prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)
prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)
nb_channels = prior_vol.shape[-1]
if extract_slice is not None:
if isinstance(extract_slice, int):
prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]
else: # assume slices
prior_vol = prior_vol[:, :, extract_slice, :]
# get the prior to have the right volume [x, y, z, nb_channels]
assert np.ndim(prior_vol) == 4 or np.ndim(prior_vol) == 3, "prior is the wrong size"
# prior generator
if patch_size is None:
patch_size = prior_vol.shape[0:3]
assert len(patch_size) == len(patch_stride)
prior_gen = patch(prior_vol, [*patch_size, nb_channels],
patch_stride=[*patch_stride, nb_channels],
batch_size=batch_size,
collapse_2d=collapse_2d,
keep_vol_size=True,
infinite=True,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
variable_batch_size=True,
nb_labels_reshape=0)
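    # with variable_batch_size=True the patch generator takes its batch size
    # via send(); the first next() only primes it and yields None, which is
    # what the assert below checks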
assert next(prior_gen) is None, "bad prior gen setup"
# generator loop
while 1:
# generate input and output volumes
gen_sample = next(gen)
# generate prior batch
gs_sample = _get_shape(gen_sample)
prior_batch = prior_gen.send(gs_sample)
yield (gen_sample, prior_batch)
def vol_prior(*args,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_input_feats=1,
verbose=False,
vol_rand_seed=None,
patch_rand=False,
**kwargs): # anything else you'd like to pass to vol()
"""
generator that appends prior to (volume, segmentation) depending on input
e.g. could be ((volume, prior), segmentation)
"""
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
    # prepare the vol generator
    gen = vol(*args, **kwargs,
              collapse_2d=collapse_2d,
              extract_slice=extract_slice,
              force_binary=force_binary,
              verbose=verbose,
              patch_size=patch_size,
              patch_stride=patch_stride,
              batch_size=batch_size,
              vol_rand_seed=vol_rand_seed,
              patch_rand=patch_rand,
              patch_rand_seed=patch_rand_seed,
              nb_feats=nb_input_feats)
# add prior to output
pgen = add_prior(gen,
proc_vol_fn=proc_vol_fn,
proc_seg_fn=proc_seg_fn,
prior_type=prior_type,
prior_file=prior_file,
prior_feed=prior_feed,
patch_stride=patch_stride,
patch_size=patch_size,
batch_size=batch_size,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_rand=patch_rand,
                     patch_rand_seed=patch_rand_seed)
    # generator loop
    while 1:
        input_vol, prior_batch = next(pgen)
        # vol() yields a single volume batch, so it serves as both input and
        # output here (auto-encoder style), mirroring vol_prior_hack below
        if prior_feed == 'input':
            yield ([input_vol, prior_batch], input_vol)
        else:
            assert prior_feed == 'output'
            yield (input_vol, [input_vol, prior_batch])
def vol_seg_prior(*args,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_input_feats=1,
verbose=False,
vol_rand_seed=None,
patch_rand=None,
**kwargs):
"""
generator that appends prior to (volume, segmentation) depending on input
e.g. could be ((volume, prior), segmentation)
"""
patch_rand_seed = None
if patch_rand:
patch_rand_seed = np.random.random()
# prepare the vol_seg
gen = vol_seg(*args, **kwargs,
proc_vol_fn=None,
proc_seg_fn=None,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_size=patch_size,
patch_stride=patch_stride,
batch_size=batch_size,
vol_rand_seed=vol_rand_seed,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed,
nb_input_feats=nb_input_feats)
# add prior to output
pgen = add_prior(gen,
proc_vol_fn=proc_vol_fn,
proc_seg_fn=proc_seg_fn,
prior_type=prior_type,
prior_file=prior_file,
prior_feed=prior_feed,
patch_stride=patch_stride,
patch_size=patch_size,
batch_size=batch_size,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_rand=patch_rand,
patch_rand_seed=patch_rand_seed)
# generator loop
while 1:
gen_sample, prior_batch = next(pgen)
input_vol, output_vol = gen_sample
if prior_feed == 'input':
yield ([input_vol, prior_batch], output_vol)
else:
assert prior_feed == 'output'
yield (input_vol, [output_vol, prior_batch])
def vol_prior_hack(*args,
proc_vol_fn=None,
proc_seg_fn=None,
prior_type='location', # file-static, file-gen, location
prior_file=None, # prior filename
prior_feed='input', # input or output
patch_stride=1,
patch_size=None,
batch_size=1,
collapse_2d=None,
extract_slice=None,
force_binary=False,
nb_input_feats=1,
verbose=False,
vol_rand_seed=None,
**kwargs):
"""
"""
# prepare the vol_seg
gen = vol_seg_hack(*args, **kwargs,
proc_vol_fn=None,
proc_seg_fn=None,
collapse_2d=collapse_2d,
extract_slice=extract_slice,
force_binary=force_binary,
verbose=verbose,
patch_size=patch_size,
patch_stride=patch_stride,
batch_size=batch_size,
vol_rand_seed=vol_rand_seed,
nb_input_feats=nb_input_feats)
# get prior
if prior_type == 'location':
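        # note: `vol_size` is undefined here as well; see add_prior above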
prior_vol = nd.volsize2ndgrid(vol_size)
prior_vol = np.transpose(prior_vol, [1, 2, 3, 0])
prior_vol = np.expand_dims(prior_vol, axis=0) # reshape for model
elif prior_type == 'file': # assumes a npz filename passed in prior_file
with timer.Timer('loading prior', True):
data = np.load(prior_file)
prior_vol = data['prior'].astype('float16')
    else:  # assumes a volume
with timer.Timer('astyping prior', verbose):
prior_vol = prior_file
if not (prior_vol.dtype == 'float16'):
prior_vol = prior_vol.astype('float16')
if force_binary:
nb_labels = prior_vol.shape[-1]
prior_vol[:, :, :, 1] = np.sum(prior_vol[:, :, :, 1:nb_labels], 3)
prior_vol = np.delete(prior_vol, range(2, nb_labels), 3)
nb_channels = prior_vol.shape[-1]
if extract_slice is not None:
if isinstance(extract_slice, int):
prior_vol = prior_vol[:, :, extract_slice, np.newaxis, :]
else: # assume slices
prior_vol = prior_vol[:, :, extract_slice, :]
# get the prior to have the right volume [x, y, z, nb_channels]
assert np.ndim(prior_vol) == 4 or np.ndim(prior_vol) == 3, "prior is the wrong size"
# prior generator
if patch_size is None:
patch_size = prior_vol.shape[0:3]
assert len(patch_size) == len(patch_stride)
prior_gen = patch(prior_vol, [*patch_size, nb_channels],
patch_stride=[*patch_stride, nb_channels],
batch_size=batch_size,
collapse_2d=collapse_2d,
keep_vol_size=True,
infinite=True,
#variable_batch_size=True, # this
nb_labels_reshape=0)
# assert next(prior_gen) is None, "bad prior gen setup"
# generator loop
while 1:
# generate input and output volumes
input_vol = next(gen)
if verbose and np.all(input_vol.flat == 0):
print("all entries are 0")
# generate prior batch
# with timer.Timer("with send?"):
# prior_batch = prior_gen.send(input_vol.shape[0])
prior_batch = next(prior_gen)
if prior_feed == 'input':
yield ([input_vol, prior_batch], input_vol)
else:
assert prior_feed == 'output'
yield (input_vol, [input_vol, prior_batch])
def vol_seg_hack(volpath,
segpath,
proc_vol_fn=None,
proc_seg_fn=None,
verbose=False,
name='vol_seg', # name, optional
ext='.npz',
nb_restart_cycle=None, # number of files to restart after
nb_labels_reshape=-1,
collapse_2d=None,
force_binary=False,
nb_input_feats=1,
relabel=None,
vol_rand_seed=None,
seg_binary=False,
vol_subname='norm', # subname of volume
seg_subname='aseg', # subname of segmentation
**kwargs):
"""
generator with (volume, segmentation)
verbose is passed down to the base generators.py primitive generator (e.g. vol, here)
** kwargs are any named arguments for vol(...),
except verbose, data_proc_fn, ext, nb_labels_reshape and name
(which this function will control when calling vol())
"""
# get vol generator
vol_gen = vol(volpath, **kwargs, ext=ext,
nb_restart_cycle=nb_restart_cycle, collapse_2d=collapse_2d, force_binary=False,
relabel=None, data_proc_fn=proc_vol_fn, nb_labels_reshape=1, name=name+' vol',
verbose=verbose, nb_feats=nb_input_feats, vol_rand_seed=vol_rand_seed)
# on next (while):
while 1:
# get input and output (seg) vols
input_vol = next(vol_gen).astype('float16')
        # output only the input vol (no seg in this hack variant)
yield input_vol
def vol_sr_slices(volpath,
nb_input_slices,
nb_slice_spacing,
batch_size=1,
ext='.npz',
vol_rand_seed=None,
nb_restart_cycle=None,
name='vol_sr_slices',
rand_slices=True, # randomize init slice order (i.e. across entries per batch) given a volume
simulate_whole_sparse_vol=False,
verbose=False
):
"""
default generator for slice-wise super resolution
"""
def indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing):
idx = start_indices[0]
output_batch = np.expand_dims(vol_data[:,:,idx:idx+nb_slices_in_subvol], 0)
input_batch = np.expand_dims(vol_data[:,:,idx:(idx+nb_slices_in_subvol):(nb_slice_spacing+1)], 0)
for idx in start_indices[1:]:
out_sel = np.expand_dims(vol_data[:,:,idx:idx+nb_slices_in_subvol], 0)
output_batch = np.vstack([output_batch, out_sel])
input_batch = np.vstack([input_batch, np.expand_dims(vol_data[:,:,idx:(idx+nb_slices_in_subvol):(nb_slice_spacing+1)], 0)])
output_batch = np.reshape(output_batch, [batch_size, -1, output_batch.shape[-1]])
return (input_batch, output_batch)
    print('vol_sr_slices: SHOULD PROPERLY RANDOMIZE across different subjects', file=sys.stderr)
volfiles = _get_file_list(volpath, ext, vol_rand_seed)
nb_files = len(volfiles)
if nb_restart_cycle is None:
nb_restart_cycle = nb_files
# compute the number of slices we'll need in a subvolume
nb_slices_in_subvol = (nb_input_slices - 1) * (nb_slice_spacing + 1) + 1
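    # e.g. nb_input_slices=3 with nb_slice_spacing=2 requires
    # (3 - 1) * (2 + 1) + 1 = 7 consecutive slices of the dense volume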
# iterate through files
fileidx = -1
while 1:
fileidx = np.mod(fileidx + 1, nb_restart_cycle)
if verbose and fileidx == 0:
print('starting %s cycle' % name)
try:
vol_data = _load_medical_volume(os.path.join(volpath, volfiles[fileidx]), ext, verbose)
        except Exception:
debug_error_msg = "#files: %d, fileidx: %d, nb_restart_cycle: %d. error: %s"
print(debug_error_msg % (len(volfiles), fileidx, nb_restart_cycle, sys.exc_info()[0]))
raise
# compute some random slice
nb_slices = vol_data.shape[2]
nb_start_slices = nb_slices - nb_slices_in_subvol + 1
# prepare batches
if simulate_whole_sparse_vol: # if essentially simulate a whole sparse volume for consistent inputs, and yield slices like that:
init_slice = 0
if rand_slices:
init_slice = np.random.randint(0, high=nb_start_slices-1)
all_start_indices = list(range(init_slice, nb_start_slices, nb_slice_spacing+1))
for batch_start in range(0, len(all_start_indices), batch_size*(nb_input_slices-1)):
start_indices = [all_start_indices[s] for s in range(batch_start, batch_start + batch_size)]
input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
yield (input_batch, output_batch)
# if just random slices, get a batch of random starts from this volume and that's it.
elif rand_slices:
assert not simulate_whole_sparse_vol
start_indices = np.random.choice(range(nb_start_slices), size=batch_size, replace=False)
input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
yield (input_batch, output_batch)
# go slice by slice (overlapping regions)
else:
for batch_start in range(0, nb_start_slices, batch_size):
start_indices = list(range(batch_start, batch_start + batch_size))
input_batch, output_batch = indices_to_batch(vol_data, start_indices, nb_slices_in_subvol, nb_slice_spacing)
yield (input_batch, output_batch)
def img_seg(volpath,
segpath,
batch_size=1,
verbose=False,
nb_restart_cycle=None,
name='img_seg', # name, optional
ext='.png',
vol_rand_seed=None,
**kwargs):
"""
generator for (image, segmentation)
"""
def imggen(path, ext, nb_restart_cycle=None):
"""
TODO: should really use the volume generators for this
"""
files = _get_file_list(path, ext, vol_rand_seed)
if nb_restart_cycle is None:
nb_restart_cycle = len(files)
idx = -1
while 1:
idx = np.mod(idx+1, nb_restart_cycle)
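            # note: scipy.misc.imread was removed in SciPy >= 1.2; newer
            # environments would need imageio.imread or PIL instead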
im = scipy.misc.imread(os.path.join(path, files[idx]))[:, :, 0]
yield im.reshape((1,) + im.shape)
img_gen = imggen(volpath, ext, nb_restart_cycle)
seg_gen = imggen(segpath, ext)
# on next (while):
while 1:
input_vol = np.vstack([next(img_gen).astype('float16')/255 for i in range(batch_size)])
input_vol = np.expand_dims(input_vol, axis=-1)
output_vols = [np_utils.to_categorical(next(seg_gen).astype('int8'), num_classes=2) for i in range(batch_size)]
output_vol = np.vstack([np.expand_dims(f, axis=0) for f in output_vols])
# output input and output
yield (input_vol, output_vol)
# Some internal use functions
def _get_file_list(volpath, ext=None, vol_rand_seed=None):
"""
get a list of files at the given path with the given extension
"""
files = [f for f in sorted(os.listdir(volpath)) if ext is None or f.endswith(ext)]
if vol_rand_seed is not None:
np.random.seed(vol_rand_seed)
files = np.random.permutation(files).tolist()
return files
def _load_medical_volume(filename, ext, verbose=False):
"""
load a medical volume from one of a number of file types
"""
with timer.Timer('load_vol', verbose >= 2):
if ext == '.npz':
vol_file = np.load(filename)
vol_data = vol_file['vol_data']
        elif ext == '.npy':
vol_data = np.load(filename)
elif ext == '.mgz' or ext == '.nii' or ext == '.nii.gz':
vol_med = nib.load(filename)
vol_data = vol_med.get_data()
else:
raise ValueError("Unexpected extension %s" % ext)
return vol_data
def _categorical_prep(vol_data, nb_labels_reshape, keep_vol_size, patch_size):
if nb_labels_reshape > 1:
lpatch = _to_categorical(vol_data, nb_labels_reshape, keep_vol_size)
# if keep_vol_size:
# lpatch = np.reshape(lpatch, [*patch_size, nb_labels_reshape])
elif nb_labels_reshape == 1:
lpatch = np.expand_dims(vol_data, axis=-1)
else:
assert nb_labels_reshape == 0
lpatch = vol_data
lpatch = np.expand_dims(lpatch, axis=0)
return lpatch
def _to_categorical(y, num_classes=None, reshape=True):
"""
# copy of keras.utils.np_utils.to_categorical, but with a boolean matrix instead of float
Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
# Returns
A binary matrix representation of the input.
"""
oshape = y.shape
y = np.array(y, dtype='int').ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), bool)
categorical[np.arange(n), y] = 1
if reshape:
categorical = np.reshape(categorical, [*oshape, num_classes])
return categorical
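

# --- illustrative sketch (not part of the original module) ---
# Boolean one-hot encoding of a small label map via _to_categorical.
def _demo_to_categorical():
    seg = np.array([[0, 1], [2, 1]])
    onehot = _to_categorical(seg, num_classes=3)
    assert onehot.shape == (2, 2, 3)
    assert onehot.dtype == bool
    assert onehot[0, 1, 1]  # voxel (0, 1) carries label 1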
def _relabel(vol_data, labels, forcecheck=False):
if forcecheck:
vd = np.unique(vol_data.flat)
assert len(vd) == len(labels), "number of given labels does not match number of actual labels"
# by doing zeros, any label not in labels gets left to 0
new_vol_data = np.zeros(vol_data.shape, vol_data.dtype)
for idx, val in np.ndenumerate(labels):
new_vol_data[vol_data == val] = idx
return new_vol_data
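

# --- illustrative sketch (not part of the original module) ---
# _relabel maps an arbitrary label set onto 0..N-1, e.g. FreeSurfer-style
# labels [0, 17, 53] become [0, 1, 2].
def _demo_relabel():
    seg = np.array([0, 17, 53, 17])
    new_seg = _relabel(seg, np.array([0, 17, 53]))
    assert list(new_seg) == [0, 1, 2, 1]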
def _npz_headers(npz, namelist=None):
"""
taken from https://stackoverflow.com/a/43223420
Takes a path to an .npz file, which is a Zip archive of .npy files.
Generates a sequence of (name, shape, np.dtype).
namelist is a list with variable names, ending in '.npy'.
e.g. if variable 'var' is in the file, namelist could be ['var.npy']
"""
with zipfile.ZipFile(npz) as archive:
if namelist is None:
namelist = archive.namelist()
for name in namelist:
if not name.endswith('.npy'):
continue
npy = archive.open(name)
version = np.lib.format.read_magic(npy)
shape, fortran, dtype = np.lib.format._read_array_header(npy, version)
yield name[:-4], shape, dtype
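

# --- illustrative sketch (not part of the original module) ---
# Peek at a stored volume's shape without loading its data; the filename is
# a placeholder.
def _demo_npz_shape(npz_file='vol.npz'):
    name, shape, dtype = next(_npz_headers(npz_file, namelist=['vol_data.npy']))
    return shape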
def _get_shape(x):
if isinstance(x, (list, tuple)):
return _get_shape(x[0])
else:
return x.shape[0]
| ext/neuron/neuron/generators.py | 39,435 |
from adminsortable2.admin import SortableAdminMixin
from decimal import Decimal
from django.contrib import admin
from django.contrib.gis import admin as geo_admin
from import_export import fields
from import_export import widgets
from import_export.admin import ImportExportModelAdmin
from import_export.resources import ModelResource as ImportExportModelResource
from solo.admin import SingletonModelAdmin
from .models import Client
from .models import Contact
from .models import Contract
from .models import DashboardItem
from .models import Estimate
from .models import Invoice
from .models import Location
from .models import Log
from .models import Newsletter
from .models import Note
from .models import Profile
from .models import Project
from .models import Proposal
from .models import Report
from .models import Service
from .models import SettingsApp
from .models import SettingsCompany
from .models import SettingsContract
from .models import Task
from .models import Testimonial
from .models import Time
class BooleanWidget(widgets.Widget):
"""
Convert strings to boolean values
"""
def clean(self, value):
if value == 'Yes':
return True
else:
return False
class DecimalWidget(widgets.Widget):
"""
Convert strings to decimal values
"""
def clean(self, value):
if value:
return Decimal(value.replace(',', ''))
else:
return Decimal(0)
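

# --- illustrative note (not part of the original module) ---
# These import widgets normalize spreadsheet-style cells, e.g.:
#   BooleanWidget().clean('Yes') -> True (anything else -> False)
#   DecimalWidget().clean('1,234.56') -> Decimal('1234.56')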
class UserWidget(widgets.Widget):
"""
"""
def clean(self, value):
return value
# Register your models here.
class ClientResource(ImportExportModelResource):
"""
"""
class Meta:
model = Client
# auto fill id? #295
# https://github.com/django-import-export/django-import-export/issues/295
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
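

# --- illustrative sketch (not part of the original module) ---
# The get_instance/before_import pair is repeated verbatim in every resource
# in this file; a shared mixin would remove the duplication:
class AutoIdResourceMixin(object):
    """Normalize headers and auto-append an 'id' column on import."""

    def get_instance(self, instance_loaders, row):
        return False

    def before_import(self, dataset, dry_run, file_name=None, user=None):
        if dataset.headers:
            dataset.headers = [
                str(header).lower().strip() for header in dataset.headers
            ]
            if 'id' not in dataset.headers:
                dataset.headers.append('id')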
@admin.register(Client)
class ClientAdmin(ImportExportModelAdmin):
"""
"""
resource_class = ClientResource
class ContactResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
class Meta:
model = Contact
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Contact)
class ContactAdmin(ImportExportModelAdmin):
"""
"""
resource_class = ContactResource
@admin.register(Contract)
class ContractAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(DashboardItem)
class DashboardItemAdmin(SortableAdminMixin, admin.ModelAdmin):
"""
"""
class EstimateResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
amount = fields.Field(
column_name='estimate_amount',
attribute='amount',
widget=DecimalWidget())
subtotal = fields.Field(
column_name='subtotal', attribute='subtotal', widget=DecimalWidget())
document_id = fields.Field(
column_name='estimate_id',
attribute='document_id',
widget=DecimalWidget())
class Meta:
model = Estimate
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Estimate)
class EstimateAdmin(ImportExportModelAdmin):
"""
"""
resource_class = EstimateResource
class InvoiceResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
amount = fields.Field(
column_name='amount', attribute='amount', widget=DecimalWidget())
paid_amount = fields.Field(
column_name='paid_amount',
attribute='paid_amount',
widget=DecimalWidget())
subtotal = fields.Field(
column_name='subtotal', attribute='subtotal', widget=DecimalWidget())
balance = fields.Field(
column_name='balance', attribute='balance', widget=DecimalWidget())
document_id = fields.Field(
column_name='invoice_id',
attribute='document_id',
widget=DecimalWidget())
class Meta:
model = Invoice
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Invoice)
class InvoiceAdmin(ImportExportModelAdmin):
"""
"""
resource_class = InvoiceResource
@admin.register(Location)
class LocationAdmin(geo_admin.OSMGeoAdmin):
"""
"""
search_fields = ('name', )
@admin.register(Log)
class LogAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Newsletter)
class NewsletterAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Note)
class NoteAdmin(ImportExportModelAdmin):
"""
"""
class ProjectResource(ImportExportModelResource):
"""
"""
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
billable_amount = fields.Field(
column_name='billable_amount',
attribute='billable_amount',
widget=DecimalWidget())
budget = fields.Field(
column_name='budget', attribute='budget', widget=DecimalWidget())
budget_spent = fields.Field(
column_name='budget_spent',
attribute='budget_spent',
widget=DecimalWidget())
team_costs = fields.Field(
column_name='team_costs',
attribute='team_costs',
widget=DecimalWidget())
total_costs = fields.Field(
column_name='total_costs',
attribute='total_costs',
widget=DecimalWidget())
class Meta:
model = Project
exclude = ('task', 'team')
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Profile)
class ProfileAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Project)
class ProjectAdmin(ImportExportModelAdmin):
"""
"""
resource_class = ProjectResource
@admin.register(Proposal)
class ProposalAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Report)
class ReportAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(Service)
class ServiceAdmin(ImportExportModelAdmin):
"""
"""
@admin.register(SettingsApp)
class SettingsAppAdmin(SingletonModelAdmin):
"""
"""
@admin.register(SettingsCompany)
class SettingsCompanyAdmin(SingletonModelAdmin):
"""
"""
@admin.register(SettingsContract)
class SettingsContractAdmin(SingletonModelAdmin):
"""
"""
@admin.register(Testimonial)
class TestimonialAdmin(ImportExportModelAdmin):
"""
"""
prepopulated_fields = {"slug": ("name", )}
class TaskResource(ImportExportModelResource):
"""
"""
class Meta:
model = Task
exclude = ('unit', 'billable', 'active')
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Task)
class TaskAdmin(ImportExportModelAdmin):
"""
"""
resource_class = TaskResource
class TimeResource(ImportExportModelResource):
"""
"""
billable = fields.Field(
column_name='billable', attribute='billable', widget=BooleanWidget())
client = fields.Field(
column_name='client',
attribute='client',
widget=widgets.ForeignKeyWidget(Client, 'name'))
invoiced = fields.Field(
column_name='invoiced', attribute='invoiced', widget=BooleanWidget())
project = fields.Field(
column_name='project',
attribute='project',
widget=widgets.ForeignKeyWidget(Project, 'name'))
task = fields.Field(
column_name='task',
attribute='task',
widget=widgets.ForeignKeyWidget(Task, 'name'))
user = fields.Field(
column_name='user', attribute='user', widget=UserWidget())
class Meta:
model = Time
def get_instance(self, instance_loaders, row):
return False
def before_import(self, dataset, dry_run, file_name=None, user=None):
if dataset.headers:
dataset.headers = [
str(header).lower().strip() for header in dataset.headers
]
if 'id' not in dataset.headers:
dataset.headers.append('id')
@admin.register(Time)
class TimeAdmin(ImportExportModelAdmin):
"""
"""
resource_class = TimeResource
| aclarknet/database/admin.py | 10,275 |
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000146"
stations_name = "parl.2019-12-12/Version 1/west-norfolk.gov.uk-1572885849000-.tsv"
addresses_name = "parl.2019-12-12/Version 1/west-norfolk.gov.uk-1572885849000-.tsv"
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
allow_station_point_from_postcode = False
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "10024107639":
rec["postcode"] = ""
rec["accept_suggestion"] = False
if record.addressline1 == "8 Lions Close":
rec["postcode"] = "PE38 0AT"
return rec
def station_record_to_dict(self, record):
# Dersingham Village Centre
if record.polling_place_id == "17923":
rec = super().station_record_to_dict(record)
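            # GEOS Point takes (x, y) = (longitude, latitude); srid 4326 is WGS84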
rec["location"] = Point(0.512389, 52.843528, srid=4326)
return rec
# Windsor Park, KING`S LYNN
if record.polling_place_id == "17867":
rec = super().station_record_to_dict(record)
rec["location"] = Point(0.404833, 52.748556, srid=4326)
return rec
if record.polling_place_id == "18049":
record = record._replace(polling_place_postcode="PE14 9QH")
record = record._replace(polling_place_easting="0")
record = record._replace(polling_place_northing="0")
return super().station_record_to_dict(record)
| polling_stations/apps/data_collection/management/commands/import_kings_lynn.py | 1,685 |
# -*- coding: utf-8 -*-
# Django settings for basic pinax project.
import os.path
import posixpath
import pinax
PINAX_ROOT = os.path.abspath(os.path.dirname(pinax.__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# tells Pinax to use the default theme
PINAX_THEME = "default"
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("Your Name", "your_email@domain.com"),
]
MANAGERS = ADMINS
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3", # Add "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle".
"NAME": "dev.db", # Or path to database file if using sqlite3.
"USER": "", # Not used with sqlite3.
"PASSWORD": "", # Not used with sqlite3.
"HOST": "", # Set to empty string for localhost. Not used with sqlite3.
"PORT": "", # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "US/Eastern"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory that holds static files like app media.
# Example: "/home/media/media.lawrence.com/apps/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/site_media/static/"
# Additional directories which hold static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "media"),
os.path.join(PINAX_ROOT, "media", PINAX_THEME),
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Make this unique, and don't share it with anybody.
SECRET_KEY = ""
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.load_template_source",
"django.template.loaders.app_directories.load_template_source",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django_openid.consumer.SessionConsumer",
"django.contrib.messages.middleware.MessageMiddleware",
"pinax.apps.account.middleware.LocaleMiddleware",
"pagination.middleware.PaginationMiddleware",
"pinax.middleware.security.HideSensistiveFieldsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "basic_project.urls"
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "templates"),
os.path.join(PINAX_ROOT, "templates", PINAX_THEME),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"staticfiles.context_processors.static_url",
"pinax.core.context_processors.pinax_settings",
"pinax.apps.account.context_processors.account",
"notification.context_processors.notification",
"announcements.context_processors.site_wide_announcements",
]
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.humanize",
"pinax.templatetags",
# external
"notification", # must be first
"staticfiles",
"debug_toolbar",
"mailer",
"uni_form",
"django_openid",
"ajax_validation",
"timezones",
"emailconfirmation",
"announcements",
"pagination",
"idios",
# Pinax
"pinax.apps.account",
"pinax.apps.signup_codes",
"pinax.apps.analytics",
# project
"about",
"profiles",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
EMAIL_BACKEND = "mailer.backend.DbBackend"
ABSOLUTE_URL_OVERRIDES = {
"auth.user": lambda o: "/profiles/profile/%s/" % o.username,
}
AUTH_PROFILE_MODULE = "profiles.Profile"
NOTIFICATION_LANGUAGE_MODULE = "account.Account"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
AUTHENTICATION_BACKENDS = [
"pinax.apps.account.auth_backends.AuthenticationBackend",
]
LOGIN_URL = "/account/login/" # @@@ any way this can be a url name?
LOGIN_REDIRECT_URLNAME = "what_next"
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
# URCHIN_ID = "ua-..."
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
from local_settings import *
except ImportError:
pass
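# A minimal local_settings.py might look like this (a sketch; all values
# below are hypothetical and must be adapted to your environment):
#
#     DEBUG = False
#     TEMPLATE_DEBUG = DEBUG
#     DATABASE_ENGINE = "postgresql_psycopg2"
#     DATABASE_NAME = "basic_project"
#     DATABASE_USER = "dbuser"
#     SECRET_KEY = "generate-a-unique-value-for-production"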
| pinax/projects/basic_project/settings.py | 6,312 | -*- coding: utf-8 -*- Django settings for basic pinax project. tells Pinax to use the default theme tells Pinax to serve media through the staticfiles app. ("Your Name", "your_email@domain.com"), Add "postgresql_psycopg2", "postgresql", "mysql", "sqlite3" or "oracle". Or path to database file if using sqlite3. Not used with sqlite3. Not used with sqlite3. Set to empty string for localhost. Not used with sqlite3. Set to empty string for default. Not used with sqlite3. Local time zone for this installation. Choices can be found here: http://en.wikipedia.org/wiki/List_of_tz_zones_by_name although not all choices may be available on all operating systems. If running in a Windows environment this must be set to the same as your system time zone. Language code for this installation. All choices can be found here: http://www.i18nguy.com/unicode/language-identifiers.html If you set this to False, Django will make some optimizations so as not to load the internationalization machinery. Absolute path to the directory that holds media. Example: "/home/media/media.lawrence.com/" URL that handles the media served from MEDIA_ROOT. Make sure to use a trailing slash if there is a path component (optional in other cases). Examples: "http://media.lawrence.com", "http://example.com/media/" Absolute path to the directory that holds static files like app media. Example: "/home/media/media.lawrence.com/apps/" URL that handles the static files like app media. Example: "http://media.lawrence.com" Additional directories which hold static files URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a trailing slash. Examples: "http://foo.com/media/", "/media/". Make this unique, and don't share it with anybody. List of callables that know how to import templates from various sources. Django external must be first Pinax project @@@ any way this can be a url name? URCHIN_ID = "ua-..." local_settings.py can be used to override environment-specific settings like database and email that differ between development and production. | 2,053 | en | 0.753954 |
"""Generates faucet config for given number of switches and number of devices per switch"""
import getopt
import sys
import yaml
from forch.utils import proto_dict
from forch.proto.faucet_configuration_pb2 import Interface, StackLink, Datapath, \
Vlan, FaucetConfig, LLDPBeacon, Stack
CORP_DP_ID = 273
T1_DP_ID_START = 177
T2_DP_ID_START = 1295
FLAT_DP_ID_START = 513
SETUP_VLAN = 171
TEST_VLAN = 272
FLAT_LINK_PORT_START = 6
T1_STACK_PORT_START = 100
T2_STACK_PORT_START = 50
ACCESS_PORT_START_DEFAULT = 101
FLAT_ACCESS_PORT_START = 1
TAP_PORT = 4
FAUCET_EGRESS_PORT = 28
FLAT_EGRESS_PORT = 50
CORP_EGRESS_PORT = 10
LACP_MODE = 3
T1_DP_MAC_PREFIX = '0e:00:00:00:01:'
T2_DP_MAC_PREFIX = '0e:00:00:00:02:'
FLAT = 'flat'
CORP = 'corp'
STACK = 'stack'
T1_DP = 't1'
T2_DP = 't2'
# pylint: disable=protected-access
# pylint: disable=too-many-arguments
class FaucetConfigGenerator():
"""Class for generating faucet config for given switches and devices per switch"""
def _build_dp_interfaces(self, dp_index, **kwargs):
interfaces = {}
# add egress interface
egress_port = kwargs.get('egress_port')
tagged_vlans = kwargs.get('tagged_vlans')
lacp = kwargs.get('lacp')
if egress_port:
self._add_egress_interface(interfaces, egress_port, tagged_vlans, lacp)
# add tap interface
tap_vlan = kwargs.get('tap_vlan')
if tap_vlan:
self._add_tap_interface(interfaces, tap_vlan)
# add flat link interfaces
dps = kwargs.get('dps')
if dps:
self._add_flat_link_interfaces(interfaces, dps, dp_index)
# add stack interfaces linking from t1 to t2 switches
t2_dps = kwargs.get('t2_dps')
if t2_dps:
self._add_t1_stack_interfaces(interfaces, dp_index, t2_dps)
# add stack interfaces linking from t2 to t1 switches
t1_dps = kwargs.get('t1_dps')
if t1_dps:
self._add_t2_stack_interfaces(interfaces, dp_index, t1_dps)
# add access interfaces
access_ports = kwargs.get('access_ports')
access_port_start = kwargs.get('access_port_start', ACCESS_PORT_START_DEFAULT)
native_vlan = kwargs.get('native_vlan')
port_acl = kwargs.get('port_acl')
if access_ports:
self._add_access_interfaces(
interfaces, access_ports, access_port_start, native_vlan, port_acl)
return interfaces
def _add_egress_interface(self, interfaces, egress_port, tagged_vlans, lacp):
if lacp:
interfaces[egress_port] = Interface(
description='egress', lacp=LACP_MODE, tagged_vlans=tagged_vlans)
else:
interfaces[egress_port] = Interface(
description='egress', tagged_vlans=tagged_vlans)
def _add_tap_interface(self, interfaces, tap_vlan):
interfaces[TAP_PORT] = Interface(description='tap', tagged_vlans=[tap_vlan])
def _add_flat_link_interfaces(self, interfaces, dps, dp_index):
if dp_index < len(dps) - 1:
next_dp = dps[dp_index + 1]
next_port = FLAT_LINK_PORT_START + dp_index
description = ("to %s port %s" % (next_dp, next_port))
interfaces[next_port] = Interface(
description=description, stack=StackLink(dp=next_dp, port=next_port))
if dp_index > 0:
prev_dp = dps[dp_index - 1]
prev_port = FLAT_LINK_PORT_START + (len(dps) + dp_index - 1) % len(dps)
description = ("to %s port %s" % (prev_dp, prev_port))
interfaces[prev_port] = Interface(
description=description, stack=StackLink(dp=prev_dp, port=prev_port))
def _add_t1_stack_interfaces(self, interfaces, dp_index, t2_dps):
t2_port = T2_STACK_PORT_START + dp_index * 2
for index, t2_dp in enumerate(t2_dps):
port = T1_STACK_PORT_START + index
description = ("to %s port %s" % (t2_dp, t2_port))
interfaces[port] = Interface(
description=description, stack=StackLink(dp=t2_dp, port=t2_port))
def _add_t2_stack_interfaces(self, interfaces, dp_index, t1_dps):
t1_port = T1_STACK_PORT_START + dp_index
for index, t1_dp in enumerate(t1_dps):
port = T2_STACK_PORT_START + index * 2
description = ('to %s port %s' % (t1_dp, t1_port))
interfaces[port] = Interface(
description=description, stack=StackLink(dp=t1_dp, port=t1_port))
def _add_access_interfaces(self, interfaces, access_ports, access_port_start, native_vlan,
port_acl):
for index in range(access_ports):
interfaces[index + access_port_start] = Interface(
description='IoT Device', native_vlan=native_vlan, acl_in=port_acl,
max_hosts=1)
def _build_datapath_config(self, dp_id, interfaces, mac=None):
lldp_beacon = LLDPBeacon(max_per_interval=5, send_interval=5)
stack = Stack(priority=1)
return Datapath(
dp_id=dp_id, faucet_dp_mac=mac, hardware='Generic',
lacp_timeout=5, lldp_beacon=lldp_beacon, interfaces=interfaces, stack=stack)
def _generate_dp_mac(self, dp_type, dp_index):
if dp_type == T1_DP:
return T1_DP_MAC_PREFIX + "{:02x}".format(dp_index+1)
if dp_type == T2_DP:
return T2_DP_MAC_PREFIX + "{:02x}".format(dp_index+1)
raise Exception('Unknown dp_type: %s' % dp_type)
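    # A quick sanity check of the MAC scheme above (worked examples):
    #   _generate_dp_mac(T1_DP, 0)  -> '0e:00:00:00:01:01'
    #   _generate_dp_mac(T2_DP, 15) -> '0e:00:00:00:02:10'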
def create_scale_faucet_config(self, t1_switches, t2_switches, access_ports):
"""Create Faucet config with stacking topology"""
setup_vlan = SETUP_VLAN
test_vlan = TEST_VLAN
vlans = {
setup_vlan: Vlan(description='Faucet IoT'),
test_vlan: Vlan(description='Orchestrated Testing')
}
t1_dps = [('nz-kiwi-t1sw%s' % (dp_index + 1)) for dp_index in range(t1_switches)]
t2_dps = [('nz-kiwi-t2sw%s' % (dp_index + 1)) for dp_index in range(t2_switches)]
dps = {}
for dp_index, dp_name in enumerate(t1_dps):
tap_vlan = test_vlan if not dp_index else None
interfaces = self._build_dp_interfaces(
dp_index, dps=t1_dps, t2_dps=t2_dps, tagged_vlans=[setup_vlan],
tap_vlan=tap_vlan, egress_port=FAUCET_EGRESS_PORT, lacp=True)
dps[dp_name] = self._build_datapath_config(
T1_DP_ID_START + dp_index, interfaces, self._generate_dp_mac(T1_DP, dp_index))
for dp_index, dp_name in enumerate(t2_dps):
interfaces = self._build_dp_interfaces(
dp_index, t1_dps=t1_dps, access_ports=access_ports, native_vlan=setup_vlan,
port_acl='uniform_acl', lacp=True)
dps[dp_name] = self._build_datapath_config(
T2_DP_ID_START + dp_index, interfaces, self._generate_dp_mac(T2_DP, dp_index))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
def create_flat_faucet_config(self, num_switches, num_access_ports):
"""Create Faucet config with flat topology"""
setup_vlan = SETUP_VLAN
switches = [('sw%s' % (sw_num + 1)) for sw_num in range(num_switches)]
dps = {}
vlans = {setup_vlan: Vlan(description='Faucet IoT')}
for sw_num, sw_name in enumerate(switches):
interfaces = self._build_dp_interfaces(
sw_num, dps=switches, egress_port=FAUCET_EGRESS_PORT, tagged_vlans=[setup_vlan],
access_ports=num_access_ports, native_vlan=setup_vlan, port_acl='uniform_acl',
access_port_start=FLAT_ACCESS_PORT_START, lacp=True)
dps[sw_name] = self._build_datapath_config(
FLAT_DP_ID_START + sw_num, interfaces, self._generate_dp_mac(T2_DP, sw_num))
return FaucetConfig(dps=dps, version=2, include=['uniform.yaml'], vlans=vlans)
def create_corp_faucet_config(self):
"""Create Faucet config for corp network"""
setup_vlan = SETUP_VLAN
switch = 'corp'
dps = {}
interfaces = self._build_dp_interfaces(
CORP_DP_ID, tagged_vlans=[setup_vlan], access_ports=1, access_port_start=1,
native_vlan=setup_vlan, egress_port=CORP_EGRESS_PORT)
dps[switch] = self._build_datapath_config(CORP_DP_ID, interfaces)
return FaucetConfig(dps=dps, version=2)
def main(argv):
"""main method for standalone run"""
config_generator = FaucetConfigGenerator()
filepath = '/tmp/faucet_config_dump'
egress = 2
access = 3
devices = 1
topo_type = STACK
argv = argv[1:]
help_msg = """
<python3> build_config.py -e <egress_switches> -a <access_switches> -d <devices per switch>
-p <config path> -t <topology type (flat, corp, stack)>
"""
try:
opts, _ = getopt.getopt(
argv, 'he:a:d:p:t:', ['egress=', 'access=', 'devices=', 'path=', 'type='])
except getopt.GetoptError:
print(help_msg)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(help_msg)
sys.exit()
elif opt in ('-e', '--egress'):
egress = int(arg)
elif opt in ('-a', '--access'):
access = int(arg)
elif opt in ('-d', '--devices'):
devices = int(arg)
elif opt in ('-p', '--path'):
filepath = arg
elif opt in ('-t', '--type'):
topo_type = arg
if topo_type == FLAT:
faucet_config = config_generator.create_flat_faucet_config(access, devices)
elif topo_type == CORP:
faucet_config = config_generator.create_corp_faucet_config()
elif topo_type == STACK:
faucet_config = config_generator.create_scale_faucet_config(egress, access, devices)
else:
        raise Exception('Unknown topology type: %s' % topo_type)
config_map = proto_dict(faucet_config)
with open(filepath, 'w') as config_file:
yaml.dump(config_map, config_file)
if __name__ == '__main__':
main(sys.argv)
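# Example invocation (hypothetical paths; flags as parsed in main() above):
#   python3 build_config.py -t stack -e 2 -a 3 -d 1 -p /tmp/faucet_config_dump
# builds a stacked topology with 2 t1 switches, 3 t2 switches and 1 access
# port per t2 switch, then dumps the config as YAML to the given path.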
| testing/python_lib/build_config.py | 10,090 | Class for generating faucet config for given switches and devices per switch
Create Faucet config for corp network
Create Faucet config with flat topology
Create Faucet config with stacking topology
main method for standalone run
Generates faucet config for given number of switches and number of devices per switch
pylint: disable=protected-access pylint: disable=too-many-arguments add egress interface add tap interface add flat link interfaces add stack interfaces linking from t1 to t2 switches add stack interfaces linking from t2 to t1 switches add access interfaces | 575 | en | 0.563435 |
import logging
import yaml
from scanapi.config_loader import load_config_file
from scanapi.errors import (
BadConfigurationError,
EmptyConfigFileError,
InvalidKeyError,
InvalidPythonCodeError,
)
from scanapi.exit_code import ExitCode
from scanapi.reporter import Reporter
from scanapi.session import session
from scanapi.settings import settings
from scanapi.tree import EndpointNode
logger = logging.getLogger(__name__)
def scan():
"""Caller function that tries to scans the file and write the report."""
spec_path = settings["spec_path"]
try:
api_spec = load_config_file(spec_path)
except FileNotFoundError as e:
error_message = f"Could not find API spec file: {spec_path}. {str(e)}"
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
except EmptyConfigFileError as e:
error_message = f"API spec file is empty. {str(e)}"
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
except yaml.YAMLError as e:
error_message = "Error loading specification file."
error_message = "{}\nPyYAML: {}".format(error_message, str(e))
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
try:
root_node = EndpointNode(api_spec)
results = root_node.run()
    except (InvalidKeyError, KeyError, InvalidPythonCodeError) as e:
error_message = "Error loading API spec."
error_message = "{} {}".format(error_message, str(e))
logger.error(error_message)
raise SystemExit(ExitCode.USAGE_ERROR)
try:
write_report(results)
except (BadConfigurationError, InvalidPythonCodeError) as e:
logger.error(e)
raise SystemExit(ExitCode.USAGE_ERROR)
session.exit()
def write_report(results):
"""Constructs a Reporter object and calls the write method of Reporter to
push the results to a file.
"""
reporter = Reporter(settings["output_path"], settings["template"])
reporter.write(results)
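# A minimal programmatic use is sketched below. `settings` is the shared
# dict-like object from scanapi.settings; the keys shown are the ones read by
# scan() and write_report() above, with hypothetical values:
#
#   from scanapi.settings import settings
#   settings.update({
#       "spec_path": "scanapi.yaml",
#       "output_path": "scanapi-report.html",
#       "template": None,
#   })
#   scan()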
| scanapi/scan.py | 2,045 | Caller function that tries to scan the file and write the report.
Constructs a Reporter object and calls the write method of Reporter to
push the results to a file. | 165 | en | 0.905373 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is tensorflow related and thus cannot
be called directly as a slim metric, for example.
TODO(jonathanhuang): wrap as a slim metric in metrics.py
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
and detections (boxes, scores and classes), where elements of each list
correspond to detections/annotations of a single image,
then evaluation (in multi-class mode) can be invoked as follows:
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
image_ids, groundtruth_boxes_list, groundtruth_classes_list,
max_num_classes, output_path=None)
detections_list = coco_tools.ExportDetectionsToCOCO(
image_ids, detection_boxes_list, detection_scores_list,
detection_classes_list, output_path=None)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import copy
import time
import numpy as np
from pycocotools import coco
from pycocotools import cocoeval
from pycocotools import mask
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.utils import json_utils
class COCOWrapper(coco.COCO):
"""Wrapper for the pycocotools COCO class."""
def __init__(self, dataset, detection_type='bbox'):
"""COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
"""
supported_detection_types = ['bbox', 'segmentation']
if detection_type not in supported_detection_types:
raise ValueError('Unsupported detection type: {}. '
'Supported values are: {}'.format(
detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
def LoadAnnotations(self, annotations):
"""Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
"""
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if not isinstance(annotations, list):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids)
& set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if self._detection_type == 'bbox':
for idx, ann in enumerate(annotations):
bb = ann['bbox']
ann['area'] = bb[2] * bb[3]
ann['id'] = idx + 1
ann['iscrowd'] = 0
elif self._detection_type == 'segmentation':
for idx, ann in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = idx + 1
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
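# Example `annotations` payload accepted by COCOWrapper.LoadAnnotations in
# 'bbox' mode (a sketch with made-up ids; bbox is [xmin, ymin, width, height]):
#   [{'image_id': 1, 'category_id': 18, 'bbox': [20.0, 10.0, 60.0, 40.0],
#     'score': 0.9}]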
class COCOEvalWrapper(cocoeval.COCOeval):
"""Wrapper for the pycocotools COCOeval class.
To evaluate, create two objects (groundtruth_dict and detections_list)
using the conventions listed at http://mscoco.org/dataset/#format.
Then call evaluation as follows:
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,
iou_type='bbox', oks_sigmas=None):
"""COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
iou_type: IOU type to use for evaluation. Supports `bbox', `segm`,
`keypoints`.
oks_sigmas: Float numpy array holding the OKS variances for keypoints.
"""
cocoeval.COCOeval.__init__(self, groundtruth, detections, iouType=iou_type)
if oks_sigmas is not None:
self.params.kpt_oks_sigmas = oks_sigmas
if agnostic_mode:
self.params.useCats = 0
self._iou_type = iou_type
def GetCategory(self, category_id):
"""Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
"""
return self.cocoGt.cats[category_id]
def GetAgnosticMode(self):
"""Returns true if COCO Eval is configured to evaluate in agnostic mode."""
return self.params.useCats == 0
def GetCategoryIdList(self):
"""Returns list of valid category ids."""
return self.params.catIds
def ComputeMetrics(self,
include_metrics_per_category=False,
all_metrics_per_category=False):
"""Computes detection/keypoint metrics.
Args:
include_metrics_per_category: If True, will include metrics per category.
      all_metrics_per_category: If True, include all the summary metrics for
        each category in per_category_ap. Be careful with setting it to True if
        you have more than a handful of categories, because it will pollute
        your mldash.
Returns:
1. summary_metrics: a dictionary holding:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/mAP@.50IOU': mean average precision at 50% IOU
'Precision/mAP@.75IOU': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels). NOTE: not present for 'keypoints'
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections. NOTE: not present for 'keypoints'
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: a dictionary holding category specific results with
keys of the form: 'Precision mAP ByCategory/category'
(without the supercategory part if no supercategories exist).
For backward compatibility 'PerformanceByCategory' is included in the
output regardless of all_metrics_per_category.
If evaluating class-agnostic mode, per_category_ap is an empty
dictionary.
Raises:
ValueError: If category_stats does not exist.
"""
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = {}
if self._iou_type in ['bbox', 'segm']:
summary_metrics = OrderedDict([('Precision/mAP', self.stats[0]),
('Precision/mAP@.50IOU', self.stats[1]),
('Precision/mAP@.75IOU', self.stats[2]),
('Precision/mAP (small)', self.stats[3]),
('Precision/mAP (medium)', self.stats[4]),
('Precision/mAP (large)', self.stats[5]),
('Recall/AR@1', self.stats[6]),
('Recall/AR@10', self.stats[7]),
('Recall/AR@100', self.stats[8]),
('Recall/AR@100 (small)', self.stats[9]),
('Recall/AR@100 (medium)', self.stats[10]),
('Recall/AR@100 (large)', self.stats[11])])
elif self._iou_type == 'keypoints':
category_id = self.GetCategoryIdList()[0]
category_name = self.GetCategory(category_id)['name']
summary_metrics = OrderedDict([])
summary_metrics['Precision/mAP ByCategory/{}'.format(
category_name)] = self.stats[0]
summary_metrics['Precision/mAP@.50IOU ByCategory/{}'.format(
category_name)] = self.stats[1]
summary_metrics['Precision/mAP@.75IOU ByCategory/{}'.format(
category_name)] = self.stats[2]
summary_metrics['Precision/mAP (medium) ByCategory/{}'.format(
category_name)] = self.stats[3]
summary_metrics['Precision/mAP (large) ByCategory/{}'.format(
category_name)] = self.stats[4]
summary_metrics['Recall/AR@1 ByCategory/{}'.format(
category_name)] = self.stats[5]
summary_metrics['Recall/AR@10 ByCategory/{}'.format(
category_name)] = self.stats[6]
summary_metrics['Recall/AR@100 ByCategory/{}'.format(
category_name)] = self.stats[7]
summary_metrics['Recall/AR@100 (medium) ByCategory/{}'.format(
category_name)] = self.stats[8]
summary_metrics['Recall/AR@100 (large) ByCategory/{}'.format(
category_name)] = self.stats[9]
if not include_metrics_per_category:
return summary_metrics, {}
if not hasattr(self, 'category_stats'):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return summary_metrics, per_category_ap
for category_index, category_id in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
      # Kept for backward compatibility
per_category_ap['PerformanceByCategory/mAP/{}'.format(
category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(
category)] = self.category_stats[0][category_index]
per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(
category)] = self.category_stats[1][category_index]
per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(
category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(
category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(
category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(
category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(
category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(
category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(
category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(
category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(
category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(
category)] = self.category_stats[11][category_index]
return summary_metrics, per_category_ap
def _ConvertBoxToCOCOFormat(box):
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
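# Worked example: box = np.array([10., 20., 50., 80.]) in
# [ymin, xmin, ymax, xmax] maps to [20.0, 10.0, 60.0, 40.0]
# in [xmin, ymin, width, height].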
def _RleCompress(masks):
"""Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
"""
rle = mask.encode(np.asfortranarray(masks))
rle['counts'] = six.ensure_str(rle['counts'])
return rle
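# For example, _RleCompress(np.ones((2, 2), dtype=np.uint8)) yields a dict
# with 'size' == [2, 2] and a compressed 'counts' string, as produced by
# pycocotools.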
def ExportSingleImageGroundtruthToCoco(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_keypoints=None,
groundtruth_keypoint_visibilities=None,
groundtruth_masks=None,
groundtruth_is_crowd=None,
groundtruth_area=None):
"""Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
  ExportSingleImageDetectionBoxesToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_keypoints: optional float numpy array of keypoints
with shape [num_gt_boxes, num_keypoints, 2].
groundtruth_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated
      as an enum with 0=not labeled, 1=labeled but not visible and 2=labeled and
visible.
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
provided, then the area values (in the original absolute coordinates) will
be populated instead of calculated from bounding box coordinates.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
                     'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
has_is_crowd = groundtruth_is_crowd is not None
if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
has_keypoints = groundtruth_keypoints is not None
has_keypoint_visibilities = groundtruth_keypoint_visibilities is not None
if has_keypoints and not has_keypoint_visibilities:
groundtruth_keypoint_visibilities = np.full(
(num_boxes, groundtruth_keypoints.shape[1]), 2)
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
if groundtruth_area is not None and groundtruth_area[i] > 0:
area = float(groundtruth_area[i])
else:
area = float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1]))
      export_dict = {
          'id': next_annotation_id + i,
          'image_id': image_id,
          'category_id': int(groundtruth_classes[i]),
          'bbox': list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
          'area': area,
          'iscrowd': iscrowd
      }
if groundtruth_masks is not None:
export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
if has_keypoints:
keypoints = groundtruth_keypoints[i]
visibilities = np.reshape(groundtruth_keypoint_visibilities[i], [-1])
coco_keypoints = []
num_valid_keypoints = 0
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
if int(visibility) > 0:
num_valid_keypoints = num_valid_keypoints + 1
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_valid_keypoints
groundtruth_list.append(export_dict)
return groundtruth_list
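# A minimal call (a sketch with hypothetical values):
#   ExportSingleImageGroundtruthToCoco(
#       image_id=1, next_annotation_id=1, category_id_set={1, 2},
#       groundtruth_boxes=np.array([[10., 20., 50., 80.]], dtype=np.float32),
#       groundtruth_classes=np.array([1]))
# returns a one-element list with id 1, category_id 1 and the box converted
# to COCO [xmin, ymin, width, height] format.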
def ExportGroundtruthToCOCO(image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=None):
"""Export groundtruth detection annotations in numpy arrays to COCO API.
This function converts a set of groundtruth detection annotations represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are three lists: image ids for each groundtruth image,
groundtruth boxes for each image and groundtruth classes respectively.
Note that the image_ids provided here must match the ones given to the
ExportDetectionsToCOCO function in order for evaluation to work properly.
  We assume that for each image, boxes and classes are in
correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
(note that num_gt_boxes can be different for each entry in the list)
groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
(note that num_gt_boxes can be different for each entry in the list)
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
output_path: (optional) path for exporting result to JSON
Returns:
dictionary that can be read by COCO API
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
category_id_set = set([cat['id'] for cat in categories])
groundtruth_export_list = []
image_export_list = []
if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):
raise ValueError('Input lists must have the same length')
# For reasons internal to the COCO API, it is important that annotation ids
# are not equal to zero; we thus start counting from 1.
annotation_id = 1
for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,
groundtruth_classes):
image_export_list.append({'id': image_id})
groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(
image_id,
annotation_id,
category_id_set,
boxes,
classes))
num_boxes = classes.shape[0]
annotation_id += num_boxes
groundtruth_dict = {
'annotations': groundtruth_export_list,
'images': image_export_list,
'categories': categories
}
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
return groundtruth_dict
def ExportSingleImageDetectionBoxesToCoco(image_id,
category_id_set,
detection_boxes,
detection_scores,
detection_classes,
detection_keypoints=None,
detection_keypoint_visibilities=None):
"""Export detections of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. Note that the image_ids
  provided here must match the ones given to
  ExportSingleImageGroundtruthToCoco. We assume that boxes and classes are in
  correspondence - that is: boxes[i, :] and classes[i]
  are associated with the same detection.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
      scores for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
detection_keypoints: optional float numpy array of keypoints
with shape [num_detections, num_keypoints, 2].
detection_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_detections, num_keypoints]. Integer is
      treated as an enum with 0=not labeled, 1=labeled but not visible and
2=labeled and visible.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
'expected to be of rank 1.')
if len(detection_boxes.shape) != 2:
raise ValueError('All entries in detection_boxes expected to be of '
'rank 2.')
if detection_boxes.shape[1] != 4:
raise ValueError('All entries in detection_boxes should have '
'shape[1] == 4.')
num_boxes = detection_classes.shape[0]
if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. '
'Scores shape: %d' % (
detection_classes.shape[0], detection_boxes.shape[0],
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
      export_dict = {
          'image_id': image_id,
          'category_id': int(detection_classes[i]),
          'bbox': list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
          'score': float(detection_scores[i]),
      }
if detection_keypoints is not None:
keypoints = detection_keypoints[i]
num_keypoints = keypoints.shape[0]
if detection_keypoint_visibilities is None:
detection_keypoint_visibilities = np.full((num_boxes, num_keypoints),
2)
visibilities = np.reshape(detection_keypoint_visibilities[i], [-1])
coco_keypoints = []
for keypoint, visibility in zip(keypoints, visibilities):
# Convert from [y, x] to [x, y] as mandated by COCO.
coco_keypoints.append(float(keypoint[1]))
coco_keypoints.append(float(keypoint[0]))
coco_keypoints.append(int(visibility))
export_dict['keypoints'] = coco_keypoints
export_dict['num_keypoints'] = num_keypoints
detections_list.append(export_dict)
return detections_list
def ExportSingleImageDetectionMasksToCoco(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
"""Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
'expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
'compatible lengths and shapes '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': _RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
def ExportDetectionsToCOCO(image_ids,
detection_boxes,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export detection annotations in numpy arrays to COCO API.
This function converts a set of predicted detections represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of boxes, scores and
classes, respectively, corresponding to each image for which detections
have been produced. Note that the image_ids provided here must
match the ones given to the ExportGroundtruthToCOCO function in order
for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: detection_boxes[i, :], detection_scores[i] and
detection_classes[i] are associated with the same detection.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]
detection_scores: list of numpy arrays (float) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'bbox', 'score'].
Raises:
ValueError: if (1) detection_boxes and detection_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers.
"""
category_id_set = set([cat['id'] for cat in categories])
detections_export_list = []
if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,
detection_scores,
detection_classes):
detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(
image_id,
category_id_set,
boxes,
scores,
classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
return detections_export_list
def ExportSegmentsToCOCO(image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export segmentation masks in numpy arrays to COCO API.
This function converts a set of predicted instance masks represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of segments, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
  Note that this function is recommended for small datasets.
  For large datasets, it should be used with a merge function
  (e.g. in map reduce); otherwise the memory consumption is large.
We assume that for each image, masks, scores and classes are in
correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]
and detection_classes[i] are associated with the same detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]
and type uint8. The height and width should match the shape of
corresponding image.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'segmentation', 'score'].
Raises:
ValueError: if detection_masks and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
segment_export_list = []
for image_id, masks, scores, classes in zip(image_ids, detection_masks,
detection_scores,
detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
'expected to be of rank 1.')
if len(masks.shape) != 4:
raise ValueError('All entries in masks expected to be of '
'rank 4. Given {}'.format(masks.shape))
num_boxes = classes.shape[0]
if not num_boxes == masks.shape[0] == scores.shape[0]:
      raise ValueError('Corresponding entries in detection_classes, '
                       'detection_scores and detection_masks should have '
                       'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(
image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
return segment_export_list
def ExportKeypointsToCOCO(image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Exports keypoints in numpy arrays to COCO API.
This function converts a set of predicted keypoints represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of keypoints, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
We assume that for each image, keypoints, scores and classes are in
correspondence --- that is: detection_keypoints[i, :, :, :],
detection_scores[i] and detection_classes[i] are associated with the same
detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_keypoints: list of numpy arrays with shape
[num_detection, num_keypoints, 2] and type float32 in absolute
x-y coordinates.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category and an integer 'num_keypoints' key specifying the number of
keypoints the category has.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'keypoints', 'score'].
Raises:
ValueError: if detection_keypoints and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_keypoints) ==
len(detection_scores) == len(detection_classes)):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for image_id, keypoints, scores, classes in zip(
image_ids, detection_keypoints, detection_scores, detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
'expected to be of rank 1.')
if len(keypoints.shape) != 3:
raise ValueError('All entries in keypoints expected to be of '
'rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if not num_boxes == keypoints.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_keypoints, and detection_scores should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {
cat['id']: cat['num_keypoints'] for cat in categories
if 'num_keypoints' in cat}
for i in range(num_boxes):
if classes[i] not in category_id_set:
        raise ValueError('class id should be in category_id_set')
if classes[i] in category_id_to_num_keypoints_map:
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
# Adds extra ones to indicate the visibility for each keypoint as is
# recommended by MSCOCO.
instance_keypoints = np.concatenate(
[keypoints[i, 0:num_keypoints, :],
np.expand_dims(np.ones(num_keypoints), axis=1)],
axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({
'image_id': image_id,
'category_id': int(classes[i]),
'keypoints': instance_keypoints,
'score': float(scores[i])
})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
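# End-to-end sketch tying the single-image helpers to the wrappers above
# (hypothetical single-image data; mirrors the module-level usage example):
#   gt_list = ExportSingleImageGroundtruthToCoco(
#       1, 1, {1}, np.array([[0., 0., 50., 50.]]), np.array([1]))
#   groundtruth = COCOWrapper({
#       'images': [{'id': 1}],
#       'annotations': gt_list,
#       'categories': [{'id': 1, 'name': 'cat'}]})
#   dt_list = ExportSingleImageDetectionBoxesToCoco(
#       1, {1}, np.array([[0., 0., 50., 50.]]), np.array([0.9]),
#       np.array([1]))
#   evaluator = COCOEvalWrapper(groundtruth,
#                               groundtruth.LoadAnnotations(dt_list))
#   summary_metrics, _ = evaluator.ComputeMetrics()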
| research/object_detection/metrics/coco_tools.py | 43,170 | Wrapper for the pycocotools COCOeval class.
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
scored for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
detection_keypoints: optional float numpy array of keypoints
with shape [num_detections, num_keypoints, 2].
detection_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_detections, num_keypoints]. Integer is
treated as an enum with 0=not labels, 1=labeled but not visible and
2=labeled and visible.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_keypoints: optional float numpy array of keypoints
with shape [num_gt_boxes, num_keypoints, 2].
groundtruth_keypoint_visibilities: optional integer numpy array of keypoint
visibilities with shape [num_gt_boxes, num_keypoints]. Integer is treated
as an enum with 0=not labels, 1=labeled but not visible and 2=labeled and
visible.
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
groundtruth_area: numpy array (float32) with shape [num_gt_boxes]. If
provided, then the area values (in the original absolute coordinates) will
be populated instead of calculated from bounding box coordinates.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
Returns true if COCO Eval is configured to evaluate in agnostic mode.
Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
Returns list of valid category ids.
Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
iou_type: IOU type to use for evaluation. Supports `bbox', `segm`,
`keypoints`.
oks_sigmas: Float numpy array holding the OKS variances for keypoints.
Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is tensorflow related and thus cannot
be called directly as a slim metric, for example.
TODO(jonathanhuang): wrap as a slim metric in metrics.py
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
and detections (boxes, scores and classes), where elements of each list
correspond to detections/annotations of a single image,
then evaluation (in multi-class mode) can be invoked as follows:
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
image_ids, groundtruth_boxes_list, groundtruth_classes_list,
max_num_classes, output_path=None)
detections_list = coco_tools.ExportDetectionsToCOCO(
image_ids, detection_boxes_list, detection_scores_list,
detection_classes_list, output_path=None)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Kept for backward compatilbility Convert from [y, x] to [x, y] as mandated by COCO. For reasons internal to the COCO API, it is important that annotation ids are not equal to zero; we thus start counting from 1. Convert from [y, x] to [x, y] as mandated by COCO. Adds extra ones to indicate the visibility for each keypoint as is recommended by MSCOCO. | 20,226 | en | 0.763074 |
import os
from PySide2 import QtWidgets
from mapclientplugins.filechooserstep.ui_configuredialog import Ui_ConfigureDialog
INVALID_STYLE_SHEET = 'background-color: rgba(239, 0, 0, 50)'
DEFAULT_STYLE_SHEET = ''
class ConfigureDialog(QtWidgets.QDialog):
"""
Configure dialog to present the user with the options to configure this step.
"""
def __init__(self, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self._ui = Ui_ConfigureDialog()
self._ui.setupUi(self)
self._workflow_location = None
# Keep track of the previous identifier so that we can track changes
# and know how many occurrences of the current identifier there should
# be.
self._previousIdentifier = ''
        # Set a placeholder for a callable that will get set from the step.
# We will use this method to decide whether the identifier is unique.
self.identifierOccursCount = None
self._previousLocation = ''
self._makeConnections()
def _makeConnections(self):
self._ui.lineEdit0.textChanged.connect(self.validate)
self._ui.lineEditFileLocation.textChanged.connect(self.validate)
self._ui.pushButtonFileChooser.clicked.connect(self._fileChooserClicked)
def _fileChooserClicked(self):
# Second parameter returned is the filter chosen
location, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Select File Location', self._previousLocation)
if location:
self._previousLocation = location
display_location = self._output_location(location)
self._ui.lineEditFileLocation.setText(display_location)
def _output_location(self, location=None):
if location is None:
display_path = self._ui.lineEditFileLocation.text()
else:
display_path = location
if self._workflow_location and os.path.isabs(display_path):
display_path = os.path.relpath(display_path, self._workflow_location)
return display_path
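    # Illustrative behaviour (paths are hypothetical): with a workflow location
    # of '/home/user/workflow', an absolute location such as
    # '/home/user/workflow/data/mesh.exf' is displayed as 'data/mesh.exf';
    # paths that are already relative are returned unchanged.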
def setWorkflowLocation(self, location):
self._workflow_location = location
def accept(self):
"""
Override the accept method so that we can confirm saving an
invalid configuration.
"""
result = QtWidgets.QMessageBox.Yes
if not self.validate():
result = QtWidgets.QMessageBox.warning(self, 'Invalid Configuration',
'This configuration is invalid. '
                                                   'Unpredictable behaviour may result if you choose \'Yes\'; '
                                                   'are you sure you want to save this configuration?',
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if result == QtWidgets.QMessageBox.Yes:
QtWidgets.QDialog.accept(self)
def validate(self):
"""
Validate the configuration dialog fields. For any field that is not valid
set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the
overall validity of the configuration.
"""
# Determine if the current identifier is unique throughout the workflow
# The identifierOccursCount method is part of the interface to the workflow framework.
value = self.identifierOccursCount(self._ui.lineEdit0.text())
valid = (value == 0) or (value == 1 and self._previousIdentifier == self._ui.lineEdit0.text())
self._ui.lineEdit0.setStyleSheet(DEFAULT_STYLE_SHEET if valid else INVALID_STYLE_SHEET)
non_empty = len(self._ui.lineEditFileLocation.text())
file_path = self._output_location()
if self._workflow_location:
file_path = os.path.join(self._workflow_location, file_path)
location_valid = non_empty and os.path.isfile(file_path)
self._ui.lineEditFileLocation.setStyleSheet(DEFAULT_STYLE_SHEET if location_valid else INVALID_STYLE_SHEET)
return valid and location_valid
def getConfig(self):
"""
Get the current value of the configuration from the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
"""
self._previousIdentifier = self._ui.lineEdit0.text()
config = {'identifier': self._ui.lineEdit0.text(), 'File': self._output_location()}
if self._previousLocation:
config['previous_location'] = os.path.relpath(self._previousLocation, self._workflow_location)
else:
config['previous_location'] = ''
return config
def setConfig(self, config):
"""
Set the current value of the configuration for the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
"""
self._previousIdentifier = config['identifier']
self._ui.lineEdit0.setText(config['identifier'])
self._ui.lineEditFileLocation.setText(config['File'])
if 'previous_location' in config:
self._previousLocation = os.path.join(self._workflow_location, config['previous_location'])
| mapclientplugins/filechooserstep/configuredialog.py | 5,404 | Configure dialog to present the user with the options to configure this step.
Override the accept method so that we can confirm saving an
invalid configuration.
Get the current value of the configuration from the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
Set the current value of the configuration for the dialog. Also
set the _previousIdentifier value so that we can check uniqueness of the
identifier over the whole of the workflow.
Validate the configuration dialog fields. For any field that is not valid
set the style sheet to the INVALID_STYLE_SHEET. Return the outcome of the
overall validity of the configuration.
Keep track of the previous identifier so that we can track changes and know how many occurrences of the current identifier there should be. Set a place holder for a callable that will get set from the step. We will use this method to decide whether the identifier is unique. Second parameter returned is the filter chosen Determine if the current identifier is unique throughout the workflow The identifierOccursCount method is part of the interface to the workflow framework. | 1,191 | en | 0.841697 |
from problem import Problem
class DistinctPowers(Problem, name="Distinct powers", expected=9183):
@Problem.solution()
def brute_force(self):
        # Good ol' fashioned set comprehension
return len({a ** b for a in range(2, 101) for b in range(2, 101)})
| problems/p029.py | 270 | Good ol fashion set comprehension | 33 | en | 0.735799 |
# terrascript/data/mrcrilly/awx.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:44 UTC)
import terrascript
class awx_credential(terrascript.Data):
pass
class awx_credential_azure_key_vault(terrascript.Data):
pass
class awx_credentials(terrascript.Data):
pass
__all__ = [
"awx_credential",
"awx_credential_azure_key_vault",
"awx_credentials",
]
| terrascript/data/mrcrilly/awx.py | 397 | terrascript/data/mrcrilly/awx.py Automatically generated by tools/makecode.py (24-Sep-2021 15:12:44 UTC) | 104 | en | 0.415945 |
#
# (C) Copyright IBM Corp. 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import requests
from urllib.parse import urlencode
logger = logging.getLogger(__name__)
class IBMIAMClient:
def __init__(self, iam_config, cf_endpoint, cf_namespace):
self.iam_api_key = iam_config.get('api_key', None)
self.iam_auth_endpoint = iam_config['ibm_auth_endpoint']
self.cf_endpoint = cf_endpoint
self.cf_namespace = cf_namespace
def get_iam_token(self):
data = urlencode({'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': self.iam_api_key})
headers = {
'content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
res = requests.post(self.iam_auth_endpoint, data=data, headers=headers)
if res.status_code != 200:
raise RuntimeError("Error: http code {} while retrieving IAM token for API key.".format(res.status_code))
bearer_response = res.json()
bearer_token = bearer_response['access_token']
        # Avoid logging the raw bearer token; it is a live credential.
        logger.debug('IAM bearer token acquired (%d characters)', len(bearer_token))
return bearer_token
def get_function_namespace_id(self, iam_token):
logger.debug("Getting name space id for {}".format(self.cf_namespace))
headers = {
'content-type': 'application/json',
'Accept': 'application/json',
'Authorization': iam_token
}
url = '/'.join([self.cf_endpoint, 'api', 'v1', 'namespaces'])
res = requests.get(url, headers=headers)
if res.status_code != 200:
raise RuntimeError("Error: http code {} while listing namespaces.".format(res.status_code))
namespaces = res.json()
for current_namespace in namespaces['namespaces']:
if 'name' in current_namespace and current_namespace['name'] == self.cf_namespace:
logger.debug("Found name space id {} for {}".format(current_namespace['id'], self.cf_namespace))
return current_namespace['id']
raise Exception("No IBM Cloud Functions namespace \"{}\" found.".format(self.cf_namespace))
| pywren_ibm_cloud/libs/ibm_cloudfunctions/iam.py | 2,642 | (C) Copyright IBM Corp. 2019 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 550 | en | 0.865228 |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2015, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import division
import numpy as np
from .normalise import normaliselabels
from .base import supervised_model
__all__ = ['normaliselabels', 'ctransforms']
class threshold_model(object):
'''
threshold_model
Attributes
----------
threshold : float
threshold value
'''
def __init__(self, threshold=.5):
        self.threshold = threshold
def apply(self, f):
return f >= self.threshold
def __repr__(self):
return 'threshold_model({})'.format(self.threshold)
__str__ = __repr__
class fixed_threshold_learner(object):
def __init__(self, threshold=.5):
self.threshold = threshold
def train(self, features, labels, **kwargs):
return threshold_model(self.threshold)
def __repr__(self):
return 'fixed_threshold_learner({})'.format(self.threshold)
__str__ = __repr__
class ctransforms_model(supervised_model):
'''
model = ctransforms_model(models)
A model that consists of a series of transformations.
See Also
--------
ctransforms
'''
def __init__(self, models):
self.models = models
def apply_many(self, features):
if len(features) == 0:
return features
for m in self.models:
features = m.apply_many(features)
return features
def __repr__(self):
return 'ctransforms_model({})'.format(self.models)
__str__ = __repr__
def __getitem__(self, ix):
return self.models[ix]
def apply(self,features):
for T in self.models:
features = T.apply(features)
return features
class ctransforms(object):
'''
ctransf = ctransforms(c0, c1, c2, ...)
Concatenate transforms.
'''
def __init__(self,*args):
self.transforms = args
def train(self, features, labels, **kwargs):
models = []
model = None
for T in self.transforms:
if model is not None:
features = np.array([model.apply(f) for f in features])
model = T.train(features, labels, **kwargs)
models.append(model)
return ctransforms_model(models)
def __repr__(self):
return 'ctransforms(*{})'.format(self.transforms)
__str__ = __repr__
def set_option(self, opt, val):
idx, opt = opt
self.transforms[idx].set_option(opt,val)
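# Minimal sketch of composing transforms (the learner objects are hypothetical,
# but assumed to follow milk's train/apply protocol):
#   pipeline = ctransforms(feature_normaliser, base_classifier)
#   model = pipeline.train(features, labels)
#   prediction = model.apply(test_features)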
| milk/supervised/classifier.py | 3,597 | ctransf = ctransforms(c0, c1, c2, ...)
Concatenate transforms.
model = ctransforms_model(models)
A model that consists of a series of transformations.
See Also
--------
ctransforms
threshold_model
Attributes
----------
threshold : float
threshold value
-*- coding: utf-8 -*- Copyright (C) 2008-2015, Luis Pedro Coelho <luis@luispedro.org> vim: set ts=4 sts=4 sw=4 expandtab smartindent: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1,431 | en | 0.831759 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
r"""
Verify that we execute TeX in a subdirectory (if that's where the document
resides) by checking that all the auxiliary files get created there and
not in the top-level directory. Test this when variantDir is used.
Add use of \include and \includegraphics from within the included file.
Also check that we find the included files.
Test case courtesy Joel B. Mohler.
"""
import TestSCons
test = TestSCons.TestSCons()
latex = test.where_is('latex')
if not latex:
test.skip_test("Could not find 'latex'; skipping test.\n")
pdflatex = test.where_is('pdflatex')
if not pdflatex:
test.skip_test("Could not find 'pdflatex'; skipping test.\n")
test.subdir('docs')
test.subdir(['docs','content'])
test.subdir(['docs','fig'])
test.write('SConstruct', """\
import os
env = Environment(TOOLS = ['tex', 'pdftex'])
env.VariantDir('build', 'docs', duplicate=0)
pdf = env.PDF('build/main.tex')
""")
test.write(['docs','main.tex'],
r"""\documentclass{article}
\usepackage{makeidx}
\makeindex
\begin{document}
Hi there.
\index{info}
\include{content/chapter}
\printindex{}
\end{document}
""")
test.write(['docs','content','chapter.tex'],
r"""Sub-document 1
\input{content/subchap}
""")
test.write(['docs','content','subchap.tex'], """\
Sub-chapter 2
""")
#test.run(arguments = '.')
#test.run(arguments = '.', stderr=None, stdout=None)
# next line tests that side effect nodes get disambiguated
# and their directories created in a variantDir before
# the builder tries to populate them and fails
test.run(arguments = 'build/main.pdf', stderr=None, stdout=None)
test.must_exist(['build', 'main.aux'])
test.must_exist(['build', 'main.fls'])
test.must_exist(['build', 'main.idx'])
test.must_exist(['build', 'main.ilg'])
test.must_exist(['build', 'main.ind'])
test.must_exist(['build', 'main.log'])
test.must_exist(['build', 'main.pdf'])
test.must_exist(['build', 'content', 'chapter.aux'])
test.must_not_exist('main.aux')
test.must_not_exist('main.dvi')
test.must_not_exist('main.idx')
test.must_not_exist('main.ilg')
test.must_not_exist('main.ind')
test.must_not_exist('main.log')
test.must_not_exist('main.pdf')
test.must_not_exist(['docs', 'main.aux'])
test.must_not_exist(['docs', 'main.dvi'])
test.must_not_exist(['docs', 'main.idx'])
test.must_not_exist(['docs', 'main.ilg'])
test.must_not_exist(['docs', 'main.ind'])
test.must_not_exist(['docs', 'main.log'])
test.must_not_exist(['docs', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'main.aux'])
test.must_not_exist(['docs', 'content', 'main.dvi'])
test.must_not_exist(['docs', 'content', 'main.idx'])
test.must_not_exist(['docs', 'content', 'main.ilg'])
test.must_not_exist(['docs', 'content', 'main.ind'])
test.must_not_exist(['docs', 'content', 'main.log'])
test.must_not_exist(['docs', 'content', 'main.pdf'])
test.must_not_exist(['docs', 'content', 'chapter.aux'])
test.up_to_date(arguments = '.', stderr=None, stdout=None)
test.write(['docs','content', 'subchap.tex'], """\
Sub-document 2a
""")
test.not_up_to_date(arguments = '.')
#test.up_to_date(arguments = '.', stderr=None, stdout=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| test/TEX/subdir_variantdir_include2.py | 4,355 | !/usr/bin/env python __COPYRIGHT__ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.test.run(arguments = '.')test.run(arguments = '.', stderr=None, stdout=None) next line tests that side effect nodes get disambiguated and their directories created in a variantDir before the builder tries to populate them and failstest.up_to_date(arguments = '.', stderr=None, stdout=None) Local Variables: tab-width:4 indent-tabs-mode:nil End: vim: set expandtab tabstop=4 shiftwidth=4: | 1,442 | en | 0.817864 |
#!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
requirements = requirements_file.read()
setup_requirements = ['setuptools_scm', ]
test_requirements = ['pytest>=3', 'pytest-runner']
setup(
author="USDA ARS Northwest Watershed Research Center",
author_email='snow@ars.usda.gov',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Take 2 for pysnobal in pure python",
entry_points={
'console_scripts': [
'pysnobal=pysnobal.cli:main',
],
},
install_requires=requirements,
license="CC0 1.0",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
keywords='pysnobal',
name='pysnobal',
packages=find_packages(include=['pysnobal', 'pysnobal.*']),
package_data={
'pysnobal': [
'./pysnobal_core_config.ini'
]
},
use_scm_version={
'local_scheme': 'node-and-date',
},
setup_requires=setup_requirements,
test_suite='pysnobal.tests',
tests_require=test_requirements,
url='https://github.com/scotthavens/pysnobal',
zip_safe=False,
)
| setup.py | 1,782 | The setup script.
!/usr/bin/env python | 39 | en | 0.349468 |
# Copyright (c) 2011 Sam Rushing
"""ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-bitcoinlib.
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
# NOTE: the symbol names below must match what OpenSSL actually exports
# (ECDH_compute_key / ECDSA_*); the library exports no AMBKH/AMBKSA symbols,
# so the previous names failed at import time.
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey():
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
    def get_raw_ambkh_key(self, other_pubkey):
        # ECDH shared-secret computation via OpenSSL.
        ambkh_keybuffer = ctypes.create_string_buffer(32)
        r = ssl.ECDH_compute_key(ctypes.pointer(ambkh_keybuffer), 32,
                                 ssl.EC_KEY_get0_public_key(other_pubkey.k),
                                 self.k, 0)
        if r != 32:
            raise Exception('CKey.get_ambkh_key(): ECDH_compute_key() failed')
        return ambkh_keybuffer.raw
def get_ambkh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ambkh_key(other_pubkey)
return kdf(r)
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
        sig_size0.value = ssl.ECDSA_size(self.k)
        mb_sig = ctypes.create_string_buffer(sig_size0.value)
        result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
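    # DER layout patched by sign() above (illustrative):
    #   0x30 <total_len> 0x02 <len(r)> r 0x02 <len(s)> s
    # When s exceeds SECP256K1_ORDER_HALF it is replaced by order - s ("low-S"),
    # and the affected length bytes are rewritten to match.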
def verify(self, hash, sig):
"""Verify a DER signature"""
        return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
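# Illustrative round trip (the secret is a placeholder; never use a fixed key):
#   key = CECKey()
#   key.set_secretbytes(b'\x01' * 32)
#   digest = hashlib.sha256(b'message').digest()
#   sig = key.sign(digest)
#   assert CPubKey(key.get_pubkey()).verify(digest, sig)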
| test/functional/test_framework/key.py | 8,500 | Wrapper around OpenSSL's EC_KEY
An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
Verify a DER signature
ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-bitcoinlib.
Copyright (c) 2011 Sam Rushing this specifies the curve used with AMBKSA. from openssl/obj_mac.h Thx to Sam Devlin for the ctypes magic 64-bit fix. FIXME: be warned it's not clear what the kdf should be as a default FIXME: need unit tests for below cases Always have represent as b'<secret>' so test cases don't have to change for py2/3 | 781 | en | 0.727319 |
# -*- coding: utf-8 -*-
import errno
import os
import re
import hashlib
import tempfile
import sys
import shutil
import logging
import click
import crayons
import delegator
import parse
import requests
import six
import stat
import warnings
try:
from weakref import finalize
except ImportError:
try:
from .vendor.backports.weakref import finalize
except ImportError:
class finalize(object):
def __init__(self, *args, **kwargs):
                logging.warning('weakref.finalize unavailable, not cleaning...')
def detach(self):
return False
from time import time
logging.basicConfig(level=logging.ERROR)
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
except ImportError:
try:
from .vendor.pathlib2 import Path
except ImportError:
pass
from distutils.spawn import find_executable
from contextlib import contextmanager
from .patched.piptools.resolver import Resolver
from .patched.piptools.repositories.pypi import PyPIRepository
from .patched.piptools.scripts.compile import get_pip_command
from .patched.piptools import logging as piptools_logging
from .patched.piptools.exceptions import NoCandidateFound
from .vendor.pip9.download import is_archive_file
from .vendor.pip9.exceptions import DistributionNotFound
from .vendor.pip9.index import Link
from .vendor.pip9._vendor.requests.exceptions import HTTPError, ConnectionError
from .pep508checker import lookup
from .environments import PIPENV_MAX_ROUNDS, PIPENV_CACHE_DIR
if six.PY2:
class ResourceWarning(Warning):
pass
specifiers = [k for k in lookup.keys()]
# List of version control systems we support.
VCS_LIST = ('git', 'svn', 'hg', 'bzr')
SCHEME_LIST = ('http://', 'https://', 'ftp://', 'ftps://', 'file://')
# Share one HTTP session for all warehouse requests (note: this shadows the
# imported ``requests`` module with a Session instance, which still provides
# the .get()/.post() interface used below).
requests = requests.Session()
def get_requirement(dep):
    """Pre-clean requirement strings passed to the requirements parser.
    Ensures that we can accept both local and relative paths, file and VCS URIs,
    remote URIs, and package names, and that we pass only valid requirement strings
    to the requirements parser. Performs necessary modifications to the requirements
    object if the user input was a local relative path.
    :param str dep: A requirement line
    :returns: :class:`requirements.Requirement` object
    """
    from .vendor.pip9.req.req_install import _strip_extras, Wheel
    from .vendor import requirements
path = None
uri = None
cleaned_uri = None
editable = False
dep_link = None
# check for editable dep / vcs dep
if dep.startswith('-e '):
editable = True
# Use the user supplied path as the written dependency
dep = dep.split(' ', 1)[1]
# Split out markers if they are present - similar to how pip does it
# See pip9.req.req_install.InstallRequirement.from_line
if not any(dep.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
marker_sep = ';'
else:
marker_sep = '; '
if marker_sep in dep:
dep, markers = dep.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
# Strip extras from the requirement so we can make a properly parseable req
dep, extras = _strip_extras(dep)
# Only operate on local, existing, non-URI formatted paths which are installable
if is_installable_file(dep):
dep_path = Path(dep)
dep_link = Link(dep_path.absolute().as_uri())
if dep_path.is_absolute() or dep_path.as_posix() == '.':
path = dep_path.as_posix()
else:
path = get_converted_relative_path(dep)
dep = dep_link.egg_fragment if dep_link.egg_fragment else dep_link.url_without_fragment
elif is_vcs(dep):
# Generate a Link object for parsing egg fragments
dep_link = Link(dep)
# Save the original path to store in the pipfile
uri = dep_link.url
# Construct the requirement using proper git+ssh:// replaced uris or names if available
cleaned_uri = clean_git_uri(dep)
dep = cleaned_uri
if editable:
dep = '-e {0}'.format(dep)
req = [r for r in requirements.parse(dep)][0]
# if all we built was the requirement name and still need everything else
if req.name and not any([req.uri, req.path]):
if dep_link:
if dep_link.scheme.startswith('file') and path and not req.path:
req.path = path
req.local_file = True
req.uri = None
else:
req.uri = dep_link.url_without_fragment
# If the result is a local file with a URI and we have a local path, unset the URI
# and set the path instead -- note that local files may have 'path' set by accident
elif req.local_file and path and not req.vcs:
req.path = path
req.uri = None
if dep_link and dep_link.is_wheel and not req.name:
req.name = os.path.basename(Wheel(dep_link.path).name)
elif req.vcs and req.uri and cleaned_uri and cleaned_uri != uri:
req.uri = strip_ssh_from_git_uri(req.uri)
req.line = strip_ssh_from_git_uri(req.line)
req.editable = editable
if markers:
req.markers = markers
if extras:
# Bizarrely this is also what pip does...
        req.extras = [r for r in requirements.parse('fakepkg{0}'.format(extras))][0].extras
return req
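# Illustrative inputs (the package names are arbitrary): a plain specifier such as
# 'requests>=2.0; os_name == "nt"' yields a Requirement named 'requests' with the
# marker split off into req.markers, while '-e git+https://host/repo.git#egg=pkg'
# comes back with req.editable set and a cleaned VCS URI.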
def cleanup_toml(tml):
toml = tml.split('\n')
new_toml = []
# Remove all empty lines from TOML.
for line in toml:
if line.strip():
new_toml.append(line)
toml = '\n'.join(new_toml)
new_toml = []
# Add newlines between TOML sections.
for i, line in enumerate(toml.split('\n')):
# Skip the first line.
if line.startswith('['):
if i > 0:
# Insert a newline before the heading.
new_toml.append('')
new_toml.append(line)
    # Add a trailing newline at the end of the TOML file.
new_toml.append('')
toml = '\n'.join(new_toml)
return toml
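# Illustrative transformation:
#   cleanup_toml('[packages]\nrequests = "*"\n\n[dev-packages]')
# drops the blank line, re-inserts exactly one blank line before each section
# heading after the first, and appends a trailing newline, giving
#   '[packages]\nrequests = "*"\n\n[dev-packages]\n'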
def parse_python_version(output):
"""Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
"""
version_pattern = re.compile(r'''
^ # Beginning of line.
Python # Literally "Python".
\s # Space.
(?P<major>\d+) # Major = one or more digits.
\. # Dot.
(?P<minor>\d+) # Minor = one or more digits.
(?: # Unnamed group for dot-micro.
\. # Dot.
(?P<micro>\d+) # Micro = one or more digit.
)? # Micro is optional because pypa/pipenv#1893.
.* # Trailing garbage.
$ # End of line.
''', re.VERBOSE)
match = version_pattern.match(output)
if not match:
return None
return match.groupdict(default='0')
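# Illustrative results: parse_python_version('Python 3.6.5') returns
# {'major': '3', 'minor': '6', 'micro': '5'}; parse_python_version('Python 2.7')
# returns {'major': '2', 'minor': '7', 'micro': '0'} because the micro part
# defaults to '0', and any non-matching output returns None.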
def python_version(path_to_python):
if not path_to_python:
return None
try:
c = delegator.run([path_to_python, '--version'], block=False)
except Exception:
return None
c.block()
version = parse_python_version(c.out.strip() or c.err.strip())
try:
version = u'{major}.{minor}.{micro}'.format(**version)
except TypeError:
return None
return version
def escape_grouped_arguments(s):
"""Prepares a string for the shell (on Windows too!)
Only for use on grouped arguments (passed as a string to Popen)
"""
if s is None:
return None
# Additional escaping for windows paths
if os.name == 'nt':
s = "{}".format(s.replace("\\", "\\\\"))
return '"' + s.replace("'", "'\\''") + '"'
def clean_pkg_version(version):
"""Uses pip to prepare a package version string, from our internal version."""
return six.u(pep440_version(str(version).replace('==', '')))
class HackedPythonVersion(object):
"""A Beautiful hack, which allows us to tell pip which version of Python we're using."""
def __init__(self, python_version, python_path):
self.python_version = python_version
self.python_path = python_path
def __enter__(self):
os.environ['PIP_PYTHON_VERSION'] = str(self.python_version)
os.environ['PIP_PYTHON_PATH'] = str(self.python_path)
    def __exit__(self, *args):
        # Restore original Python version information.
        del os.environ['PIP_PYTHON_VERSION']
        del os.environ['PIP_PYTHON_PATH']
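# Illustrative use (mirrors resolve_deps below; the interpreter path is hypothetical):
#   with HackedPythonVersion('3.6.5', '/usr/local/bin/python3'):
#       ...  # pip/pip-tools now believe they are running under Python 3.6.5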
def prepare_pip_source_args(sources, pip_args=None):
if pip_args is None:
pip_args = []
if sources:
# Add the source to pip9.
pip_args.extend(['-i', sources[0]['url']])
# Trust the host if it's not verified.
if not sources[0].get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(sources[0]['url']).netloc.split(':')[0],
]
)
# Add additional sources as extra indexes.
if len(sources) > 1:
for source in sources[1:]:
pip_args.extend(['--extra-index-url', source['url']])
# Trust the host if it's not verified.
if not source.get('verify_ssl', True):
pip_args.extend(
[
'--trusted-host',
urlparse(source['url']).hostname,
]
)
return pip_args
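# Illustrative expansion (index URLs are placeholders):
#   prepare_pip_source_args([
#       {'url': 'https://pypi.org/simple', 'verify_ssl': True},
#       {'url': 'https://pypi.internal/simple', 'verify_ssl': False},
#   ])
# returns ['-i', 'https://pypi.org/simple',
#          '--extra-index-url', 'https://pypi.internal/simple',
#          '--trusted-host', 'pypi.internal']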
def actually_resolve_reps(
deps, index_lookup, markers_lookup, project, sources, verbose, clear, pre
):
from pip9 import basecommand, req
from pip9._vendor import requests as pip_requests
class PipCommand(basecommand.Command):
"""Needed for pip-tools."""
name = 'PipCommand'
constraints = []
req_dir = tempfile.mkdtemp(prefix='pipenv-', suffix='-requirements')
for dep in deps:
if dep:
if dep.startswith('-e '):
constraint = req.InstallRequirement.from_editable(
dep[len('-e '):]
)
else:
fd, t = tempfile.mkstemp(
prefix='pipenv-', suffix='-requirement.txt', dir=req_dir
)
with os.fdopen(fd, 'w') as f:
f.write(dep)
                constraint = [
                    c for c in req.parse_requirements(t, session=pip_requests)
                ][0]
# extra_constraints = []
                if ' -i ' in dep:
                    index_lookup[constraint.name] = project.get_source(
                        url=dep.split(' -i ')[1]
                    ).get('name')
                if constraint.markers:
                    markers_lookup[constraint.name] = str(constraint.markers).replace('"', "'")
constraints.append(constraint)
rmtree(req_dir)
pip_command = get_pip_command()
pip_args = []
if sources:
pip_args = prepare_pip_source_args(sources, pip_args)
if verbose:
print('Using pip: {0}'.format(' '.join(pip_args)))
pip_options, _ = pip_command.parse_args(pip_args)
session = pip_command._build_session(pip_options)
pypi = PyPIRepository(
pip_options=pip_options, use_json=False, session=session
)
if verbose:
        # Only the vendored pip-tools logger has a ``verbose`` flag; setting one
        # on the stdlib ``logging`` module's ``log`` function was a no-op.
        piptools_logging.log.verbose = True
resolved_tree = set()
resolver = Resolver(
constraints=constraints,
repository=pypi,
clear_caches=clear,
prereleases=pre,
)
# pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages
try:
resolved_tree.update(resolver.resolve(max_rounds=PIPENV_MAX_ROUNDS))
except (NoCandidateFound, DistributionNotFound, HTTPError) as e:
click.echo(
'{0}: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.\n '
'You can use {1} to bypass this mechanism, then run {2} to inspect the situation.'
''.format(
crayons.red('Warning', bold=True),
crayons.red('$ pipenv install --skip-lock'),
crayons.red('$ pipenv graph'),
),
err=True,
)
click.echo(crayons.blue(str(e)), err=True)
if 'no version found at all' in str(e):
click.echo(
crayons.blue(
'Please check your version specifier and version number. See PEP440 for more information.'
)
)
raise RuntimeError
return resolved_tree, resolver
def venv_resolve_deps(
deps, which, project, pre=False, verbose=False, clear=False, allow_global=False
):
from . import resolver
import json
resolver = escape_grouped_arguments(resolver.__file__.rstrip('co'))
cmd = '{0} {1} {2} {3} {4} {5}'.format(
escape_grouped_arguments(which('python')),
resolver,
'--pre' if pre else '',
'--verbose' if verbose else '',
'--clear' if clear else '',
'--system' if allow_global else '',
)
os.environ['PIPENV_PACKAGES'] = '\n'.join(deps)
c = delegator.run(cmd, block=True)
del os.environ['PIPENV_PACKAGES']
try:
assert c.return_code == 0
except AssertionError:
if verbose:
click.echo(c.out, err=True)
click.echo(c.err, err=True)
else:
click.echo(c.err[int(len(c.err) / 2) - 1:], err=True)
sys.exit(c.return_code)
if verbose:
click.echo(c.out.split('RESULTS:')[0], err=True)
try:
return json.loads(c.out.split('RESULTS:')[1].strip())
except IndexError:
raise RuntimeError('There was a problem with locking.')
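# Note: venv_resolve_deps shells out to pipenv.resolver so that resolution runs
# under the target interpreter; the dependency list travels in via the
# PIPENV_PACKAGES environment variable and results come back as JSON printed
# after a 'RESULTS:' sentinel on stdout, e.g. (illustrative):
#   RESULTS:
#   [{"name": "requests", "version": "2.19.1", "hashes": ["sha256:..."]}]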
def resolve_deps(
deps,
which,
project,
sources=None,
verbose=False,
python=False,
clear=False,
pre=False,
allow_global=False,
):
"""Given a list of dependencies, return a resolved list of dependencies,
using pip-tools -- and their hashes, using the warehouse API / pip9.
"""
index_lookup = {}
markers_lookup = {}
python_path = which('python', allow_global=allow_global)
backup_python_path = sys.executable
results = []
# First (proper) attempt:
with HackedPythonVersion(python_version=python, python_path=python_path):
try:
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
# Don't exit here, like usual.
resolved_tree = None
# Second (last-resort) attempt:
if resolved_tree is None:
with HackedPythonVersion(
python_version='.'.join([str(s) for s in sys.version_info[:3]]),
python_path=backup_python_path,
):
try:
# Attempt to resolve again, with different Python version information,
# particularly for particularly particular packages.
resolved_tree, resolver = actually_resolve_reps(
deps,
index_lookup,
markers_lookup,
project,
sources,
verbose,
clear,
pre,
)
except RuntimeError:
sys.exit(1)
for result in resolved_tree:
if not result.editable:
name = pep423_name(result.name)
version = clean_pkg_version(result.specifier)
index = index_lookup.get(result.name)
            if not markers_lookup.get(result.name):
                markers = (
                    str(result.markers)
                    if result.markers and 'extra' not in str(result.markers)
                    else None
                )
            else:
                markers = markers_lookup.get(result.name)
collected_hashes = []
            if any('python.org' in source['url'] or 'pypi.org' in source['url']
                   for source in (sources or [])):
try:
# Grab the hashes from the new warehouse API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(name),
timeout=10,
)
api_releases = r.json()['releases']
cleaned_releases = {}
for api_version, api_info in api_releases.items():
cleaned_releases[
clean_pkg_version(api_version)
] = api_info
for release in cleaned_releases[version]:
collected_hashes.append(release['digests']['sha256'])
collected_hashes = [
'sha256:' + s for s in collected_hashes
]
except (ValueError, KeyError, ConnectionError):
if verbose:
click.echo(
'{0}: Error generating hash for {1}'.format(
crayons.red('Warning', bold=True), name
)
)
# Collect un-collectable hashes (should work with devpi).
try:
collected_hashes = collected_hashes + list(
list(resolver.resolve_hashes([result]).items())[0][1]
)
except (ValueError, KeyError, ConnectionError, IndexError):
if verbose:
print('Error generating hash for {}'.format(name))
collected_hashes = sorted(set(collected_hashes))
d = {'name': name, 'version': version, 'hashes': collected_hashes}
if index:
d.update({'index': index})
if markers:
d.update({'markers': markers.replace('"', "'")})
results.append(d)
return results
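# Illustrative shape of each resolved entry (hash digests are placeholders):
#   {'name': 'requests', 'version': '2.19.1',
#    'hashes': ['sha256:<digest>', ...],
#    'index': 'pypi',                 # only when the dep pinned a source via -i
#    'markers': "os_name == 'nt'"}    # only when environment markers apply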
def multi_split(s, split):
"""Splits on multiple given separators."""
for r in split:
s = s.replace(r, '|')
return [i for i in s.split('|') if len(i) > 0]
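# Illustrative example of the behaviour (each separator collapses to '|'):
#   multi_split('requests>=2.19,<3.0', '!=<>~,')  # -> ['requests', '2.19', '3.0']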
def convert_deps_from_pip(dep):
""""Converts a pip-formatted dependency to a Pipfile-formatted one."""
dependency = {}
req = get_requirement(dep)
extras = {'extras': req.extras}
# File installs.
if (req.uri or req.path or is_installable_file(req.name)) and not req.vcs:
        # Assign a package name to the file: the last 7 characters of its sha256 hex digest.
if not req.uri and not req.path:
req.path = os.path.abspath(req.name)
hashable_path = req.uri if req.uri else req.path
if not req.name:
req.name = hashlib.sha256(hashable_path.encode('utf-8')).hexdigest()
req.name = req.name[len(req.name) - 7:]
# {path: uri} TOML (spec 4 I guess...)
if req.uri:
dependency[req.name] = {'file': hashable_path}
else:
dependency[req.name] = {'path': hashable_path}
if req.extras:
dependency[req.name].update(extras)
# Add --editable if applicable
if req.editable:
dependency[req.name].update({'editable': True})
# VCS Installs.
elif req.vcs:
if req.name is None:
raise ValueError(
'pipenv requires an #egg fragment for version controlled '
'dependencies. Please install remote dependency '
'in the form {0}#egg=<package-name>.'.format(req.uri)
)
# Crop off the git+, etc part.
if req.uri.startswith('{0}+'.format(req.vcs)):
req.uri = req.uri[len(req.vcs) + 1:]
dependency.setdefault(req.name, {}).update({req.vcs: req.uri})
# Add --editable, if it's there.
if req.editable:
dependency[req.name].update({'editable': True})
# Add subdirectory, if it's there
if req.subdirectory:
dependency[req.name].update({'subdirectory': req.subdirectory})
# Add the specifier, if it was provided.
if req.revision:
dependency[req.name].update({'ref': req.revision})
# Extras: e.g. #egg=requests[security]
if req.extras:
dependency[req.name].update({'extras': req.extras})
elif req.extras or req.specs or hasattr(req, 'markers'):
specs = None
# Comparison operators: e.g. Django>1.10
if req.specs:
r = multi_split(dep, '!=<>~')
specs = dep[len(r[0]):]
dependency[req.name] = specs
# Extras: e.g. requests[socks]
if req.extras:
dependency[req.name] = extras
if specs:
dependency[req.name].update({'version': specs})
if hasattr(req, 'markers'):
if isinstance(dependency[req.name], six.string_types):
dependency[req.name] = {'version': specs}
dependency[req.name].update({'markers': req.markers})
# Bare dependencies: e.g. requests
else:
dependency[dep] = '*'
# Cleanup when there's multiple values, e.g. -e.
if len(dependency) > 1:
for key in dependency.copy():
if not hasattr(dependency[key], 'keys'):
del dependency[key]
return dependency
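# Rough sketch of the expected mapping (hedged; the exact result depends on
# the vendored requirement parser's handling of markers):
#   convert_deps_from_pip('requests[socks]>=2.19')
#   # -> {'requests': {'extras': ['socks'], 'version': '>=2.19'}}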
def is_star(val):
return isinstance(val, six.string_types) and val == '*'
def is_pinned(val):
return isinstance(val, six.string_types) and val.startswith('==')
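# e.g. is_star('*') -> True; is_pinned('==1.11.0') -> True; is_pinned('>=1.0') -> False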
def convert_deps_to_pip(deps, project=None, r=True, include_index=False):
""""Converts a Pipfile-formatted dependency to a pip-formatted one."""
dependencies = []
for dep in deps.keys():
# Default (e.g. '>1.10').
extra = deps[dep] if isinstance(deps[dep], six.string_types) else ''
version = ''
index = ''
# Get rid of '*'.
if is_star(deps[dep]) or str(extra) == '{}':
extra = ''
hash = ''
# Support for single hash (spec 1).
if 'hash' in deps[dep]:
hash = ' --hash={0}'.format(deps[dep]['hash'])
# Support for multiple hashes (spec 2).
if 'hashes' in deps[dep]:
hash = '{0} '.format(
''.join(
[' --hash={0} '.format(h) for h in deps[dep]['hashes']]
)
)
# Support for extras (e.g. requests[socks])
if 'extras' in deps[dep]:
extra = '[{0}]'.format(','.join(deps[dep]['extras']))
if 'version' in deps[dep]:
if not is_star(deps[dep]['version']):
version = deps[dep]['version']
# For lockfile format.
if 'markers' in deps[dep]:
specs = '; {0}'.format(deps[dep]['markers'])
else:
# For pipfile format.
specs = []
for specifier in specifiers:
if specifier in deps[dep]:
if not is_star(deps[dep][specifier]):
specs.append(
'{0} {1}'.format(specifier, deps[dep][specifier])
)
if specs:
specs = '; {0}'.format(' and '.join(specs))
else:
specs = ''
if include_index and not is_file(deps[dep]) and not is_vcs(deps[dep]):
pip_src_args = []
if 'index' in deps[dep]:
pip_src_args = [project.get_source(deps[dep]['index'])]
else:
pip_src_args = project.sources
pip_args = prepare_pip_source_args(pip_src_args)
index = ' '.join(pip_args)
# Support for version control
maybe_vcs = [vcs for vcs in VCS_LIST if vcs in deps[dep]]
vcs = maybe_vcs[0] if maybe_vcs else None
# Support for files.
if 'file' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['file']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
# Support for paths.
elif 'path' in deps[dep]:
extra = '{1}{0}'.format(extra, deps[dep]['path']).strip()
# Flag the file as editable if it is a local relative path
if 'editable' in deps[dep]:
dep = '-e '
else:
dep = ''
if vcs:
extra = '{0}+{1}'.format(vcs, deps[dep][vcs])
# Support for @refs.
if 'ref' in deps[dep]:
extra += '@{0}'.format(deps[dep]['ref'])
extra += '#egg={0}'.format(dep)
# Support for subdirectory
if 'subdirectory' in deps[dep]:
extra += '&subdirectory={0}'.format(deps[dep]['subdirectory'])
# Support for editable.
if 'editable' in deps[dep]:
# Support for --egg.
dep = '-e '
else:
dep = ''
s = '{0}{1}{2}{3}{4} {5}'.format(
dep, extra, version, specs, hash, index
).strip()
dependencies.append(s)
if not r:
return dependencies
# Write requirements.txt to tmp directory.
f = tempfile.NamedTemporaryFile(suffix='-requirements.txt', delete=False)
f.write('\n'.join(dependencies).encode('utf-8'))
f.close()
return f.name
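# Hedged sketch of the inverse direction (r=False returns the list instead of
# writing a temporary requirements.txt):
#   convert_deps_to_pip({'requests': {'extras': ['socks'], 'version': '>=2.19'}}, r=False)
#   # -> ['requests[socks]>=2.19']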
def mkdir_p(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError(
"a file with the same name as the desired dir, '{0}', already exists.".format(
newdir
)
)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir_p(head)
if tail:
os.mkdir(newdir)
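# e.g. mkdir_p(os.path.join('build', 'cache', 'wheels')) creates all three
# levels if needed and silently succeeds when they already exist.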
def is_required_version(version, specified_version):
"""Check to see if there's a hard requirement for version
number provided in the Pipfile.
"""
# Certain packages may be defined with multiple values.
if isinstance(specified_version, dict):
specified_version = specified_version.get('version', '')
if specified_version.startswith('=='):
return version.strip() == specified_version.split('==')[1].strip()
return True
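# e.g. is_required_version('1.11.0', '==1.11.0') -> True, while any
# non-'==' specifier (such as '>=1.0') is accepted unconditionally.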
def strip_ssh_from_git_uri(uri):
"""Return git+ssh:// formatted URI to git+git@ format"""
if isinstance(uri, six.string_types):
uri = uri.replace('git+ssh://', 'git+')
return uri
def clean_git_uri(uri):
"""Cleans VCS uris from pip9 format"""
if isinstance(uri, six.string_types):
# Add scheme for parsing purposes, this is also what pip does
if uri.startswith('git+') and '://' not in uri:
uri = uri.replace('git+', 'git+ssh://')
return uri
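# The two URI helpers above are inverses for SSH-style git URIs, e.g.:
#   clean_git_uri('git+git@github.com:pypa/pipenv.git')
#   # -> 'git+ssh://git@github.com:pypa/pipenv.git'
#   strip_ssh_from_git_uri('git+ssh://git@github.com:pypa/pipenv.git')
#   # -> 'git+git@github.com:pypa/pipenv.git'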
def is_editable(pipfile_entry):
if hasattr(pipfile_entry, 'get'):
return pipfile_entry.get('editable', False) and any(
pipfile_entry.get(key) for key in ('file', 'path') + VCS_LIST
)
return False
def is_vcs(pipfile_entry):
    """Determine if dictionary entry from Pipfile is for a vcs dependency."""
    from .vendor import requirements
if hasattr(pipfile_entry, 'keys'):
return any(key for key in pipfile_entry.keys() if key in VCS_LIST)
elif isinstance(pipfile_entry, six.string_types):
return bool(
requirements.requirement.VCS_REGEX.match(
clean_git_uri(pipfile_entry)
)
)
return False
def is_installable_file(path):
"""Determine if a path can potentially be installed"""
from .vendor.pip9.utils import is_installable_dir
from .vendor.pip9.utils.packaging import specifiers
if hasattr(path, 'keys') and any(
key for key in path.keys() if key in ['file', 'path']
):
path = urlparse(path['file']).path if 'file' in path else path['path']
if not isinstance(path, six.string_types) or path == '*':
return False
# If the string starts with a valid specifier operator, test if it is a valid
# specifier set before making a path object (to avoid breaking windows)
if any(path.startswith(spec) for spec in '!=<>~'):
try:
specifiers.SpecifierSet(path)
# If this is not a valid specifier, just move on and try it as a path
except specifiers.InvalidSpecifier:
pass
else:
return False
if not os.path.exists(os.path.abspath(path)):
return False
lookup_path = Path(path)
absolute_path = '{0}'.format(lookup_path.absolute())
if lookup_path.is_dir() and is_installable_dir(absolute_path):
return True
elif lookup_path.is_file() and is_archive_file(absolute_path):
return True
return False
def is_file(package):
"""Determine if a package name is for a File dependency."""
if hasattr(package, 'keys'):
return any(key for key in package.keys() if key in ['file', 'path'])
if os.path.exists(str(package)):
return True
for start in SCHEME_LIST:
if str(package).startswith(start):
return True
return False
def pep440_version(version):
"""Normalize version to PEP 440 standards"""
from .vendor.pip9.index import parse_version
# Use pip built-in version parser.
return str(parse_version(version))
def pep423_name(name):
"""Normalize package name to PEP 423 style standard."""
name = name.lower()
    if all(i not in name for i in (VCS_LIST + SCHEME_LIST)):
return name.replace('_', '-')
else:
return name
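# e.g. pep423_name('Typed_Python') -> 'typed-python'; names containing a
# VCS or scheme marker are returned unchanged.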
def proper_case(package_name):
"""Properly case project name from pypi.org."""
# Hit the simple API.
r = requests.get(
'https://pypi.org/pypi/{0}/json'.format(package_name),
timeout=0.3,
stream=True,
)
if not r.ok:
raise IOError(
'Unable to find package {0} in PyPI repository.'.format(
package_name
)
)
r = parse.parse('https://pypi.org/pypi/{name}/json', r.url)
good_name = r['name']
return good_name
def split_section(input_file, section_suffix, test_function):
"""
Split a pipfile or a lockfile section out by section name and test function
:param dict input_file: A dictionary containing either a pipfile or lockfile
:param str section_suffix: A string of the name of the section
:param func test_function: A test function to test against the value in the key/value pair
>>> split_section(my_lockfile, 'vcs', is_vcs)
{
'default': {
"six": {
"hashes": [
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"
],
"version": "==1.11.0"
}
},
'default-vcs': {
"e1839a8": {
"editable": true,
"path": "."
}
}
}
"""
pipfile_sections = ('packages', 'dev-packages')
lockfile_sections = ('default', 'develop')
if any(section in input_file for section in pipfile_sections):
sections = pipfile_sections
elif any(section in input_file for section in lockfile_sections):
sections = lockfile_sections
else:
# return the original file if we can't find any pipfile or lockfile sections
return input_file
for section in sections:
split_dict = {}
entries = input_file.get(section, {})
for k in list(entries.keys()):
if test_function(entries.get(k)):
split_dict[k] = entries.pop(k)
input_file['-'.join([section, section_suffix])] = split_dict
return input_file
def split_file(file_dict):
"""Split VCS and editable dependencies out from file."""
sections = {
'vcs': is_vcs,
'editable': lambda x: hasattr(x, 'keys') and x.get('editable'),
}
for k, func in sections.items():
file_dict = split_section(file_dict, k, func)
return file_dict
def merge_deps(
file_dict,
project,
dev=False,
requirements=False,
ignore_hashes=False,
blocking=False,
only=False,
):
"""
Given a file_dict, merges dependencies and converts them to pip dependency lists.
:param dict file_dict: The result of calling :func:`pipenv.utils.split_file`
:param :class:`pipenv.project.Project` project: Pipenv project
:param bool dev=False: Flag indicating whether dev dependencies are to be installed
:param bool requirements=False: Flag indicating whether to use a requirements file
:param bool ignore_hashes=False:
:param bool blocking=False:
:param bool only=False:
:return: Pip-converted 3-tuples of [deps, requirements_deps]
"""
deps = []
requirements_deps = []
for section in list(file_dict.keys()):
# Turn develop-vcs into ['develop', 'vcs']
section_name, suffix = section.rsplit(
'-', 1
) if '-' in section and not section == 'dev-packages' else (
section, None
)
if not file_dict[section] or section_name not in (
'dev-packages', 'packages', 'default', 'develop'
):
continue
is_dev = section_name in ('dev-packages', 'develop')
if is_dev and not dev:
continue
if ignore_hashes:
            for k, v in file_dict[section].items():
if 'hash' in v:
del v['hash']
# Block and ignore hashes for all suffixed sections (vcs/editable)
no_hashes = True if suffix else ignore_hashes
block = True if suffix else blocking
include_index = True if not suffix else False
converted = convert_deps_to_pip(
file_dict[section], project, r=False, include_index=include_index
)
deps.extend((d, no_hashes, block) for d in converted)
if dev and is_dev and requirements:
requirements_deps.extend((d, no_hashes, block) for d in converted)
return deps, requirements_deps
def recase_file(file_dict):
"""Recase file before writing to output."""
if 'packages' in file_dict or 'dev-packages' in file_dict:
sections = ('packages', 'dev-packages')
elif 'default' in file_dict or 'develop' in file_dict:
sections = ('default', 'develop')
for section in sections:
file_section = file_dict.get(section, {})
# Try to properly case each key if we can.
for key in list(file_section.keys()):
try:
cased_key = proper_case(key)
except IOError:
cased_key = key
file_section[cased_key] = file_section.pop(key)
return file_dict
def get_windows_path(*args):
"""Sanitize a path for windows environments
Accepts an arbitrary list of arguments and makes a clean windows path"""
return os.path.normpath(os.path.join(*args))
def find_windows_executable(bin_path, exe_name):
"""Given an executable name, search the given location for an executable"""
requested_path = get_windows_path(bin_path, exe_name)
if os.path.exists(requested_path):
return requested_path
# Ensure we aren't adding two layers of file extensions
exe_name = os.path.splitext(exe_name)[0]
files = [
'{0}.{1}'.format(exe_name, ext) for ext in ['', 'py', 'exe', 'bat']
]
exec_paths = [get_windows_path(bin_path, f) for f in files]
exec_files = [
filename for filename in exec_paths if os.path.isfile(filename)
]
if exec_files:
return exec_files[0]
return find_executable(exe_name)
def path_to_url(path):
return Path(normalize_drive(os.path.abspath(path))).as_uri()
def get_converted_relative_path(path, relative_to=os.curdir):
"""Given a vague relative path, return the path relative to the given location"""
return os.path.join('.', os.path.relpath(path, start=relative_to))
def walk_up(bottom):
"""Mimic os.walk, but walk 'up' instead of down the directory tree.
From: https://gist.github.com/zdavkeos/1098474
"""
bottom = os.path.realpath(bottom)
# Get files in current dir.
try:
names = os.listdir(bottom)
except Exception:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = os.path.realpath(os.path.join(bottom, '..'))
# See if we are at the top.
if new_path == bottom:
return
for x in walk_up(new_path):
yield x
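# Hedged usage sketch: visits the starting directory first, then each parent.
#   for directory, dirs, files in walk_up(os.getcwd()):
#       ...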
def find_requirements(max_depth=3):
    """Returns the path of a requirements.txt in parent directories."""
    i = 0
    for c, d, f in walk_up(os.getcwd()):
        i += 1
        if i < max_depth:
            r = os.path.join(c, 'requirements.txt')
            if os.path.isfile(r):
                return r
    raise RuntimeError('No requirements.txt found!')
# Borrowed from pew to avoid importing pew which imports psutil
# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82
@contextmanager
def temp_environ():
"""Allow the ability to set os.environ temporarily"""
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
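# Usage sketch: changes made inside the block are rolled back on exit.
#   with temp_environ():
#       os.environ['PIPENV_EXAMPLE'] = '1'  # hypothetical variable name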
def is_valid_url(url):
"""Checks if a given string is an url"""
pieces = urlparse(url)
return all([pieces.scheme, pieces.netloc])
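# e.g. is_valid_url('https://pypi.org/simple') -> True;
#      is_valid_url('requests') -> False (no scheme or netloc)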
def download_file(url, filename):
"""Downloads file from url to a path with filename"""
r = requests.get(url, stream=True)
if not r.ok:
raise IOError('Unable to download file')
with open(filename, 'wb') as f:
f.write(r.content)
def need_update_check():
"""Determines whether we need to check for updates."""
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
if not os.path.exists(p):
return True
out_of_date_time = time() - (24 * 60 * 60)
if os.path.isfile(p) and os.path.getmtime(p) <= out_of_date_time:
return True
else:
return False
def touch_update_stamp():
"""Touches PIPENV_CACHE_DIR/.pipenv_update_check"""
mkdir_p(PIPENV_CACHE_DIR)
p = os.sep.join((PIPENV_CACHE_DIR, '.pipenv_update_check'))
try:
os.utime(p, None)
except OSError:
with open(p, 'w') as fh:
fh.write('')
def normalize_drive(path):
"""Normalize drive in path so they stay consistent.
This currently only affects local drives on Windows, which can be
identified with either upper or lower cased drive names. The case is
always converted to uppercase because it seems to be preferred.
See: <https://github.com/pypa/pipenv/issues/1218>
"""
if os.name != 'nt' or not isinstance(path, six.string_types):
return path
drive, tail = os.path.splitdrive(path)
# Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts.
if drive.islower() and len(drive) == 2 and drive[1] == ':':
return '{}{}'.format(drive.upper(), tail)
return path
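# e.g. on Windows: normalize_drive('c:\\Users\\dev') -> 'C:\\Users\\dev';
# on other platforms the path is returned unchanged.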
def is_readonly_path(fn):
"""Check if a provided path exists and is readonly.
Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
"""
if os.path.exists(fn):
return (os.stat(fn).st_mode & stat.S_IREAD) or not os.access(
fn, os.W_OK
)
return False
def set_write_bit(fn):
if os.path.exists(fn):
os.chmod(fn, stat.S_IWRITE | stat.S_IWUSR)
return
def rmtree(directory, ignore_errors=False):
shutil.rmtree(
directory, ignore_errors=ignore_errors, onerror=handle_remove_readonly
)
def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion."""
# Check for read-only attribute
default_warning_message = 'Unable to remove file due to permissions restriction: {!r}'
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except (OSError, IOError) as e:
if e.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(
default_warning_message.format(path), ResourceWarning
)
return
if exc_exception.errno in [errno.EACCES, errno.EPERM]:
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
raise
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix, prefix, dir=None):
if 'RAM_DISK' in os.environ:
import uuid
name = uuid.uuid4().hex
dir_name = os.path.join(os.environ['RAM_DISK'].strip(), name)
os.mkdir(dir_name)
self.name = dir_name
else:
self.name = tempfile.mkdtemp(suffix, prefix, dir)
self._finalizer = finalize(
self,
self._cleanup,
self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
)
@classmethod
def _cleanup(cls, name, warn_message):
rmtree(name)
warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
rmtree(self.name)
| pipenv/utils.py | 43,253 | A Beautiful hack, which allows us to tell pip which version of Python we're using.
Needed for pip-tools.
Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
Cleans VCS uris from pip9 format
Uses pip to prepare a package version string, from our internal version.
"Converts a pip-formatted dependency to a Pipfile-formatted one.
"Converts a Pipfile-formatted dependency to a pip-formatted one.
Downloads file from url to a path with filename
Prepares a string for the shell (on Windows too!)
Only for use on grouped arguments (passed as a string to Popen)
Returns the path of a Pipfile in parent directories.
Given an executable name, search the given location for an executable
Given a vague relative path, return the path relative to the given location
Sanitize a path for windows environments
Accepts an arbitrary list of arguments and makes a clean windows path
Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion.
Determine if a package name is for a File dependency.
Determine if a path can potentially be installed
Check if a provided path exists and is readonly.
Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
Check to see if there's a hard requirement for version
number provided in the Pipfile.
Checks if a given string is an url
Given a file_dict, merges dependencies and converts them to pip dependency lists.
:param dict file_dict: The result of calling :func:`pipenv.utils.split_file`
:param :class:`pipenv.project.Project` project: Pipenv project
:param bool dev=False: Flag indicating whether dev dependencies are to be installed
:param bool requirements=False: Flag indicating whether to use a requirements file
:param bool ignore_hashes=False:
:param bool blocking=False:
:param bool only=False:
:return: Pip-converted 3-tuples of [deps, requirements_deps]
works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
From: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
Splits on multiple given separators.
Determines whether we need to check for updates.
Normalize drive in path so they stay consistent.
This currently only affects local drives on Windows, which can be
identified with either upper or lower cased drive names. The case is
always converted to uppercase because it seems to be preferred.
See: <https://github.com/pypa/pipenv/issues/1218>
Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
Normalize package name to PEP 423 style standard.
Normalize version to PEP 440 standards
Properly case project name from pypi.org.
Recase file before writing to output.
Given a list of dependencies, return a resolved list of dependencies,
using pip-tools -- and their hashes, using the warehouse API / pip9.
Split VCS and editable dependencies out from file.
Split a pipfile or a lockfile section out by section name and test function
:param dict input_file: A dictionary containing either a pipfile or lockfile
:param str section_suffix: A string of the name of the section
:param func test_function: A test function to test against the value in the key/value pair
>>> split_section(my_lockfile, 'vcs', is_vcs)
{
'default': {
"six": {
"hashes": [
"sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb",
"sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9"
],
"version": "==1.11.0"
}
},
'default-vcs': {
"e1839a8": {
"editable": true,
"path": "."
}
}
}
Return git+ssh:// formatted URI to git+git@ format
Allow the ability to set os.environ temporarily
Touches PIPENV_CACHE_DIR/.pipenv_update_check
Mimic os.walk, but walk 'up' instead of down the directory tree.
From: https://gist.github.com/zdavkeos/1098474
-*- coding: utf-8 -*- List of version control systems we support. check for editable dep / vcs dep Use the user supplied path as the written dependency Split out markers if they are present - similar to how pip does it See pip9.req.req_install.InstallRequirement.from_line Strip extras from the requirement so we can make a properly parseable req Only operate on local, existing, non-URI formatted paths which are installable Generate a Link object for parsing egg fragments Save the original path to store in the pipfile Construct the requirement using proper git+ssh:// replaced uris or names if available if all we built was the requirement name and still need everything else If the result is a local file with a URI and we have a local path, unset the URI and set the path instead -- note that local files may have 'path' set by accident Bizarrely this is also what pip does... Remove all empty lines from TOML. Add newlines between TOML sections. Skip the first line. Insert a newline before the heading. adding new line at the end of the TOML file Additional escaping for windows paths Restore original Python version information. Add the source to pip9. Trust the host if it's not verified. Add additional sources as extra indexes. Trust the host if it's not verified. extra_constraints = [] pre-resolve instead of iterating to avoid asking pypi for hashes of editable packages First (proper) attempt: Don't exit here, like usual. Second (last-resort) attempt: Attempt to resolve again, with different Python version information, particularly for particularly particular packages. Grab the hashes from the new warehouse API. Collect un-collectable hashes (should work with devpi). File installs. Assign a package name to the file, last 7 of it's sha256 hex digest. {path: uri} TOML (spec 4 I guess...) Add --editable if applicable VCS Installs. Crop off the git+, etc part. Add --editable, if it's there. Add subdirectory, if it's there Add the specifier, if it was provided. Extras: e.g. egg=requests[security] Comparison operators: e.g. Django>1.10 Extras: e.g. requests[socks] Bare dependencies: e.g. requests Cleanup when there's multiple values, e.g. -e. Default (e.g. '>1.10'). Get rid of '*'. Support for single hash (spec 1). Support for multiple hashes (spec 2). Support for extras (e.g. requests[socks]) For lockfile format. For pipfile format. Support for version control Support for files. Flag the file as editable if it is a local relative path Support for paths. Flag the file as editable if it is a local relative path Support for @refs. Support for subdirectory Support for editable. Support for --egg. Write requirements.txt to tmp directory. Certain packages may be defined with multiple values. Add scheme for parsing purposes, this is also what pip does If the string starts with a valid specifier operator, test if it is a valid specifier set before making a path object (to avoid breaking windows) If this is not a valid specifier, just move on and try it as a path Use pip built-in version parser. Hit the simple API. return the original file if we can't find any pipfile or lockfile sections Turn develop-vcs into ['develop', 'vcs'] Block and ignore hashes for all suffixed sections (vcs/editable) Try to properly case each key if we can. Ensure we aren't adding two layers of file extensions Get files in current dir. See if we are at the top. Borrowed from pew to avoid importing pew which imports psutil See https://github.com/berdario/pew/blob/master/pew/_utils.pyL82 Only match (lower cased) local drives (e.g. 
'c:'), not UNC mounts. Check for read-only attribute split the initial exception out into its type, exception, and traceback Apply write permission and call original function | 8,195 | en | 0.790413 |
from ui import *
startUI()
# # - read the input data:
# import MnistLoader
# training_data, validation_data, test_data = MnistLoader.load_data_wrapper()
# training_data = list(training_data)
# # ---------------------
# # - network.py example:
# from Network import Network, vectorized_result
# from NetworkLoader import save, load
# # netPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\Lab\\Models\\model_5epochs.json";
# # net = load(netPath)
# # # imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png"
# # # predict(imgPath, 7, net)
# # # net = Network([784, 30, 10])
# # # net.run(training_data, 5, 10, 3.0, test_data=test_data, monitor_evaluation_cost=True,
# # # monitor_evaluation_accuracy=True,
# # # monitor_training_cost=True,
# # # monitor_training_accuracy=True)
# # imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png"
# # #predict(imgPath, net)
# # save(net, "E:\ITMO University\Интеллектуальные системы и технологии\Lab5\Lab\Models\model_5epochs.json")
# from ui import *
# net = ""
# startUI()
# # ----------------------
# # - network2.py example:
# # import network2
# # net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
# # #net.large_weight_initializer()
# # net.SGD(training_data, 30, 10, 0.1, lmbda = 5.0,evaluation_data=validation_data,
# # monitor_evaluation_accuracy=True)
| Test.py | 1,744 | - read the input data: import MnistLoader training_data, validation_data, test_data = MnistLoader.load_data_wrapper() training_data = list(training_data) --------------------- - network.py example: from Network import Network, vectorized_result from NetworkLoader import save, load netPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\Lab\\Models\\model_5epochs.json"; net = load(netPath) imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png" predict(imgPath, 7, net) net = Network([784, 30, 10]) net.run(training_data, 5, 10, 3.0, test_data=test_data, monitor_evaluation_cost=True, monitor_evaluation_accuracy=True, monitor_training_cost=True, monitor_training_accuracy=True) imgPath = "E:\\ITMO University\\Интеллектуальные системы и технологии\\Lab5\\Lab\\HandTestImages\\0.png" predict(imgPath, net) save(net, "E:\ITMO University\Интеллектуальные системы и технологии\Lab5\Lab\Models\model_5epochs.json") from ui import * net = "" startUI() ---------------------- - network2.py example: import network2 net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost) net.large_weight_initializer() net.SGD(training_data, 30, 10, 0.1, lmbda = 5.0,evaluation_data=validation_data, monitor_evaluation_accuracy=True) | 1,365 | en | 0.176297 |
#!/usr/bin/env python
import unittest
from math import pi # , isnan
from random import random
import gemmi
from gemmi import Position, UnitCell
class TestMath(unittest.TestCase):
def test_SMat33_transformed_by(self):
tensor = gemmi.SMat33f(random(), random(), random(),
random(), random(), random())
mat = gemmi.Mat33()
mat.fromlist([[random() for _ in range(3)] for _ in range(3)])
t1 = tensor.transformed_by(mat).as_mat33().tolist()
t2 = mat.multiply(tensor.as_mat33()).multiply(mat.transpose()).tolist()
for i in range(3):
for j in range(3):
self.assertAlmostEqual(t1[i][j], t2[i][j])
class TestUnitCell(unittest.TestCase):
def test_dummy_cell(self):
cell = UnitCell()
self.assertEqual([cell.a, cell.b, cell.c], [1, 1, 1])
self.assertEqual([cell.alpha, cell.beta, cell.gamma], [90, 90, 90])
self.assertEqual(cell.volume, 1.0)
def test_ortho_cell(self):
cell = UnitCell(25.14, 39.50, 45.07, 90, 90, 90)
pos = Position(5, -6, 7)
frac = cell.fractionalize(pos)
self.assertAlmostEqual(frac.x, 0.198886, delta=1e-6)
self.assertAlmostEqual(frac.y, -0.151899, delta=1e-6)
self.assertAlmostEqual(frac.z, 0.155314, delta=1e-6)
pos2 = cell.orthogonalize(frac)
self.assertAlmostEqual(pos.x, pos2.x, delta=1e-12)
self.assertAlmostEqual(pos.y, pos2.y, delta=1e-12)
self.assertAlmostEqual(pos.z, pos2.z, delta=1e-12)
corner = cell.orthogonalize(gemmi.Fractional(1, 1, 1))
self.assertAlmostEqual(corner.x, cell.a, delta=1e-12)
self.assertAlmostEqual(corner.y, cell.b, delta=1e-12)
self.assertAlmostEqual(corner.z, cell.c, delta=1e-12)
rec = cell.reciprocal()
self.assertEqual([rec.alpha, rec.beta, rec.gamma], [90, 90, 90])
self.assertAlmostEqual(rec.a, 1 / cell.a, delta=1e-17)
def test_triclinic_cell(self):
cell = UnitCell(35.996, 41.601, 45.756, 67.40, 66.90, 74.85)
pos = Position(-15, -17, 190)
frac = cell.fractionalize(pos)
pos2 = cell.orthogonalize(frac)
self.assertAlmostEqual(pos.x, pos2.x, delta=1e-12)
self.assertAlmostEqual(pos.y, pos2.y, delta=1e-12)
self.assertAlmostEqual(pos.z, pos2.z, delta=1e-12)
# tested against values from uctbx:
# from cctbx import uctbx
# uc = uctbx.unit_cell((35.996, 41.601, 45.756, 67.40, 66.90, 74.85))
# uc.d_star_sq((-3, -2, 1))
# uc.d((3, 4, 5))
self.assertAlmostEqual(cell.calculate_1_d2([-3, -2, 1]),
0.0128229081865688, delta=1e-17)
self.assertAlmostEqual(cell.calculate_d([3, 4, 5]),
7.7319559244298, delta=1e-13)
# uc.metrical_matrix()
cctbx_mm = [1295.712016, 1730.643201, 2093.611536,
391.3591013825865, 646.1921687548228, 731.5043620154578]
mt = cell.metric_tensor()
for a, b in zip(mt.elements(), cctbx_mm):
self.assertAlmostEqual(a, b, delta=1e-12)
# uc.reciprocal_metrical_matrix()
cctbx_rmm = [0.00092792089082916, 0.000689632633981, 0.0006277651322979,
-0.000104162588996, -0.000250008091601, -0.000208806754807]
rmt = cell.reciprocal_metric_tensor()
for a, b in zip(rmt.elements(), cctbx_rmm):
self.assertAlmostEqual(a, b, delta=1e-15)
def test_atom_to_site(self):
cell = UnitCell(35.996, 41.601, 45.756, 67.40, 66.90, 74.85)
atom = gemmi.Atom()
atom.aniso = gemmi.SMat33f(13.1, 20.1, 11.1, -3.5, 5.5, -0.4)
site = gemmi.SmallStructure.Site(atom, cell)
# tested against values from cctbx:
# from cctbx import uctbx, adptbx
# uc = uctbx.unit_cell((35.996, 41.601, 45.756, 67.40, 66.90, 74.85))
# aniso = (13.1, 20.1, 11.1, -3.5, 5.5, -0.4)
# ucif = adptbx.u_cart_as_u_cif(uc, aniso)
ucif = [11.537759976524049, 19.43436271641311, 11.1,
-8.078683096677723, 1.4787260755519491, -3.9018967241279157]
for a, b in zip(site.aniso.elements(), ucif):
self.assertAlmostEqual(a, b, delta=1e-6)
class TestAngles(unittest.TestCase):
def test_dihedral_special_cases(self):
a = Position(random(), random(), random())
# not sure what it should be in such undefined cases
#self.assertTrue(isnan(gemmi.calculate_dihedral(a, a, a, a)))
self.assertEqual(gemmi.calculate_dihedral(a, a, a, a), 0.0)
# Special cases from scitbx tst_math.py
# atan2 is guaranteed to give exact values (I think)
p000 = Position(0, 0, 0)
p100 = Position(1, 0, 0)
p010 = Position(0, 1, 0)
def xy_dihedral(last_point):
return gemmi.calculate_dihedral(p100, p000, p010, last_point)
self.assertEqual(xy_dihedral(Position(1, 1, 0)), 0.0)
self.assertEqual(xy_dihedral(Position(-1, 1, 0)), pi)
p01_ = Position(0, 1, -1)
self.assertEqual(xy_dihedral(p01_), pi/2)
p01_.z = 1
self.assertEqual(xy_dihedral(p01_), -pi/2)
def test_dihedral(self):
        # based on https://stackoverflow.com/questions/20305272/
p0 = Position(24.969, 13.428, 30.692) # N
p1 = Position(24.044, 12.661, 29.808) # CA
p2 = Position(22.785, 13.482, 29.543) # C
p3 = Position(21.951, 13.670, 30.431) # O
p4 = Position(23.672, 11.328, 30.466) # CB
p5 = Position(22.881, 10.326, 29.620) # CG
p6 = Position(23.691, 9.935, 28.389) # CD1
p7 = Position(22.557, 9.096, 30.459) # CD2
def check_dihedral(a, b, c, d, angle):
deg = gemmi.calculate_dihedral(a, b, c, d) * 180 / pi
self.assertAlmostEqual(deg, angle, places=4)
check_dihedral(p0, p1, p2, p3, -71.21515)
check_dihedral(p0, p1, p4, p5, -171.94319)
check_dihedral(p1, p4, p5, p6, 60.82226)
check_dihedral(p1, p4, p5, p7, -177.63641)
if __name__ == '__main__':
unittest.main()
| tests/test_unitcell.py | 6,124 | !/usr/bin/env python , isnan tested against values from uctbx: from cctbx import uctbx uc = uctbx.unit_cell((35.996, 41.601, 45.756, 67.40, 66.90, 74.85)) uc.d_star_sq((-3, -2, 1)) uc.d((3, 4, 5)) uc.metrical_matrix() uc.reciprocal_metrical_matrix() tested against values from cctbx: from cctbx import uctbx, adptbx uc = uctbx.unit_cell((35.996, 41.601, 45.756, 67.40, 66.90, 74.85)) aniso = (13.1, 20.1, 11.1, -3.5, 5.5, -0.4) ucif = adptbx.u_cart_as_u_cif(uc, aniso) not sure what it should be in such undefined casesself.assertTrue(isnan(gemmi.calculate_dihedral(a, a, a, a))) Special cases from scitbx tst_math.py atan2 is guaranteed to give exact values (I think) based on from https://stackoverflow.com/questions/20305272/ N CA C O CB CG CD1 CD2 | 757 | en | 0.50889 |
# Copyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.2"
from typed_python.internals import Class, Member, Function, UndefinedBehaviorException, makeNamedTuple
from typed_python.type_function import TypeFunction
from typed_python.hash import sha_hash
from typed_python.SerializationContext import SerializationContext
from typed_python.type_filter import TypeFilter
from typed_python._types import (
Forward, TupleOf, ListOf, Tuple, NamedTuple, OneOf, ConstDict,
Alternative, Value, serialize, deserialize, serializeStream, deserializeStream,
PointerTo, Dict, validateSerializedObject, validateSerializedObjectStream, decodeSerializedObject,
getOrSetTypeResolver
)
import typed_python._types as _types
# in the C module, these are functions, but because they're not parametrized,
# we want them to be actual values. Otherwise, we'll have 'Float64()'
# where we would have written 'Float64' etc.
Bool = _types.Bool()
Int8 = _types.Int8()
Int16 = _types.Int16()
Int32 = _types.Int32()
Int64 = _types.Int64()
UInt8 = _types.UInt8()
UInt16 = _types.UInt16()
UInt32 = _types.UInt32()
UInt64 = _types.UInt64()
Float32 = _types.Float32()
Float64 = _types.Float64()
NoneType = _types.NoneType()
String = _types.String()
Bytes = _types.Bytes()
EmbeddedMessage = _types.EmbeddedMessage()
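# Hedged usage sketch (untested; relies on typed_python's parametrized-type
# convention for the names exported above):
#   Point = NamedTuple(x=Int64, y=Float64)
#   points = TupleOf(Point)((Point(x=1, y=2.5),))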
| typed_python/__init__.py | 1,868 | Coyright 2017-2019 Nativepython Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. in the c module, these are functions, but because they're not parametrized, we want them to be actual values. Otherwise, we'll have 'Float64()' where we would have written 'Float64' etc. | 766 | en | 0.882776 |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reddit_disentanglement dataset."""
import collections
import csv
import itertools
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{zhu2019did,
title={Who did They Respond to? Conversation Structure Modeling using Masked Hierarchical Transformer},
author={Zhu, Henghui and Nan, Feng and Wang, Zhiguo and Nallapati, Ramesh and Xiang, Bing},
journal={arXiv preprint arXiv:1911.10666},
year={2019}
}
"""
_DESCRIPTION = """
This dataset contains ~3M messages from reddit.
Every message is labeled with metadata. The task is to predict the id of its
parent message in the corresponding thread.
Each record contains a list of messages from one thread.
Duplicated and broken records are removed from the dataset.
Features are:
- id - message id
- text - message text
- author - message author
- created_utc - message UTC timestamp
- link_id - id of the post that the comment relates to
Target:
- parent_id - id of the parent message in the current thread
"""
_THREAD_KEY = "thread"
_MESSAGE_ID = "id"
_MESSAGE_TEXT = "text"
_MESSAGE_TIMESTAMP = "created_utc"
_MESSAGE_AUTHOR = "author"
_MESSAGE_LINK_ID = "link_id"
_MESSAGE_PARENT_ID = "parent_id"
def _read_csv(path):
with tf.io.gfile.GFile(path) as f:
reader = csv.DictReader(f)
for row in reader:
if row["id"]: # Filter out broken lines in the original dataset
yield row
def _deduplicate(data):
"""Remove duplicated records."""
cnt = collections.Counter(row["id"] for row in data)
nonuniq_ids = set(id for id, count in cnt.items() if count > 1)
nonuniq_data = [row for row in data if row["id"] in nonuniq_ids]
unique_data = [row for row in data if row["id"] not in nonuniq_ids]
# Make sure same id records are next to each other for itertools.groupby
nonuniq_data = sorted(nonuniq_data, key=lambda row: row["id"])
for _, same_id_data in itertools.groupby(nonuniq_data, lambda row: row["id"]):
same_id_data = list(same_id_data)
if all(same_id_data[0] == x for x in same_id_data):
unique_data.append(same_id_data[0])
else:
non_deleted_same_id_data = [row for row in same_id_data
if row["author"] != "[deleted]"]
if len(non_deleted_same_id_data) != 1:
raise ValueError("Found several message with id {} in the original"
" data".format(non_deleted_same_id_data[0]["id"]))
unique_data.append(non_deleted_same_id_data[0])
return sorted(unique_data,
key=lambda row: (row["link_id"], row["created_utc"]))
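# Illustrative example (hedged): exact duplicates collapse to a single record.
#   rows = [{"id": "c1", "author": "a", "link_id": "l", "created_utc": "0"}] * 2
#   _deduplicate(rows)
#   # -> [{"id": "c1", "author": "a", "link_id": "l", "created_utc": "0"}]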
class RedditDisentanglement(tfds.core.GeneratorBasedBuilder):
"""Reddit Disentanglement dataset."""
VERSION = tfds.core.Version("2.0.0")
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
Download https://github.com/henghuiz/MaskedHierarchicalTransformer, decompress
raw_data.zip and run generate_dataset.py with your reddit api credentials.
Then put train.csv, val.csv and test.csv from the output directory into the
manual folder.
"""
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
_THREAD_KEY: tfds.features.Sequence(
tfds.features.FeaturesDict({
_MESSAGE_ID: tfds.features.Text(),
_MESSAGE_TEXT: tfds.features.Text(),
_MESSAGE_TIMESTAMP: tfds.features.Text(),
_MESSAGE_AUTHOR: tfds.features.Text(),
_MESSAGE_LINK_ID: tfds.features.Text(),
_MESSAGE_PARENT_ID: tfds.features.Text()
}))}),
homepage="https://github.com/henghuiz/MaskedHierarchicalTransformer",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={"path": os.path.join(
dl_manager.manual_dir, "train.csv")},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={"path": os.path.join(
dl_manager.manual_dir, "val.csv")},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={"path": os.path.join(
dl_manager.manual_dir, "test.csv")},
),
]
def _generate_examples(self, path):
"""Yields examples."""
data = list(_read_csv(path))
data = _deduplicate(data)
for link_id, one_topic_data in itertools.groupby(
data, lambda row: row["link_id"]):
one_topic_data = list(one_topic_data)
for row in one_topic_data:
row["text"] = row.pop("body")
yield link_id, {_THREAD_KEY: one_topic_data}
| tensorflow_datasets/text/reddit_disentanglement.py | 5,429 | Reddit Disentanglement dataset.
Remove duplicated records.
Yields examples.
Returns SplitGenerators.
reddit_disentanglement dataset.
coding=utf-8 Copyright 2020 The TensorFlow Datasets Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Filter out broken lines in the original dataset Make sure same id records are next to each other for itertools.groupby | 836 | en | 0.826337 |
"""Copyright (c) 2005-2017, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
This file contains various classes supporting modifications to CellML models.
"""
import pycml
from pycml import *
import validator
class ModelModificationError(ValueError):
"""Error thrown if a model modification is invalid."""
pass
class ModelModifier(object):
"""Base class supporting common model modification functionality.
This class contains the logic to deal with adding/deleting variables, components, equations, etc.
and connecting things up. It also handles re-analysing the model when modifications have been
completed to ensure that PyCml's internal data structures are up-to-date.
Instances should be created with the model to modify as a single parameter. Once all
modifications have been completed, the finalize method must be called to ensure later
processing of the model (e.g. code generation) will succeed.
"""
def __init__(self, model):
"""Constructor."""
self.model = model
self._units_converter = None
self.connections_made = set()
def finalize(self, error_handler, pre_units_check_hook=None, check_units=True):
"""Re-do the model validation steps needed for further processing of the model.
Checks connections, etc. and builds up the dependency graph again, then performs
a topological sort.
If any errors are found during re-validation, the error_handler will be called with the
list. Warnings are ignored.
TODO: figure out how to determine how much of this is actually needed - InterfaceGenerator
can probably get away with less work.
"""
self._clear_model_caches()
# We want to see any errors
logging_info = validator.CellMLValidator.setup_logging(show_errors=True, show_warnings=False)
# Re-run validation & analysis
self.model._check_variable_mappings()
if not self.model._cml_validation_errors:
assignment_exprs = self.model.search_for_assignments()
self.model._check_assigned_vars(assignment_exprs)
if not self.model._cml_validation_errors:
self.model._classify_variables(assignment_exprs)
self.model._order_variables(assignment_exprs)
if not self.model._cml_validation_errors and check_units:
if callable(pre_units_check_hook):
pre_units_check_hook()
self.model._check_connection_units(check_for_units_conversions=False)
self.model._check_dimensional_consistency(assignment_exprs,
xml_context=False,
warn_on_units_errors=self.model.get_option('warn_on_units_errors'),
check_for_units_conversions=False)
if self.model._cml_validation_errors:
error_handler(self.model._cml_validation_errors)
# Clear up logging
validator.CellMLValidator.cleanup_logging(logging_info)
def _clear_model_caches(self):
"""
Clear cached links in the model, since we'll need to recompute many of them
once we've finished modifying it. Also clears dependency information.
"""
for comp in getattr(self.model, u'component', []):
for math in getattr(comp, u'math', []):
math._unset_cached_links()
for var in self.model.get_all_variables():
var.clear_dependency_info()
assignment_exprs = self.model.search_for_assignments()
for expr in assignment_exprs:
expr.clear_dependency_info()
def create_new_component(self, cname):
"""Create a new component in the model, ensuring the name is unique.
If a component with name cname already exists,
underscores will be added to the component name to make it unique.
"""
while True:
try:
self.model.get_component_by_name(cname)
cname += u'_'
except KeyError:
# Component with this name doesn't exist
break
# Create the component
comp = cellml_component.create_new(self.model, cname)
self.model._add_component(comp)
return comp
def connect_variables(self, source, target):
"""Create a connection between the given source and target variables.
The variables are both specified either by a pair (cname,vname), or as cellml_variable objects.
The source variable must exist within the model, whereas the target might not, in
which case it will be created.
Note that in the case that both source and target exist, it might NOT be the case that
target obtains its value from source. They might already be connected, and source obtains
its value from target. Or they might both obtain their value from a common source.
If the variable names are not identical, any variables created will have the same name as the
target, if possible. If there's an existing variable with that name, not connected to the
source, then underscores will be appended to the name to avoid conflicts. Note that we do
check for variables in intermediate components that have the same name as the source and are
connected to it, to avoid adding unnecessary variables.
Returns the target variable object.
"""
if isinstance(source, cellml_variable):
src_cname, src_vname = source.component.name, source.name
else:
src_cname, src_vname = source
if isinstance(target, cellml_variable):
target_cname, target_vname = target.component.name, target.name
else:
target_cname, target_vname = target
src_comp = self.model.get_component_by_name(src_cname)
target_comp = self.model.get_component_by_name(target_cname)
# print "connect_variables(", src_cname, src_vname, "to", target_cname, target_vname, ")"
if src_comp == target_comp:
return target_comp.get_variable_by_name(target_vname)
# Determine encapsulation paths from target & source to the root
src_path = self._parent_path(src_comp)
target_path = self._parent_path(target_comp)
# print "paths: src=", map(lambda c: c and c.name, src_path), map(lambda c: c and c.name, target_path)
# At some point these will share a common path, even if it's just the root itself
meeting_index = self._find_common_tail(src_path, target_path)
# Construct path from source to target, leaving out the root (None)
path = src_path[:meeting_index]
if src_path[meeting_index]:
path.append(src_path[meeting_index])
path.extend(reversed(target_path[:meeting_index]))
# Traverse this path, adding connections at each step
next_src_var = src_comp.get_variable_by_name(src_vname)
# print "conn", map(lambda c: c.name, path), next_src_var, src_vname, target_vname
for i, src_comp in enumerate(path[:-1]):
target_comp = path[i+1]
# print "step", i, "from", next_src_var, "to", target_comp.name, target_vname
next_src_var = self._make_connection(next_src_var, target_comp, target_vname)
# print "step", i, "made", next_src_var
return next_src_var
def _make_connection(self, src_var, target_comp, target_vname):
"""Make a connection from a source variable to a given component and suggested local name.
Note that in the case that both variables already exist and are connected, the existing
connection is allowed to flow in either direction.
"""
src_comp = src_var.component
target_var = self._find_or_create_variable(target_comp.name, target_vname, src_var)
# print "_make_conn", src_var, target_var, target_comp.name, target_vname
# Sanity check the target variable
if (target_var.get_type() == VarTypes.Mapped
and target_var.get_source_variable(recurse=True) is src_var.get_source_variable(recurse=True)):
# print "Connection exists between", src_var, "and target", target_var
return target_var
elif target_var.get_type() == VarTypes.Unknown:
# We've created this variable, so should be ok, but check for gotchas
assert not(hasattr(target_var, u'initial_value'))
if src_comp is target_comp.parent():
src_if = u'private'
target_if = u'public'
elif src_comp.parent() is target_comp:
src_if = u'public'
target_if = u'private'
else:
assert src_comp.parent() is target_comp.parent()
src_if = u'public'
target_if = u'public'
# One special case: if the src_var is actually obtained from a different
# component at this level or above, in which case we should use the real
# source, not that given.
if getattr(src_var, src_if + u'_interface', u'none') == u'in':
src_var = src_var.get_source_variable()
# Check and set the interface attributes
# print "Connecting source", src_var, src_if, getattr(src_var, src_if + u'_interface', u'none'), src_var.units,
# print "to", target_var, target_if, getattr(target_var, target_if + u'_interface', u'none'), target_var.units
assert getattr(src_var, src_if + u'_interface', u'none') != u'in'
assert getattr(target_var, target_if + u'_interface', u'none') != u'out'
src_var.xml_set_attribute((src_if + u'_interface', None), u'out')
target_var.xml_set_attribute((target_if + u'_interface', None), u'in')
# Create the connection element
self._create_connection_element(src_var, target_var)
self.connections_made.add(frozenset([src_var, target_var]))
# Ensure we handle a later connection attempt between these variables correctly
target_var._set_source_variable(src_var)
else:
# Naming conflict; try again with a different target name
return self._make_connection(src_var, target_comp, target_vname + u'_')
return target_var
def _find_connection_element(self, var1, var2):
"""Find any connection element containing a connection of the given variables.
Returns a pair, the first element of which is either the element or None, and the
second of which is a boolean indicating whether the variables need to be swapped
in order to match the order of the components in the connection.
"""
cn1, cn2 = var1.component.name, var2.component.name
cnames = set([cn1, cn2])
for conn in getattr(self.model, u'connection', []):
mc = conn.map_components
if set([mc.component_1, mc.component_2]) == cnames:
break
else:
conn = None
if conn:
swap = conn.map_components.component_1 == cn2
else:
swap = False
return conn, swap
def _create_connection_element(self, var1, var2):
"""Create a connection element connecting the given variables and add to the model.
If there's already a connection element for the relevant pair of components,
we just add another map_variables element to that.
"""
conn, swap = self._find_connection_element(var1, var2)
if conn:
if swap:
var1, var2 = var2, var1
else:
conn = var1.xml_create_element(u'connection', NSS[u'cml'])
mapc = var1.xml_create_element(u'map_components', NSS[u'cml'],
attributes={u'component_1': var1.component.name,
u'component_2': var2.component.name})
conn.xml_append(mapc)
self.model.xml_append(conn)
mapv = var1.xml_create_element(u'map_variables', NSS[u'cml'],
attributes={u'variable_1': var1.name,
u'variable_2': var2.name})
conn.xml_append(mapv)
def remove_connection(self, var1, var2):
"""Remove a connection between two variables.
Removes the relevant map_variables element.
If this results in an empty connection element, removes that as well.
"""
conn, swap = self._find_connection_element(var1, var2)
if not conn:
raise ModelModificationError("Cannot remove non-existent connection.")
if swap:
var1, var2 = var2, var1
# Find the relevant map_variables element
mapv = conn.xml_xpath(u'cml:map_variables[@variable_1="%s" and @variable_2="%s"]'
% (var1.name, var2.name))
if not mapv:
raise ModelModificationError("Cannot remove non-existent connection.")
conn.xml_remove_child(mapv[0])
if not hasattr(conn, u'map_variables'):
conn.xml_parent.xml_remove_child(conn)
def remove_connections(self, var):
"""Remove all connection elements for the given variable.
Removes each relevant map_variables element.
If this results in an empty connection element, removes that as well.
"""
cname, vname = var.component.name, var.name
for conn in list(getattr(self.model, u'connection', [])):
if cname == conn.map_components.component_1:
vid = u'variable_1'
elif cname == conn.map_components.component_2:
vid = u'variable_2'
else:
continue
for mapv in conn.map_variables:
if vname == getattr(mapv, vid, ''):
# Found a connection
conn.xml_remove_child(mapv)
if not hasattr(conn, u'map_variables'):
conn.xml_parent.xml_remove_child(conn)
# There can't be any more matching map_variables in this connection
break
def _update_connections(self, oldVar, newVar):
"""Change all variables connected to oldVar to be mapped to newVar instead."""
vars = [v for v in self.model.get_all_variables() if v.get_source_variable(True) is oldVar]
# Remove old connections, including interfaces and types so creating the new connection works
for v in vars:
self.remove_connections(v)
self.del_attr(v, u'public_interface')
self.del_attr(v, u'private_interface')
v.clear_dependency_info()
# Create new connections
for v in vars:
self.connect_variables(newVar, v)
def _find_common_tail(self, l1, l2):
"""Find the first element at which both lists are identical from then on."""
i = -1
try:
while l1[i] == l2[i]:
i -= 1
except IndexError:
# One list is the tail of the other
pass
# i now gives the last differing element
assert i < -1
        return i+1
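    # Example with hypothetical lists: _find_common_tail([1, 2, 5, 6], [3, 5, 6])
    # returns -2, since both lists are identical from their element 5 onwards.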
def _parent_path(self, comp):
"""Return a path of components from that given to the encapsulation root.
The root is specified by None, since we're really dealing with a forest,
not a tree.
"""
path = [comp]
while comp:
path.append(comp.parent())
comp = comp.parent()
return path
def _process_operator(self, expr, operator, func, *args, **kwargs):
"""Apply func to any application of the given operator within the given tree."""
for elt in self.model.xml_element_children(expr):
self._process_operator(elt, operator, func, *args, **kwargs)
if isinstance(expr, mathml_apply) and expr.operator().localName == operator:
func(expr, *args, **kwargs)
def _find_or_create_variable(self, cname, vname, source):
"""Find a given variable in the model, creating it if necessary.
We look for a variable in the component named cname with the same name as the source.
If it doesn't exist, a variable named vname will be created in that component (unless
it already exists).
The variable will become a mapped variable with the given source.
Hence if it is created it will have the same units.
"""
        try:
            var = self.model.get_variable_by_name(cname, source.name)
except KeyError:
# Have we created it already?
try:
var = self.model.get_variable_by_name(cname, vname)
except KeyError:
# Create it and add to model
units = source.component.get_units_by_name(source.units)
var = self.add_variable(cname, vname, units)
return var
def add_variable(self, comp, vname, units, **kwargs):
"""Add a new variable to the given component.
Remaining arguments are as for cellml_variable.create_new.
Returns the new variable object.
"""
if not isinstance(comp, cellml_component):
comp = self.model.get_component_by_name(comp)
units = self.add_units(units)
var = cellml_variable.create_new(comp, vname, units.name, **kwargs)
comp._add_variable(var)
return var
def _get_units_object(self, units):
"""Helper function to convert a units specification into a cellml_units object.
The input can be a cellml_units object, in which case we just return it.
However, it can also be a serialised CellML units definition, in which case it
will be parsed to create the object.
"""
if isinstance(units, cellml_units):
# We're done
pass
else:
units = amara_parse_cellml(unicode(units))
assert isinstance(units, cellml_units)
return units
def add_units(self, units):
"""Add a units definition to the model, if it doesn't already exist.
If the definition isn't in the model, at whole-model level, it will be added. If the same
definition is already available, however, that definition should be used by preference.
Will return the actual units object to use.
"""
units = self.model._get_units_obj(units)
try:
model_units = self.model.get_units_by_name(units.name)
except KeyError:
model_units = None
if model_units:
assert units.uniquify_tuple == model_units.uniquify_tuple
units = model_units
else:
units.name = self._uniquify_name(units.name, self.model.get_units_by_name)
self.model.add_units(units.name, units)
self.model.xml_append(units)
# Ensure referenced units exist
for unit in getattr(units, u'unit', []):
unit._set_units_element(self.add_units(unit.get_units_element()), override=True)
unit.units = unit.get_units_element().name
return units
def add_expr_to_comp(self, comp, expr):
"""Add an expression to the mathematics in the given component.
comp may be a cellml_component instance or a component name.
"""
if not isinstance(comp, cellml_component):
comp = self.model.get_component_by_name(comp)
if not hasattr(comp, u'math'):
# Create the math element
math = comp.xml_create_element(u'math', NSS[u'm'])
comp.xml_append(math)
# Append this expression
comp.math.xml_append(expr)
def remove_expr(self, expr):
"""Remove an expression (ODE or assignment) from its parent."""
assert isinstance(expr, mathml_apply)
if expr.xml_parent:
expr.xml_parent.safe_remove_child(expr)
expr.xml_parent = None # Not done by Amara...
return expr
def remove_definition(self, var, keep_initial_value=False):
"""Remove any existing definition (as an equation) of the given variable.
If keep_initial_value is False, then also remove any initial_value attribute.
If the variable is Mapped, throw a ModelModificationError.
"""
if var.get_type() == VarTypes.Mapped:
raise ModelModificationError("Cannot remove the equation defining a mapped variable - remove the definition of its source instead")
if not keep_initial_value:
self.del_attr(var, u'initial_value')
# Note: if this is a variable added by a protocol, then it shouldn't have
# any dependencies set up yet, so this is a no-op.
for dep in var.get_all_expr_dependencies():
self.remove_expr(dep)
        # We now don't know how it will be defined
var.clear_dependency_info()
def del_attr(self, elt, localName, ns=None):
"""Delete an XML attribute from an element, if it exists."""
for (pyname, (qname, ns_)) in elt.xml_attributes.items():
_, name = SplitQName(qname)
if ns_ == ns and name == localName:
delattr(elt, pyname)
def _uniquify_var_name(self, varname, comp):
"""Ensure varname is unique within the given component.
Underscores will be appended to the name until it is unique. The unique name will be returned.
"""
return self._uniquify_name(varname, comp.get_variable_by_name)
def _uniquify_name(self, name, callable):
"""Ensure the given name is unique within a particular context.
The context is determined by the given function: it will be passed candidate names to test
for existence, and is expected to throw iff the name is not already used. Underscores will
be appended to the given name until callable throws, and the resulting unique name returned.
"""
while True:
try:
callable(name)
name += u'_'
except:
break
return name
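    # For example, assuming components named 'interface' and 'interface_'
    # already exist, _uniquify_name(u'interface', self.model.get_component_by_name)
    # would return u'interface__'.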
def set_units_converter(self, converter):
"""Set the object used to units-convert variable initial values."""
self._units_converter = converter
def get_units_converter(self):
"""Get the units converter object, if any has been set."""
if not self._units_converter:
raise ModelModificationError("No units converter has been set.")
return self._units_converter
def _convert_initial_value(self, var, units, do_conversion=True):
"""Convert any initial value of the given variable into the given units.
If there is no initial value, returns None.
If there is no units converter, leaves the initial_value unchanged.
"""
if not hasattr(var, u'initial_value'):
return None
value = var.initial_value
if value and self._units_converter and do_conversion:
if not var.get_units().equals(units):
try:
value = self._units_converter.convert_constant(value, var.get_units(), units, var.component)
except EvaluationError, e:
raise ModelModificationError("Cannot units-convert initial value as requires run-time information:\n"
+ str(e))
return unicode(value)
class InterfaceGenerator(ModelModifier):
"""Class for generating an interface between a CellML model and external code.
This contains functionality for users to describe the interface desired by the external code, i.e.
which variables are inputs and/or outputs, and expected units. It will then create a new component
within the CellML model containing these variables, and add units conversions where required. The
external code then only needs to interact with this new component.
"""
def __init__(self, model, name='interface', units_converter=None):
super(InterfaceGenerator, self).__init__(model)
self._interface_component = None
self._interface_component_name = name
self.set_units_converter(units_converter)
def add_input(self, var, units, annotate=True, convert_initial_value=True):
"""Specify a variable as an input to the model.
var should be a cellml_variable object already existing in the model.
units should be a suitable input to self._get_units_object.
If adding both State and Free variables as inputs, make sure to add the Free variable first,
otherwise you will not be able to specify units for it.
Set annotate to False if you do not wish a Constant variable to be annotated as a modifiable
parameter.
If a units converter has been supplied, we will also try to units-convert initial values.
This may not be possible if special conversions are used, since they may involve variables
whose values are not known at this time. If this is the case, set convert_initial_value to
False to avoid applying the conversion. A proper solution requires CellML 1.1 features.
The new variable added to the interface component is returned.
"""
assert isinstance(var, cellml_variable)
units = self._get_units_object(units)
var = var.get_source_variable(recurse=True) # Ensure we work with source variables only
var_name = var.fullname(cellml=True)
# Check that the variable has a suitable type to be an input
t = var.get_type()
if t == VarTypes.Computed:
raise ModelModificationError("Cannot specify computed variable " + var.fullname() + " as an input")
elif t not in [VarTypes.Constant, VarTypes.Free, VarTypes.State]:
raise ModelModificationError("Variable " + var.fullname() + " has unexpected type " + str(t))
# Add a new variable with desired units to the interface component
comp = self.get_interface_component()
newvar = self.add_variable(comp, var_name, units, id=var.cmeta_id,
initial_value=self._convert_initial_value(var, units, convert_initial_value),
interfaces={u'public': u'out'})
newvar._set_type(t)
# Remove initial value and id from the original, if they exist
self.del_attr(var, u'initial_value')
self.del_attr(var, u'id', NSS['cmeta'])
# If the original variable was a state variable, split the defining equation
if t == VarTypes.State:
self._split_ode(newvar, var)
# Annotate the new variable as a parameter if the original was a constant
if t == VarTypes.Constant and annotate:
newvar.set_is_modifiable_parameter(True)
self._update_connections(var, newvar)
return newvar
def add_output(self, var, units, annotate=True):
"""Specify a variable as an output of the model.
var should be a cellml_variable object already existing in the model.
units should be a suitable input to self._get_units_object.
The new variable will take the cmeta:id of the original, and hence existing metadata
annotations will refer to the new variable.
If annotate is set to True, the new variable will also be annotated as a derived quantity.
The new variable added to the interface component is returned.
"""
assert isinstance(var, cellml_variable)
units = self._get_units_object(units)
var = var.get_source_variable(recurse=True)
var_name = var.fullname(cellml=True)
comp = self.get_interface_component()
newvar = self.add_variable(comp, var_name, units, id=var.cmeta_id)
self.del_attr(var, u'id', NSS['cmeta'])
self.connect_variables(var, newvar)
if annotate:
newvar.set_is_derived_quantity(True)
return newvar
def add_output_function(self, resultName, operator, argVars, units):
"""Add an output that's defined as a (MathML) function of existing model variables.
The desired units are those of the function's result. The function arguments will be
imported with their units as given by the model, and the function calculated. This result
will then be units-converted if necessary.
The new variable added to the interface component is returned.
"""
# Add the result variable
comp = self.get_interface_component()
units = self._get_units_object(units)
result_var = self.add_variable(comp, resultName, units)
result_var.set_pe_keep(True)
# Map the argument variables
operands = []
for var in argVars:
operands.append(self.add_output(var, var.get_units(), annotate=False).name)
# Create the new function and assign it to result_var
expr = mathml_apply.create_new(self.model, operator, operands)
assign = mathml_apply.create_new(self.model, u'eq', [result_var.name, expr])
self.add_expr_to_comp(comp, assign)
return result_var
def make_var_constant(self, var, value):
"""Turn a variable into a constant."""
self.remove_definition(var)
var.clear_dependency_info()
var.initial_value = unicode(str(value))
var._set_type(VarTypes.Constant)
def make_var_computed_constant(self, var, value):
"""Turn a variable into a Computed variable with constant value definition."""
self.remove_definition(var)
var.clear_dependency_info()
defn = mathml_apply.create_new(self.model, u'eq',
[var.name, (unicode(str(value)), var.get_units().name)])
self.add_expr_to_comp(var.component, defn)
var._set_type(VarTypes.Computed)
def finalize(self, *args, **kwargs):
"""Override finalize to also set up standard interface elements not defined individually."""
self._add_all_odes_to_interface()
self._transform_derivatives_on_rhs()
super(InterfaceGenerator, self).finalize(*args, **kwargs)
def _transform_derivatives_on_rhs(self):
"""Transform any equations with derivatives on the RHS to use the variable defining it instead.
self._split_ode must have been used for all derivatives before calling this method. This means
that each ODE now has a variable to which the RHS is assigned. Rather than using the derivative
directly, which could break the dependency chain if units conversions are used for time, equations
should refer to this new variable instead.
"""
for expr in self.model.search_for_assignments():
self._process_operator(list(expr.operands())[1], u'diff', self._transform_derivative_on_rhs)
def _transform_derivative_on_rhs(self, expr):
"""Transform a derivative on the RHS of an equation to refer to the defining variable.
Helper method used by self._transform_derivatives_on_rhs to do the actual transformation.
"""
# Find the variable to use
dep_var = expr.diff.dependent_variable.get_source_variable(recurse=True)
indep_var = expr.diff.independent_variable.get_source_variable(recurse=True)
ode = dep_var.get_ode_dependency(indep_var)
rhs_var = ode.eq.rhs.variable.get_source_variable(recurse=True)
# Ensure there's something mapped to it in this component
rhs_var = self.connect_variables(rhs_var, (expr.component.name, rhs_var.name))
# Update this expression
parent = expr.xml_parent
parent.xml_insert_after(expr, mathml_ci.create_new(parent, rhs_var.name))
parent.safe_remove_child(expr)
def _split_ode(self, newVar, oldVar):
"""Split an ODE definition so the derivative goes into the interface component.
The RHS stays where it is, and is assigned to a new variable, which is connected to the interface
component and assigned to the new derivative. newVar is the new state variable in the interface
component, and oldVar will soon be mapped to it by the caller.
Any other equations in the model which use the derivative are transformed to use the new variable
instead.
"""
# Get the free variable in the interface component
free_var = self.model.find_free_vars()[0]
if free_var.component is not newVar.component:
free_var = self.add_input(free_var, free_var.get_units())
# Add a new variable to assign the RHS to, with units of the original derivative
deriv_name = self._uniquify_var_name(u'd_%s_d_%s' % (oldVar.name, free_var.name), oldVar.component)
orig_ode = oldVar.get_all_expr_dependencies()[0]
orig_rhs_var = self.add_variable(oldVar.component, deriv_name, orig_ode.eq.lhs.get_units().extract())
# Add an output version of this in the interface, with desired units
desired_units = newVar.get_units().quotient(free_var.get_units())
mapped_rhs_var = self.add_output(orig_rhs_var, desired_units, annotate=False)
# Replace the original ODE with an assignment
orig_rhs = orig_ode.eq.rhs
orig_ode.safe_remove_child(orig_rhs)
self.remove_expr(orig_ode)
self.add_expr_to_comp(oldVar.component,
mathml_apply.create_new(self.model, u'eq',
[orig_rhs_var.name, orig_rhs]))
# Create a new ODE in the interface component
new_ode = mathml_diff.create_new(self.model, free_var.name, newVar.name, mapped_rhs_var.name)
self.add_expr_to_comp(newVar.component, new_ode)
new_ode.classify_variables(root=True, dependencies_only=True)
def _add_all_odes_to_interface(self):
"""All the derivatives should be considered as model outputs, and state variables as model inputs.
For any that haven't been done explicitly, this method will add the corresponding state variable
as an input, with its original units, which has the desired effect.
"""
comp = self.get_interface_component()
for var in self.model.find_state_vars():
if var.component is not comp:
self.add_input(var, var.get_units())
def get_interface_component(self):
"""Get the new component that will contain the interface.
The name will be self._interface_component_name, unless a component with that name already exists,
in which case underscores will be added to the component name to make it unique.
"""
if self._interface_component is None:
self._interface_component = self.create_new_component(unicode(self._interface_component_name))
self.model.interface_component_name = unicode(self._interface_component_name)
assert not self._interface_component.ignore_component_name
return self._interface_component
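# Illustrative usage sketch for InterfaceGenerator (not called anywhere in
# this module). The component and variable names below ('membrane', 'V',
# 'i_ion') are hypothetical; a real caller would use names taken from the
# CellML model being processed.
def _interface_generator_example(model):
    gen = InterfaceGenerator(model, name='interface')
    # Expose a variable as an input, keeping its original units
    v = model.get_variable_by_name(u'membrane', u'V')
    gen.add_input(v, v.get_units())
    # Expose another variable as an output
    i_ion = model.get_variable_by_name(u'membrane', u'i_ion')
    gen.add_output(i_ion, i_ion.get_units())
    # gen.finalize(...) would then be called to set up the remaining standard
    # interface elements; its arguments are those of ModelModifier.finalize.
    return gen.get_interface_component()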
class UnitsConverter(ModelModifier):
"""Top-level interface to the units conversion code in PyCml.
"""
def __init__(self, model, warn_only=None, show_xml_context_only=False):
super(UnitsConverter, self).__init__(model)
if warn_only is None:
warn_only = model.get_option('warn_on_units_errors')
self.warn_only = warn_only
self.show_xml_context_only = show_xml_context_only
self.special_conversions = {}
self._setup_logger()
self._converted_mappings = set()
def __del__(self):
self._cleanup_logger()
def _setup_logger(self):
logger = logging.getLogger('units-converter')
logger.setLevel(logging.WARNING)
formatter = logging.Formatter(fmt="%(name)s: %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.addHandler(handler)
self._log_handler = handler
def _cleanup_logger(self):
"""Flush logger & remove handler."""
logger = logging.getLogger('units-converter')
self._log_handler.flush()
logger.removeHandler(self._log_handler)
def try_convert(self, func, *args, **kwargs):
"""Call the given function, and log any units errors produced."""
try:
func(*args, **kwargs)
except UnitsError, e:
if self.show_xml_context_only:
e.show_xml_context_only()
if self.warn_only:
e.warn = True
e.level = logging.WARNING
logging.getLogger('units-converter').log(e.level, unicode(e).encode('UTF-8'))
def _apply_special_conversion_for_nested_expr(self, expr, defn_units, desired_units):
"""Apply a special conversion to the given (sub-)expression.
This will get called by mathml_units_mixin._add_units_conversion if a special conversion is required by a nested sub-expression.
"""
for from_units, to_units in self.special_conversions.iterkeys():
if (from_units.dimensionally_equivalent(defn_units)
and to_units.dimensionally_equivalent(desired_units)):
# We can apply this conversion
expr = self.special_conversions[(from_units, to_units)](expr)
DEBUG('units-converter', "Used nested special conversion from", repr(from_units), "to", repr(to_units))#, "giving", expr.xml())
break
# else:
# print "No on nested conv from", repr(from_units), "to", repr(to_units)
return expr
def _check_special_conversion(self, expr):
"""Check whether a special conversion applies to the given assignment.
Special conversions allow us to do units conversion between dimensionally non-equivalent
quantities, by utilising biological knowledge. Available special conversions are added
using the add_special_conversion method.
"""
lhs_units = expr.eq.lhs.get_units()
rhs_units = expr.eq.rhs.get_units()
if lhs_units.dimensionally_equivalent(rhs_units):
return
for from_units, to_units in self.special_conversions.iterkeys():
if (from_units.dimensionally_equivalent(rhs_units)
and to_units.dimensionally_equivalent(lhs_units)):
# We can apply this conversion
self.special_conversions[(from_units, to_units)](expr)
DEBUG('units-converter', "Used special conversion from", repr(from_units), "to", repr(to_units))#, "giving", expr.xml())
break
# else:
# print "No on conv from", repr(from_units), "to", repr(to_units)
def add_special_conversion(self, from_units, to_units, converter):
"""Add a new special conversion to the list available.
Special conversions allow us to do units conversion between dimensionally non-equivalent
quantities, by utilising biological knowledge. The function "converter" will be called with
an assignment (top-level mathml_apply instance) that has RHS units equivalent to from_units,
and LHS units equivalent to to_units. It should alter the equation in-place (i.e. the
object passed to it must contain the final equation) to do an appropriate units conversion,
at least so that LHS and RHS dimensions match.
"""
self.special_conversions[(from_units, to_units)] = converter
def modify_rhs(self, expr, operator, var):
"""Helper method of use to special units conversions.
Will modify the given expr in-place, replacing the RHS by an application of the given operator.
The operands will be the existing RHS and a ci element referencing the supplied variable object.
Connections and variables will be added to ensure that the given variable is available in the
component in which expr appears.
Returns expr, for ease of chaining expressions.
"""
assert isinstance(var, cellml_variable)
# Ensure var is available in expr's component
local_var_name = var.name
source_comp = var.component
expr_comp = expr.component
if source_comp != expr_comp:
local_var = self.connect_variables(var, (expr_comp.name, var.fullname(cellml=True)))
local_var_name = local_var.name
# Change expr
rhs = expr.eq.rhs
expr.safe_remove_child(rhs)
new_rhs = mathml_apply.create_new(var.model, operator, [rhs, local_var_name])
expr.xml_append(new_rhs)
return expr
def times_rhs_by(self, expr, var):
"""Helper method of use to special units conversions.
Will modify the given expr in-place, post-multiplying the RHS by a reference to the given variable object.
Connections and variables will be added to ensure that the given variable is available in the
component in which expr appears.
Returns expr, for ease of chaining expressions.
"""
return self.modify_rhs(expr, u'times', var)
def divide_rhs_by(self, expr, var):
"""Helper method of use to special units conversions.
Will modify the given expr in-place, post-dividing the RHS by a reference to the given variable
object.
Connections and variables will be added to ensure that the given variable is available in the
component in which expr appears.
Returns expr, for ease of chaining expressions.
"""
return self.modify_rhs(expr, u'divide', var)
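    def _example_register_area_conversion(self, density_units, current_units, area_var):
        """Illustrative sketch only, not called anywhere in this module.
        Registers a special conversion from a (hypothetical) current-density
        units definition to a current units definition by post-multiplying the
        RHS by a membrane-area variable, using the times_rhs_by helper above.
        """
        self.add_special_conversion(density_units, current_units,
                                    lambda expr: self.times_rhs_by(expr, area_var))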
def convert_assignments(self, exprs):
"""Apply conversions to any assignments in the given iterable."""
boolean = self.model.get_units_by_name('cellml:boolean')
for expr in exprs:
if isinstance(expr, mathml_apply):
# print 'Converting? assignment', element_xpath(expr)
if self.special_conversions:
self.try_convert(self._check_special_conversion, expr)
self.try_convert(expr._set_in_units, boolean)
def convert_constant(self, value, from_units, to_units, comp):
"""Convert a constant value into desired units."""
from_units = self.add_units(from_units)
to_units = self.add_units(to_units)
expr = mathml_apply.create_new(self.model, u'eq', [(u'0', to_units.name),
(unicode(value), from_units.name)])
self.add_expr_to_comp(comp, expr)
# Nasty hack to make expr.is_top_level return True
expr._cml_assigns_to = expr.operands().next()
if self.special_conversions:
self.try_convert(self._check_special_conversion, expr)
self.try_convert(expr.eq.rhs._set_in_units, to_units)
self.remove_expr(expr)
return expr.eq.rhs.evaluate()
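    # For example, converting u'1' from a millivolt units definition to a volt
    # units definition (both assumed to exist in the model) evaluates to 0.001.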
def convert_mapping(self, mapping, comp1, comp2, var1, var2):
"""Apply conversions to a mapping between two variables."""
model = self.model
# Check for being already converted
var_pair = frozenset([var1, var2])
if var_pair in self._converted_mappings:
DEBUG('units-converter', 'Skipping already converted mapping', var1, '<->', var2)
return
else:
self._converted_mappings.add(var_pair)
# Ensure mapping is var1 := var2; swap vars if needed
swapped = False
try:
if var2.get_source_variable() is var1:
swapped = True
var1, var2 = var2, var1
comp1, comp2 = comp2, comp1
except TypeError:
pass
# Get units
u1 = var1.get_units()
u2 = var2.get_units()
DEBUG('units-converter', "Converting mapping of", var1, ":=", var2,
"(units:", repr(u1), repr(u2), ")")
if not u1.equals(u2):
# We need a conversion
# Add a copy of var1 to comp1, with units as var2
if getattr(var1, u'public_interface', '') == u'in':
in_interface = u'public'
else:
in_interface = u'private'
var1_converter = self.add_variable(comp1, var1.name + u'_converter', u2, interfaces={in_interface: u'in'})
var1._cml_var_type = VarTypes.Computed
var1._cml_source_var = None
delattr(var1, in_interface + u'_interface')
var1_converter._set_source_variable(var2)
# Add assignment maths for var1 := var1_converter
app = mathml_apply.create_new(model, u'eq', [var1.name, var1_converter.name])
self.add_expr_to_comp(comp1, app)
var1._cml_depends_on = [app]
app._cml_assigns_to = var1
# Update mapping to var1_converter := var2
if swapped:
mapping.variable_2 = var1_converter.name
else:
mapping.variable_1 = var1_converter.name
# Fix usage counts - var1_converter is only used by app, and so var2 usage decreases
var1_converter._used()
for _ in xrange(var1.get_usage_count()):
var2._decrement_usage_count()
# Apply units conversion to the assignment
self.convert_assignments([app])
# Add the assignment into the sorted list
assignments = model.get_assignments()
idx = assignments.index(var1)
assignments[idx:idx+1] = [var1_converter, app]
def convert_connections(self, connections):
"""Add units conversions for all connections in the given set.
:param connections: a set of variable pairs representing connections. For each pair of variables a units conversion
will be added if needed and not already performed.
"""
model = self.model
for conn in getattr(model, u'connection', []):
comp1 = model.get_component_by_name(conn.map_components.component_1)
comp2 = model.get_component_by_name(conn.map_components.component_2)
for mapping in conn.map_variables:
var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)
var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)
if frozenset([var1, var2]) in connections:
self.convert_mapping(mapping, comp1, comp2, var1, var2)
def add_conversions_for_component(self, comp):
"""Add all units conversions required by the given component.
This allows us to only apply the conversions required by an interface component created
by an InterfaceGenerator.
"""
model = self.model
if self.special_conversions:
self.model._cml_special_units_converter = self._apply_special_conversion_for_nested_expr
assignments = model.search_for_assignments(comp)
self.convert_assignments(assignments)
if self.special_conversions:
del self.model._cml_special_units_converter
for conn in getattr(model, u'connection', []):
cname1 = conn.map_components.component_1
cname2 = conn.map_components.component_2
if comp.name in [cname1, cname2]:
comp1 = model.get_component_by_name(cname1)
comp2 = model.get_component_by_name(cname2)
for mapping in conn.map_variables:
var1 = model.get_variable_by_name(cname1, mapping.variable_1)
var2 = model.get_variable_by_name(cname2, mapping.variable_2)
self.convert_mapping(mapping, comp1, comp2, var1, var2)
def add_all_conversions(self):
"""Add all units conversions required in the given model."""
model = self.model
# Mathematical expressions
self.convert_assignments(model.get_assignments())
# Connections
for conn in getattr(model, u'connection', []):
comp1 = model.get_component_by_name(conn.map_components.component_1)
comp2 = model.get_component_by_name(conn.map_components.component_2)
for mapping in conn.map_variables:
var1 = model.get_variable_by_name(comp1.name, mapping.variable_1)
var2 = model.get_variable_by_name(comp2.name, mapping.variable_2)
self.convert_mapping(mapping, comp1, comp2, var1, var2)
return
| python/pycml/processors.py | 50,918 | We want to see any errors Re-run validation & analysis Clear up logging Component with this name doesn't exist Create the component print "connect_variables(", src_cname, src_vname, "to", target_cname, target_vname, ")" Determine encapsulation paths from target & source to the root print "paths: src=", map(lambda c: c and c.name, src_path), map(lambda c: c and c.name, target_path) At some point these will share a common path, even if it's just the root itself Construct path from source to target, leaving out the root (None) Traverse this path, adding connections at each step print "conn", map(lambda c: c.name, path), next_src_var, src_vname, target_vname print "step", i, "from", next_src_var, "to", target_comp.name, target_vname print "step", i, "made", next_src_var print "_make_conn", src_var, target_var, target_comp.name, target_vname Sanity check the target variable print "Connection exists between", src_var, "and target", target_var We've created this variable, so should be ok, but check for gotchas One special case: if the src_var is actually obtained from a different component at this level or above, in which case we should use the real source, not that given. Check and set the interface attributes print "Connecting source", src_var, src_if, getattr(src_var, src_if + u'_interface', u'none'), src_var.units, print "to", target_var, target_if, getattr(target_var, target_if + u'_interface', u'none'), target_var.units Create the connection element Ensure we handle a later connection attempt between these variables correctly Naming conflict; try again with a different target name Find the relevant map_variables element Found a connection There can't be any more matching map_variables in this connection Remove old connections, including interfaces and types so creating the new connection works Create new connections One list is the tail of the other i now gives the last differing element Have we created it already? Create it and add to model We're done Ensure referenced units exist Create the math element Append this expression Not done by Amara... Note: if this is a variable added by a protocol, then it shouldn't have any dependencies set up yet, so this is a no-op. We know don't know how it will be defined Ensure we work with source variables only Check that the variable has a suitable type to be an input Add a new variable with desired units to the interface component Remove initial value and id from the original, if they exist If the original variable was a state variable, split the defining equation Annotate the new variable as a parameter if the original was a constant Add the result variable Map the argument variables Create the new function and assign it to result_var Find the variable to use Ensure there's something mapped to it in this component Update this expression Get the free variable in the interface component Add a new variable to assign the RHS to, with units of the original derivative Add an output version of this in the interface, with desired units Replace the original ODE with an assignment Create a new ODE in the interface component We can apply this conversion, "giving", expr.xml()) else: print "No on nested conv from", repr(from_units), "to", repr(to_units) We can apply this conversion, "giving", expr.xml()) else: print "No on conv from", repr(from_units), "to", repr(to_units) Ensure var is available in expr's component Change expr print 'Converting? 
assignment', element_xpath(expr) Nasty hack to make expr.is_top_level return True Check for being already converted Ensure mapping is var1 := var2; swap vars if needed Get units We need a conversion Add a copy of var1 to comp1, with units as var2 Add assignment maths for var1 := var1_converter Update mapping to var1_converter := var2 Fix usage counts - var1_converter is only used by app, and so var2 usage decreases Apply units conversion to the assignment Add the assignment into the sorted list Mathematical expressions Connections | 4,121 | en | 0.763123 |
#!/usr/bin/env python
#
# Run wasm benchmarks in various configurations and report the times.
# Run with -h for help.
#
# Note: this is a copy of wasm-bench.py adapted for d8.
#
# In the default mode which is "turbofan+liftoff", runs a single shell with
# `--no-wasm-tier-up --liftoff` and `--no-wasm-tier-up --no-liftoff`
# and prints three tab-separated columns:
#
# Turbofan-result Liftoff-result Turbofan/Liftoff
#
# In other benchmarking modes, runs one or two shells with the same argument
# (depending on the mode) and prints three tab-separated columns:
#
# shell1-result shell2-result shell1-result/shell2-result
#
# When measuring compile times (argument = 0) results are compile
# times in ms.
#
# When measuring run times (argument > 0) results are mostly running
# times in ms, except that linpack is 10000000/mflops and scimark is
# 100000/score, always as integer values.
#
# A lower result is always better. Linpack and SciMark outputs are
# inverted to make this consistent.
#
# We measure the running time only for the already-compiled wasm code,
# not the end-to-end time including startup and compilation. The
# difference in ratios is actually not large, but running time is the
# best measure.
#
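# Example invocation (hypothetical shell path):
#
#   JS_SHELL=/path/to/d8 python wasm_bench-d8.py -n 5 fib
#
# runs the fib benchmark five times per configuration in the default
# "turbofan+liftoff" mode and prints the Turbofan median, the Liftoff
# median, and their ratio.
#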
# TODO: Annotate results with - and +, derived from
# the variance maybe. Switch -s / --significance.
#
# TODO: catch any exception from the subprocess and print the log if
# there was one.
#
# TODO: Also check the output for arguments other than the default.
# Easy: use a list of results for the problems, indexed by problem size.
#
# TODO: In several cases below we'd like to check the entire output,
# not just one line of it. Easy: lists of lines, match in order.
#
# TODO: We might like for the output not to contain any other lines than
# the ones we are grepping for. Not very hard - just a flag.
import argparse, os, re, subprocess, sys
def main():
(mode, numruns, argument, isVerbose, noThreads, dumpData, dumpVariance, dumpRange, patterns) = parse_args()
(shell1, shell2) = get_shells(mode)
print "# mode=%s, runs=%d, problem size=%s" % (mode, numruns, (str(argument) if argument != None else "default"))
if not is_check(mode):
print "# Lower score is better"
for test in tests:
(name, _, fn, _) = test
found = len(patterns) == 0
for p in patterns:
found = found or re.search(p, name)
if not found:
continue
msg = name + "\t" + ("\t" if len(name) < 8 else "")
if is_check(mode):
fn(test, isVerbose, noThreads, shell1, get_system1(mode), argument)
msg += "did not crash today"
else:
# Run back-to-back for each shell to reduce caching noise
t1 = []
for i in range(numruns):
(c, r) = fn(test, isVerbose, noThreads, shell1, get_system1(mode), argument)
t1.append(c if argument == 0 else r)
t1.sort()
t2 = []
if not is_only(mode):
for i in range(numruns):
(c, r) = fn(test, isVerbose, noThreads, shell2, get_system2(mode), argument)
t2.append(c if argument == 0 else r)
t2.sort()
n1 = t1[len(t1)/2]
n2 = 1
if not is_only(mode):
n2 = t2[len(t2)/2]
score = three_places(n1, n2)
msg += str(n1) + "\t"
if not is_only(mode):
msg += str(n2) + "\t"
msg += score
if dumpVariance:
lo1 = t1[1]
hi1 = t1[len(t1)-2]
msg += "\t[" + three_places(lo1, n1) + ", " + three_places(hi1, n1) + "]"
if not is_only(mode):
lo2 = t2[1]
hi2 = t2[len(t2)-2]
msg += "\t[" + three_places(lo2, n2) + ", " + three_places(hi2, n2) + "]"
if dumpRange:
lo1 = t1[1]
hi1 = t1[len(t1)-2]
msg += "\t[" + str(lo1) + ", " + str(hi1) + "]"
if not is_only(mode):
lo2 = t2[1]
hi2 = t2[len(t2)-2]
msg += "\t[" + str(lo2) + ", " + str(hi2) + "]"
if dumpData:
msg += "\t" + str(t1)
if not is_only(mode):
msg += "\t" + str(t2)
print msg
def three_places(a, b):
if b == 0:
return "-----"
return str(round(float(a)/float(b)*1000)/1000)
def run_std(test, isVerbose, noThreads, shell, mode, argument):
(name, program, _, correct) = test
if program == None:
program = "wasm_" + name + ".js"
text = run_test(isVerbose, noThreads, shell, program, mode, argument)
return parse_output(text, argument, correct)
def run_linpack(test, isVerbose, noThreads, shell, mode, argument):
text = run_test(isVerbose, noThreads, shell, "wasm_linpack_float.c.js", mode, argument)
if argument == 0:
return parse_output(text, 0, None)
mflops = float(parse_line(text, r"Unrolled +Single +Precision.*Mflops", 4))
score = int(10000000.0/mflops)
return (0,score)
def run_scimark(test, isVerbose, noThreads, shell, mode, argument):
text = run_test(isVerbose, noThreads, shell, "wasm_lua_scimark.c.js", mode, argument)
if argument == 0:
return parse_output(text, 0, None)
mark = float(parse_line(text, r"SciMark.*small", 2))
score = int(100000.0/mark)
return (0,score)
tests = [ ("box2d", None, run_std, r"frame averages:.*, range:.* to "),
("bullet", None, run_std, r"ok.*"),
("conditionals", None, run_std, r"ok 144690090"),
("copy", None, run_std, r"sum:2836"),
("corrections", None, run_std, r"final: 40006013:10225."),
("fannkuch", None, run_std, r"4312567891011"),
("fasta", None, run_std, r"CCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT"),
("fib", "fib.js", run_std, r"fib.40. = 102334155"),
("ifs", None, run_std, r"ok"),
#("linpack", None, run_linpack, None),
("binarytrees", "wasm_lua_binarytrees.c.js", run_std, "843\t trees of depth 10\t check: -842"),
#("scimark", None, run_scimark, None),
("memops", None, run_std, r"final: 400."),
("primes", None, run_std, r"lastprime: 3043739."),
("raybench", "raybench.js", run_std, r"Render time: .*"),
("rust-fannkuch", "rust-fannkuch.js", run_std, r"fannkuch\(11\) = 556355"),
("skinning", None, run_std, r"blah=0.000000"),
("zlib", "wasm_zlib.c.js", run_std, r"sizes: 100000,25906") ]
def run_test(isVerbose, noThreads, shell, program, mode, argument):
cmd = [shell]
if mode == "liftoff":
cmd.append("--no-wasm-tier-up")
# Flag --liftoff is implied by --single_threaded
cmd.append("--liftoff")
if mode == "turbofan":
cmd.append("--no-wasm-tier-up")
cmd.append("--no-liftoff")
if noThreads:
cmd.append("----wasm_num_compilation_tasks=1")
cmd.append(program)
if argument != None:
cmd.append("--")
cmd.append(str(argument))
if isVerbose:
print "# %s" % str(cmd)
log = open('output.tmp', 'w')
text = subprocess.check_output(cmd, stderr=log, universal_newlines=True).split("\n")
log.close()
return text
def parse_output(text, argument, correct):
compileTime = 0
runTime = 0
found = False
do_check = argument == None and correct
for t in text:
if do_check and not found:
found = re.match(correct, t)
if re.match("WASM COMPILE TIME: ", t):
compileTime = int(t[19:])
elif re.match("WASM RUN TIME: ", t):
runTime = int(t[15:])
if do_check and not found:
print text
panic("Did not match expected output " + correct)
return (compileTime, runTime)
def parse_line(text, correct, fieldno):
for t in text:
if re.match(correct, t):
return re.split(r" +", t)[fieldno-1]
panic("Did not match expected output " + correct)
def get_shells(mode):
shell1 = None
shell2 = None
if uses_one_shell(mode):
shell1 = get_shell("JS_SHELL")
shell2 = shell1
else:
shell1 = get_shell("JS_SHELL1")
shell2 = get_shell("JS_SHELL2")
return (shell1, shell2)
def get_shell(name):
probe = os.getenv(name)
if not (probe and os.path.isfile(probe) and os.access(probe, os.X_OK)):
panic(name + " does not name an executable shell")
return probe
def is_check(mode):
return mode == "ion_check" or mode == "baseline_check" or mode == "cranelift_check"
def uses_one_shell(mode):
if is_check(mode) or is_only(mode):
return True
if get_system1(mode) != get_system2(mode):
return True
return False
def get_system1(mode):
if re.search(r"_|\+", mode):
return re.split(r"_|\+", mode)[0]
return mode
def get_system2(mode):
if re.search(r"\+", mode):
return re.split(r"\+", mode)[1]
panic("Mode does not have a second system: " + mode)
def is_only(mode):
return mode == "liftoff_only" or mode == "turbofan_only"
def panic(msg):
sys.exit("Error: " + msg)
def parse_args():
parser = argparse.ArgumentParser(description=
"""Run wasm benchmarks in various configurations.
When a single JS shell is needed the default program name is 'js';
otherwise it can be overridden with the environment variable JS_SHELL.
When two shells are needed they must be named by the environment
variables JS_SHELL1 and JS_SHELL2.""")
parser.add_argument("-a", "--problem", metavar="argument", type=int, help=
"""The problem size argument. The default is 3. With argument=0 we
effectively only compile the code and compilation time is reported
instead. The max is 5.""")
parser.add_argument("-c", "--check", metavar="mode", choices=["liftoff", "turbofan", "turbofan+liftoff"], help=
"""Run only one shell a single run, to see if it works. `mode` must
be "ion" or "baseline" or "cranelift".""")
parser.add_argument("-d", "--data", action="store_true", help=
"""Print the measurement data as two comma-separated lists following
the normal results.""")
parser.add_argument("-i", "--variance", action="store_true", help=
"""For five or more runs, discard the high and low measurements and
print low/median and high/median following the standard columns.""")
parser.add_argument("-j", "--range", action="store_true", help=
"""For five or more runs, discard the high and low measurements and
print low and high following the standard columns.""")
parser.add_argument("-m", "--mode", metavar="mode",
choices=["liftoff", "turbofan", "turbofan+liftoff"],
help=
"""Compare the output of two different shells.
`mode` must be "liftoff", "turbofan", or "turbofan+liftoff"
where a and b are one of those systems. A single system a means a+a.""")
parser.add_argument("-n", "--numruns", metavar="numruns", type=int, help=
"""The number of iterations to run. The default is 1. The value
should be odd. We report the median time.""")
parser.add_argument("-o", "--only", metavar="mode", choices=["liftoff", "turbofan", "turbofan+liftoff"], help=
"""Run only the one shell in the normal manner, and report results
according to any other switches""")
parser.add_argument("-v", "--verbose", action="store_true", help=
"""Verbose. Echo commands and other information on stderr.""")
parser.add_argument("-t", "--no-threads", action="store_true", help=
"""Disable threads in the shell, for added timing stability.
This will significantly impact compile times, and may impact running
time since eg GC runs on the remaining thread with everything else.""")
parser.add_argument("pattern", nargs="*", help=
"""Regular expressions to match against test names""")
    args = parser.parse_args()
if args.check and args.mode:
panic("--check and --mode are incompatible")
if args.check and args.only:
panic("--check and --only are incompatible")
if args.mode and args.only:
panic("--mode and --only are incompatible")
mode = "turbofan+liftoff"
if args.mode:
if re.search(r"\+", args.mode):
mode = args.mode
else:
mode = args.mode + "+" + args.mode
if args.check:
mode = args.check + "_check"
if args.only:
mode = args.only + "_only"
if args.check and args.variance:
panic("--check and --variance are incompatible")
if args.check and args.range:
panic("--check and --range are incompatible")
numruns = 1
if args.numruns != None:
if args.numruns <= 0:
panic("--numruns requires a nonnegative integer")
numruns = args.numruns
if is_check(mode):
numruns = 1
if not (numruns % 2):
panic("The number of runs must be odd")
if args.variance and numruns < 5:
panic("At least five runs required for --variance")
if args.range and numruns < 5:
panic("At least five runs required for --range")
argument = None
if args.problem != None:
if args.problem < 0 or args.problem > 5:
panic("--problem requires an integer between 0 and 5")
argument = args.problem
if args.verbose:
args.data = True
return (mode, numruns, argument, args.verbose, args.no_threads, args.data, args.variance, args.range, args.pattern)
if __name__ == '__main__':
main()
| asm_v_wasm/wasm_bench-d8.py | 14,443 | !/usr/bin/env python Run wasm benchmarks in various configurations and report the times. Run with -h for help. Note: this is a copy of wasm-bench.py adapted for d8. In the default mode which is "turbofan+liftoff", runs a single shell with `--no-wasm-tier-up --liftoff` and `--no-wasm-tier-up --no-liftoff` and prints three tab-separated columns: Ion-result Baseline-result Ion/Baseline In other benchmarking modes, runs one or two shells with the same argument (depending on the mode) and prints three tab-separated columns: shell1-result shell2-result shell1-result/shell2-result When measuring compile times (argument = 0) results are compile times in ms. When measuring run times (argument > 0) results are mostly running times in ms, except that linpack is 1000000/mflops and scimark is 10000/score, always as integer values. A lower result is always better. Linpack and SciMark outputs are inverted to make this consistent. We measure the running time only for the already-compiled wasm code, not the end-to-end time including startup and compilation. The difference in ratios is actually not large, but running time is the best measure. TODO: Annotate results with - and +, derived from the variance maybe. Switch -s / --significance. TODO: catch any exception from the subprocess and print the log if there was one. TODO: Also check the output for other arguments than the default. Easy: use list of results for for the problems, indexed by problem size TODO: In several cases below we'd like to check the entire output, not just one line of it. Easy: lists of lines, match in order. TODO: We might like for the output not to contain any other lines than the ones we are grepping for. Not very hard - just a flag. Run back-to-back for each shell to reduce caching noise("linpack", None, run_linpack, None),("scimark", None, run_scimark, None), Flag --liftoff is implied by --single_threaded | 1,948 | en | 0.883934 |
from typing import Dict, List, Any
import numpy as np
import cv2
from vcap import (
DetectionNode,
DETECTION_NODE_TYPE,
OPTION_TYPE,
BaseStreamState,
BaseBackend,
rect_to_coords)
from vcap_utils import (
BaseOpenVINOBackend,
)
SOS_INDEX = 0
EOS_INDEX = 1
MAX_SEQ_LEN = 28
ALPHABET = ' 0123456789abcdefghijklmnopqrstuvwxyz'
# Subclassing is required because BaseOpenVINOBackend expects a concrete
# process_frame implementation; this class exists only to hold a model.
class OpenVINOModel(BaseOpenVINOBackend):
def process_frame(self,
frame: np.ndarray,
detection_node: DETECTION_NODE_TYPE,
options: Dict[str, OPTION_TYPE],
state: BaseStreamState) -> DETECTION_NODE_TYPE:
        raise NotImplementedError('This backend is not for processing frames. '
                                  'It is only used for storing a model.')
class Backend(BaseBackend):
label_map: Dict[int, str] = {1: "text"}
def __init__(self, detector: OpenVINOModel,
recognizer_encoder: OpenVINOModel,
recognizer_decoder: OpenVINOModel):
super().__init__()
self.detector = detector
self.recognizer_encoder = recognizer_encoder
self.recognizer_decoder = recognizer_decoder
@property
def workload(self) -> float:
return (self.detector.workload +
self.recognizer_encoder.workload +
self.recognizer_decoder.workload)
def process_frame(self, frame: np.ndarray,
detection_node: DETECTION_NODE_TYPE,
options: Dict[str, OPTION_TYPE],
state: BaseStreamState) -> DETECTION_NODE_TYPE:
n, c, h, w = self.detector.net.inputs['im_data'].shape
hidden_shape = self.recognizer_decoder.net.inputs['prev_hidden'].shape
input_dict, resize = self.detector.prepare_inputs(
frame,
frame_input_name="im_data"
)
input_dict["im_data"] = (input_dict["im_data"]
.reshape((n, c, h, w)).astype(np.float32))
input_image_size = self.detector.net.inputs['im_data'].shape[-2:]
input_image_info = np.asarray(
[[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
input_dict["im_info"] = input_image_info
prediction = self.detector.send_to_batch(input_dict).result()
scores = prediction["scores"]
detections_filter = scores > options["threshold"]
scores = scores[detections_filter]
rects = prediction["boxes"][detections_filter]
text_features = prediction["text_features"][detections_filter]
feature_queues = []
for text_feature in text_features:
feature_queues.append(
self.recognizer_encoder.send_to_batch({'input': text_feature}))
detections = []
for score, rect, feature_queue in zip(scores, rects, feature_queues):
feature = feature_queue.result()['output']
feature = np.reshape(feature,
(feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
text = ''
for _ in range(MAX_SEQ_LEN):
decoder_output = self.recognizer_decoder.send_to_batch({
'prev_symbol': prev_symbol_index,
'prev_hidden': hidden,
'encoder_outputs': feature
}).result()
symbols_distr = decoder_output['output']
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += ALPHABET[prev_symbol_index]
hidden = decoder_output['hidden']
detections.append(DetectionNode(
name="text",
coords=rect_to_coords(rect.tolist()),
extra_data={
"detection_confidence": float(score),
"text": text
},
))
return resize.scale_and_offset_detection_nodes(detections)
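# Minimal standalone sketch of the greedy decoding loop used in process_frame
# above, independent of OpenVINO. `decoder_step` is a hypothetical callable
# returning (symbol distribution, next hidden state); nothing here calls it.
def _greedy_decode(decoder_step, encoder_outputs, hidden):
    text = ''
    prev_symbol = SOS_INDEX
    for _ in range(MAX_SEQ_LEN):
        distribution, hidden = decoder_step(prev_symbol, hidden, encoder_outputs)
        prev_symbol = int(np.argmax(distribution))
        if prev_symbol == EOS_INDEX:
            # Stop as soon as the decoder emits the end-of-sequence symbol
            break
        text += ALPHABET[prev_symbol]
    return text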
| capsules/detector_text_openvino/backend.py | 4,255 | We have to do this because we need there to be a process_frame to use it | 72 | en | 0.960407 |
import dlib
from termcolor import colored
from face_cropper.core import DLIB_FACE_DETECTING_MIN_SCORE
def detect(image: str, verbose: bool = False):
"""Detects faces on a given image using dlib and returns matches.
:param image: Path to access the image to be searched
:type image: [string]
    :param verbose: Whether or not the command should output information
    :type verbose: [bool], defaults to False
    :raises RuntimeError: When the provided image path is invalid
:return: The detected faces
:rtype: [list of dlib.rectangle]
"""
detector = dlib.get_frontal_face_detector()
img = dlib.load_rgb_image(image)
dets = detector.run(img, 1, DLIB_FACE_DETECTING_MIN_SCORE)[0]
verbose and print(
colored(
f"Number of faces detected: {len(dets)}\n",
"yellow"
)
)
detections = []
# Avoiding circular imports
from face_cropper.cli.output import colored_detection_output
for index, detection in enumerate(dets):
detections.append(detection)
verbose and print(colored(f"Detection {index + 1}:", "green"))
verbose and colored_detection_output(detection)
return detections
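# Usage sketch (the image path below is hypothetical):
#
#   faces = detect("/tmp/photo.jpg", verbose=True)
#   for rect in faces:
#       print(rect.left(), rect.top(), rect.right(), rect.bottom())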
| face_cropper/core/detector.py | 1,197 | Detects faces on a given image using dlib and returns matches.
:param image: Path to access the image to be searched
:type image: [string]
:param verbose: Wether or not command should output informations
:type image: [bool], default to False
:raises RuntimeError: When the provided image_path is invalid
:return: The detected faces
:rtype: [list of dlib.rectangle]
Avoiding circular imports | 395 | en | 0.632029 |
import numpy as np
import os
from sklearn.neighbors import NearestNeighbors
from pydrake.multibody.rigid_body import RigidBody
from pydrake.all import (
AddFlatTerrainToWorld,
AddModelInstancesFromSdfString,
AddModelInstanceFromUrdfFile,
FindResourceOrThrow,
FloatingBaseType,
InputPort,
Isometry3,
OutputPort,
RgbdCamera,
RigidBodyPlant,
RigidBodyTree,
RigidBodyFrame,
RollPitchYaw,
RollPitchYawFloatingJoint,
RotationMatrix,
Value,
VisualElement,
)
import meshcat
import meshcat.transformations as tf
import meshcat.geometry as g
# From
# https://www.opengl.org/discussion_boards/showthread.php/197893-View-and-Perspective-matrices
def normalize(x):
return x / np.linalg.norm(x)
def save_pointcloud(pc, normals, path):
joined = np.hstack([pc.T, normals.T])
np.savetxt(path, joined)
def load_pointcloud(path):
joined = np.loadtxt(path)
return joined[:, 0:3].T, joined[:, 3:6].T
def translate(x):
T = np.eye(4)
T[0:3, 3] = x[:3]
return T
def get_pose_error(tf_1, tf_2):
rel_tf = transform_inverse(tf_1).dot(tf_2)
if np.allclose(np.diag(rel_tf[0:3, 0:3]), [1., 1., 1.]):
angle_dist = 0.
else:
# Angle from rotation matrix
angle_dist = np.arccos(
(np.sum(np.diag(rel_tf[0:3, 0:3])) - 1) / 2.)
euclid_dist = np.linalg.norm(rel_tf[0:3, 3])
return euclid_dist, angle_dist
# If misalignment_tol = None, returns the average
# distance between the model clouds when transformed
# by est_tf and gt_tf (using nearest-point lookups
# for each point in the gt-tf'd model cloud).
# If misalignment_tol is a number, it returns
# the percent of points that are misaligned by more
# than the misalignment error under the same distance
# metric.
def get_earth_movers_error(est_tf, gt_tf, model_cloud,
misalignment_tol=0.005):
# Transform the model cloud into both frames
est_model_cloud = transform_points(est_tf, model_cloud)
gt_model_cloud = transform_points(gt_tf, model_cloud)
# For every point in the model cloud, find the distance
# to the closest point in the estimated model cloud,
# as a way of finding the swept volume between the
# models in those poses.
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(gt_model_cloud.T)
dist, _ = neigh.kneighbors(
est_model_cloud[0:3, :].T, return_distance=True)
if misalignment_tol is None:
return np.mean(dist)
else:
return np.mean(dist > misalignment_tol)
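# Sanity check for the metric above (not called anywhere): with identical
# estimated and ground-truth poses every nearest-neighbour distance is zero,
# so the misaligned fraction should be 0.0 for any 3xN model_cloud array.
def _earth_movers_error_sanity_check(model_cloud):
    identity = np.eye(4)
    return get_earth_movers_error(identity, identity, model_cloud)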
def draw_points(vis, vis_prefix, name, points,
normals=None, colors=None, size=0.001,
normals_length=0.01):
vis[vis_prefix][name].set_object(
g.PointCloud(position=points,
color=colors,
size=size))
n_pts = points.shape[1]
if normals is not None:
# Drawing normals for debug
lines = np.zeros([3, n_pts*2])
inds = np.array(range(0, n_pts*2, 2))
lines[:, inds] = points[0:3, :]
lines[:, inds+1] = points[0:3, :] + \
normals * normals_length
vis[vis_prefix]["%s_normals" % name].set_object(
meshcat.geometry.LineSegmentsGeometry(
lines, None))
def transform_points(tf, pts):
return ((tf[:3, :3].dot(pts).T) + tf[:3, 3]).T
def transform_inverse(tf):
new_tf = np.eye(4)
new_tf[:3, :3] = tf[:3, :3].T
new_tf[:3, 3] = -new_tf[:3, :3].dot(tf[:3, 3])
return new_tf
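# Round-trip property of the two helpers above: transforming points by tf and
# then by transform_inverse(tf) recovers the originals up to floating point,
# i.e. np.allclose(pts, transform_points(transform_inverse(tf),
# transform_points(tf, pts))) holds for any rigid transform tf.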
def lookat(eye, target, up):
# For a camera with +x right, +y down, and +z forward.
eye = np.array(eye)
target = np.array(target)
up = np.array(up)
F = target[:3] - eye[:3]
f = normalize(F)
U = normalize(up[:3])
s = np.cross(f, U) # right
u = np.cross(s, f) # up
M = np.eye(4)
M[:3, :3] = np.vstack([s, -u, f]).T
# OLD:
# flip z -> x
# -x -> y
# -y -> z
# CAMERA FORWARD is +x-axis
# CAMERA RIGHT is -y axis
# CAMERA UP is +z axis
# Why does the Drake documentation lie to me???
T = translate(eye)
return T.dot(M)
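# Example (hypothetical placement): a camera at (1, 0, 0.5) looking at the
# origin, with world +z as up, following the axis conventions noted above:
# cam_tf = lookat([1., 0., 0.5], [0., 0., 0.], [0., 0., 1.])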
def add_single_instance_to_rbt(
rbt, config, instance_config, i,
floating_base_type=FloatingBaseType.kRollPitchYaw):
class_name = instance_config["class"]
if class_name not in config["objects"].keys():
raise ValueError("Class %s not in classes." % class_name)
if len(instance_config["pose"]) != 6:
raise ValueError("Class %s has pose size != 6. Use RPY plz" %
class_name)
frame = RigidBodyFrame(
"%s_%d" % (class_name, i), rbt.world(),
instance_config["pose"][0:3],
instance_config["pose"][3:6])
model_path = config["objects"][class_name]["model_path"]
_, extension = os.path.splitext(model_path)
if extension == ".urdf":
AddModelInstanceFromUrdfFile(
model_path, floating_base_type, frame, rbt)
elif extension == ".sdf":
AddModelInstancesFromSdfString(
open(model_path).read(), floating_base_type, frame, rbt)
else:
raise ValueError("Class %s has non-sdf and non-urdf model name." %
class_name)
def setup_scene(rbt, config):
if config["with_ground"] is True:
AddFlatTerrainToWorld(rbt)
for i, instance_config in enumerate(config["instances"]):
add_single_instance_to_rbt(rbt, config, instance_config, i,
floating_base_type=FloatingBaseType.kFixed)
# Add camera geometry!
camera_link = RigidBody()
camera_link.set_name("camera_link")
# necessary so this last link isn't pruned by the rbt.compile() call
camera_link.set_spatial_inertia(np.eye(6))
camera_link.add_joint(
rbt.world(),
RollPitchYawFloatingJoint(
"camera_floating_base",
np.eye(4)))
rbt.add_rigid_body(camera_link)
# - Add frame for camera fixture.
camera_frame = RigidBodyFrame(
name="rgbd_camera_frame", body=camera_link,
xyz=[0.0, 0., 0.], rpy=[0., 0., 0.])
rbt.addFrame(camera_frame)
rbt.compile() | src/utils.py | 6,215 | From https://www.opengl.org/discussion_boards/showthread.php/197893-View-and-Perspective-matrices Angle from rotation matrix If misalignment_tol = None, returns the average distance between the model clouds when transformed by est_tf and gt_tf (using nearest-point lookups for each point in the gt-tf'd model cloud). If misalignment_tol is a number, it returns the percent of points that are misaligned by more than the misalignment error under the same distance metric. Transform the model cloud into both frames For every point in the model cloud, find the distance to the closest point in the estimated model cloud, as a way of finding the swept volume between the models in those poses. Drawing normals for debug For a camera with +x right, +y down, and +z forward. right up OLD: flip z -> x -x -> y -y -> z CAMERA FORWARD is +x-axis CAMERA RIGHT is -y axis CAMERA UP is +z axis Why does the Drake documentation lie to me??? Add camera geometry! necessary so this last link isn't pruned by the rbt.compile() call - Add frame for camera fixture. | 1,048 | en | 0.877532 |
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
| udacity_deep_learning/download_data.py | 2,658 | A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
Download a file if not present, and make sure it's the right size.
Change me to store data elsewhere remove .tar.gz You may override by setting force=True. | 314 | en | 0.90038 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from habitat.core.registry import registry
from habitat.core.simulator import Simulator
def _try_register_igibson_socialnav():
try:
import habitat_sim # noqa: F401
has_habitat_sim = True
except ImportError as e:
has_habitat_sim = False
habitat_sim_import_error = e
if has_habitat_sim:
from habitat.sims.igibson_challenge.social_nav import (
iGibsonSocialNav
) # noqa: F401
from habitat.sims.igibson_challenge.interactive_nav import (
iGibsonInteractiveNav
) # noqa: F401
else:
@registry.register_simulator(name="iGibsonSocialNav")
class iGibsonSocialNavImportError(Simulator):
def __init__(self, *args, **kwargs):
raise habitat_sim_import_error
@registry.register_simulator(name="iGibsonInteractiveNav")
        class iGibsonInteractiveNavImportError(Simulator):
def __init__(self, *args, **kwargs):
raise habitat_sim_import_error
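# Registering stub simulators that re-raise the stored ImportError on
# construction keeps the registry names "iGibsonSocialNav" and
# "iGibsonInteractiveNav" resolvable even without habitat_sim, so the failure
# surfaces when a simulator is constructed rather than at import time.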
| habitat/sims/igibson_challenge/__init__.py | 1,216 | !/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. noqa: F401 noqa: F401 noqa: F401 | 223 | en | 0.790096 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from .forms import PostForm, CommentForm
from .models import Post, Comment
def post_list(request):
queryset_list = Post.objects.all().order_by('-publish', 'id')
paginator = Paginator(queryset_list, 25) # Show 25 contacts per page
page = request.GET.get('page')
try:
post_list = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
post_list = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
post_list = paginator.page(paginator.num_pages)
return render(request, "pages/home.html", {
'post_list': post_list,
})
def post_detail(request, slug):
post = get_object_or_404(Post, slug=slug)
    # A bound form is only built for authenticated POSTs; every other request
    # gets a blank comment form.
    form = CommentForm()
    if request.method == 'POST' and request.user:
        form = CommentForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.user = request.user
            instance.post = post
            instance.save()
            messages.add_message(request, messages.SUCCESS, 'Comment Added')
            form = CommentForm()  # reset to a blank form after a successful save
return render(request, 'blog/post_detail.html', {
'post': post,
'form': form,
})
@login_required
def post_add(request):
if request.method == 'POST':
form = PostForm(request.POST)
        if form.is_valid():
            instance = form.save(commit=False)
            instance.user = request.user
            instance.save()
            messages.add_message(request, messages.SUCCESS, 'Blog Post Added')
            form = PostForm()  # reset to a blank form after a successful save
else:
form = PostForm()
return render(request, 'blog/post_form.html', {
'form': form,
})
| blog/blog/views.py | 2,075 | -*- coding: utf-8 -*- Show 25 contacts per page If page is not an integer, deliver first page. If page is out of range (e.g. 9999), deliver last page of results. | 161 | en | 0.682142 |
import numpy as np
import hypothesis
import strax.testutils
import straxen
def channel_split_naive(r, channel_ranges):
"""Slower but simpler implementation of straxen.split_channel_ranges"""
results = []
for left, right in channel_ranges:
results.append(r[np.in1d(r['channel'], np.arange(left, right + 1))])
return results
@hypothesis.settings(deadline=None)
@hypothesis.given(strax.testutils.several_fake_records)
def test_channel_split(records):
channel_range = np.asarray([[0, 0], [1, 2], [3, 3], [4, 999]])
result = list(straxen.split_channel_ranges(records, channel_range))
result_2 = channel_split_naive(records, channel_range)
assert len(result) == len(result_2)
for i, _ in enumerate(result):
np.testing.assert_array_equal(
np.unique(result[i]['channel']),
np.unique(result_2[i]['channel']))
np.testing.assert_array_equal(result[i], result_2[i])
| tests/test_channel_split.py | 944 | Slower but simpler implementation of straxen.split_channel_ranges | 65 | en | 0.501886 |
import sys
import resource
from recommender import recommender
reload(sys)
sys.setdefaultencoding("UTF8")
import os
import uuid
from flask import *
from flask.ext.socketio import SocketIO, emit
from flask_socketio import join_room, leave_room
import psycopg2
import psycopg2.extras
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
users = {}  # session uuid -> user info, filled in by on_identify below
def connect_to_db():
return psycopg2.connect('dbname=movie_recommendations user=movie_normal password=password host=localhost')
# return psycopg2.connect('dbname=movie_recommendations user=postgres password=Cmpgamer1 host=localhost')
@socketio.on('connect', namespace='/movie')
def makeConnection():
session['uuid'] = uuid.uuid1()
print ('Connected')
@socketio.on('identify', namespace='/movie')
def on_identify(user):
print('Identify: ' + user)
users[session['uuid']] = {'username' : user}
movieSearchQuery = "SELECT movie_title FROM movie_titles WHERE movie_title LIKE %s"
newMovieSearch = "select mt.movie_title, my.year from movie_titles mt join movie_years my on mt.id = my.movie_id WHERE movie_title LIKE %s"
movieGenreSearch = "select mt.movie_title, mg.movie_genre from movie_titles mt join movie_genres mg on mt.id = mg.movie_id WHERE movie_title LIKE %s"
@socketio.on('search', namespace='/movie')
def search(searchItem):
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
searchQuery = ""
results = []
queryResults = []
searchTerm = '%{0}%'.format(searchItem)
try:
cur.execute(newMovieSearch, (searchTerm,))
results = cur.fetchall()
except Exception as e:
print("Error: Invalid SEARCH in 'movie_titles' table: %s" % e)
try:
cur.execute(movieGenreSearch, (searchTerm,))
genreResults = cur.fetchall()
except Exception as e:
print("Error: Invalid SEARCH in 'movie_titles' table: %s" % e)
movieGenres = {}
copyGenres = genreResults
parsedResults = []
movieList = {}
prevMovie = None
for movie in genreResults:
if prevMovie is not None and prevMovie[0] == movie[0]:
movieList[movie[0]].append(movie[1])
else:
movieList[movie[0]] = [movie[1]]
prevMovie = movie
for i in range(len(results)):
resultsDict = {'text' : results[i]['movie_title'], 'year' : results[i]['year']}
if results[i]['movie_title'] in movieList:
resultsDict['genres'] = movieList[results[i]['movie_title']]
queryResults.append(resultsDict)
print(queryResults)
cur.close()
db.close()
emit('searchResults', queryResults)
doesUserAlreadyExist = 'SELECT * FROM users WHERE username = %s LIMIT 1'
registerNewUser = "INSERT INTO users VALUES (default, %s, %s, %s, crypt(%s, gen_salt('md5')))"
@app.route('/register', methods=['GET', 'POST'])
def register():
redirectPage = 'landing.html'
error = ''
if request.method == 'POST':
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
firstName = request.form['firstName']
lastName = request.form['lastName']
username = request.form['registerUsername']
password = request.form['registerPassword']
password2 = request.form['registerConfirmPassword']
        if not username.strip():
            error += 'Username is required.\n'
        if not firstName.strip():
            error += 'First Name is required.\n'
        if not lastName.strip():
            error += 'Last Name is required.\n'
        if not password.strip():
            error += 'Password is required.\n'
        if not password2.strip():
            error += 'Password must be entered twice.\n'
if password != password2:
error += 'Passwords do not match.\n'
if len(error) == 0:
try:
cur.execute(doesUserAlreadyExist, (username,)) # check whether user already exists
if cur.fetchone():
error += 'Username is already taken.\n'
else:
try:
cur.execute(registerNewUser, (firstName, lastName, username, password)) # add user to database
db.commit()
except Exception as e:
print("Error: Invalid INSERT in 'user' table: %s" % e)
except Exception as e:
print("Error: Invalid SEARCH in 'user' table: %s" % e)
cur.close()
db.close()
if len(error) != 0:
redirectPage = 'landing.html'
if len(error) != 0:
pass
# flash error message
return render_template(redirectPage, error=error)
loginQuery = 'SELECT * from users WHERE username = %s AND password = crypt(%s, password)'
@app.route('/login', methods=['GET', 'POST'])
def login():
redirectPage = 'landing.html'
error = ''
results = None
if request.method == 'POST':
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
username = request.form['username']
pw = request.form['password']
try:
cur.execute(loginQuery, (username, pw))
results = cur.fetchone()
except Exception as e:
print("Error: SEARCH in 'user' table: %s" % e)
cur.close()
db.close()
if not results: # user does not exist
error += 'Incorrect username or password.\n'
else:
print(results['username'])
session['username'] = results['username']
session['id'] = results['id']
results = []
return redirect(url_for('index'))
if len(error) != 0:
pass
# flash error
return render_template(redirectPage, error=error)
@app.route('/landing', methods=['GET', 'POST'])
def landing():
if 'username' in session:
print("index")
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
#get dynamic top 12
query = "SELECT movie_titles.movie_title, movie_ratings.rating FROM movie_titles INNER JOIN movie_ratings ON movie_titles.id=movie_ratings.movie_id ORDER BY movie_ratings.rating DESC LIMIT 12;"
#print("are we getting here?????????????")
try:
cur.execute(query)
results=cur.fetchall()
        except Exception as e:
raise e
return render_template('index.html', results=results)
else:
return render_template('landing.html')
@app.route('/', methods=['GET', 'POST'])
def index():
if 'username' in session:
print("index")
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
#get dynamic top 12
query = "SELECT movie_titles.movie_title, movie_ratings.rating FROM movie_titles INNER JOIN movie_ratings ON movie_titles.id=movie_ratings.movie_id ORDER BY movie_ratings.rating DESC LIMIT 12;"
#print("are we getting here?????????????")
try:
cur.execute(query)
results=cur.fetchall()
        except Exception as e:
raise e
return render_template('index.html', results=results)
else:
return render_template('landing.html')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
session.clear()
return redirect(url_for('index'))
movieRatingQuery = "SELECT mt.movie_title as movie_id, u.id, mr.rating FROM movie_ratings mr JOIN users u on u.id = mr.user_id JOIN movie_titles mt ON mt.id = mr.movie_id"
movieIDQuery = "SELECT * FROM movie_titles"
@socketio.on('recommend', namespace='/movie')
def recommend(test):
print("Do I get here?")
redirectPage = 'recommendations.html'
data = {}
productid2name = {}
userRatings= {}
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
try:
cur.execute(movieRatingQuery)
results = cur.fetchall()
except Exception as e:
print("Error: SEARCH in 'movie_ratings table: %s" % e)
for row in results:
user = row['id']
movie = row['movie_id']
rating = float(row['rating'])
if user in data:
currentRatings = data[user]
else:
currentRatings = {}
currentRatings[movie] = rating
data[user] = currentRatings
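    # At this point data maps user id -> {movie title: rating}, e.g. (made-up
    # values) {42: {'Toy Story': 5.0, 'Alien': 3.5}}, which is the nested-dict
    # format the recommender below consumes.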
try:
cur.execute(movieIDQuery)
results = cur.fetchall()
except Exception as e:
print("Error: SEARCH in 'movie_titles' table: %s" % e)
cur.close()
db.close()
movieLens = recommender(5, 15) #Manhattan Distance 5 Nearest Neighbors
movieLens.data = data
results = movieLens.recommend(session['id'])
print(results)
queryResults = []
for i,movie in results:
queryResults.append({'text': movie[0]})
print(queryResults)
emit('recommendationResults', queryResults)
getMovieIDQuery= "SELECT movie_titles.id FROM movie_titles JOIN movie_years ON movie_titles.id = movie_years.movie_id WHERE movie_title = %s AND year = %s"
insertRateQuery= "INSERT INTO movie_ratings VALUES(default, %s, %s, %s)"
## default, movie_id, user_id, movie_review
insertReviewQuery="INSERT INTO movie_reviews VALUES(default, %s, %s, %s)"
@app.route('/rateMovie', methods=['GET', 'POST'])
def rateMovie():
redirectPage= "index.html"
if request.method == 'POST':
db = connect_to_db()
cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
movie_title= request.form['moviename'] #both queries
rating = request.form['movierating'] #insertRateQuery
review = request.form['moviereview']
year = request.form['movieyear']
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# print(rating)
# print(year)
# print(session['id'])
# print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
try:
cur.execute(getMovieIDQuery, (movie_title, year))
movieID = cur.fetchone()
except Exception as e:
print(e)
#
# Work out logic to prevent people from rating movies twice.
#
if rating:
try:
cur.execute(insertRateQuery, (session['id'], movieID['id'], rating))
db.commit()
except Exception as e:
                print(e)
else:
pass
if review:
try:
                cur.execute(insertReviewQuery, (movieID['id'], session['id'], review))
                db.commit()
except Exception as e:
print(e)
else:
pass
return redirect(url_for('index'))
# start the server
if __name__ == '__main__':
socketio.run(app, host=os.getenv('IP', '0.0.0.0'), port =int(os.getenv('PORT', 8080)), debug=True)
| .~c9_invoke_iUgkLr.py | 11,249 | return psycopg2.connect('dbname=movie_recommendations user=postgres password=Cmpgamer1 host=localhost') check whether user already exists add user to database flash error message user does not exist flash errorget dynamic top 12print("are we getting here?????????????")get dynamic top 12print("are we getting here?????????????")Manhattan Distance 5 Nearest Neighbors default, movie_id, user_id, movie_reviewboth queriesinsertRateQuery print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") print(rating) print(year) print(session['id']) print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~") Work out logic to prevent people from rating movies twice. start the server | 637 | en | 0.466049 |
import os
import sys
import tempfile
import pytest
import logging
from pathlib import Path
from dtaidistance import dtw, dtw_ndim, clustering, util_numpy
import dtaidistance.dtw_visualisation as dtwvis
from dtaidistance.exceptions import PyClusteringException
logger = logging.getLogger("be.kuleuven.dtai.distance")
directory = None
numpyonly = pytest.mark.skipif("util_numpy.test_without_numpy()")
scipyonly = pytest.mark.skipif("util_numpy.test_without_scipy()")
@numpyonly
def test_clustering():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (1, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, 2, merge_hook=test_hook,
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 3, 4}
assert cluster_idx[2] == {2, 5}
@numpyonly
def test_clustering_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@numpyonly
def test_clustering_tree_ndim():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[[0.,0.], [0,0], [1,0], [2,0], [1,0], [0,0], [1,0], [0,0], [0,0]],
[[0.,0.], [1,0], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [0,0]],
[[1.,0.], [2,0], [0,0], [0,0], [0,0], [0,0], [0,0], [1,0], [1,0]]])
model = clustering.Hierarchical(dtw_ndim.distance_matrix_fast, {'ndim':2},
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 2}
@numpyonly
def test_clustering_tree_maxdist():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False, max_dist=0.1)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_linkage_tree():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
cluster_idx = model.fit(s)
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
if not dtwvis.test_without_visualization():
model.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(model.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
@scipyonly
@numpyonly
def test_controlchart():
with util_numpy.test_uses_numpy() as np:
series = np.zeros((600, 60))
rsrc_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rsrc', 'synthetic_control.data')
with open(rsrc_fn, 'r') as ifile:
for idx, line in enumerate(ifile.readlines()):
series[idx, :] = line.split()
s = []
for idx in range(0, 600, 20):
s.append(series[idx, :])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {'parallel': True})
cluster_idx = model.fit(s)
if not dtwvis.test_without_visualization():
import matplotlib.pyplot as plt
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 10))
show_ts_label = lambda idx: "ts-" + str(idx)
# show_ts_label = list(range(len(s)))
def curcmap(idx):
if idx % 2 == 0:
return 'r'
return 'g'
model.plot(hierarchy_fn, axes=ax, show_ts_label=show_ts_label,
show_tr_label=True, ts_label_margin=-10,
ts_left_margin=10, ts_sample_length=1, ts_color=curcmap)
print("Figure saved to", hierarchy_fn)
@scipyonly
@numpyonly
def test_plotbug1():
with util_numpy.test_uses_numpy() as np:
s1 = np.array([0., 0, 1, 2, 1, 0, 1, 0, 0, 2, 1, 0, 0])
s2 = np.array([0., 1, 2, 3, 1, 0, 0, 0, 2, 1, 0, 0])
series = s1, s2
m = clustering.LinkageTree(dtw.distance_matrix, {})
m.fit(series)
if not dtwvis.test_without_visualization():
if directory:
hierarchy_fn = os.path.join(directory, "clustering.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_clustering.png"
m.plot(hierarchy_fn)
print("Figure save to", hierarchy_fn)
@numpyonly
def test_clustering_centroid():
with util_numpy.test_uses_numpy() as np:
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
# def test_hook(from_idx, to_idx, distance):
# assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.KMedoids(dtw.distance_matrix_fast, {}, k=3,
show_progress=False)
try:
cluster_idx = model.fit(s)
except PyClusteringException:
return
# assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if not dtwvis.test_without_visualization():
if directory:
png_fn = os.path.join(directory, "centroid.png")
else:
file = tempfile.NamedTemporaryFile()
png_fn = file.name + "_centroid.png"
model.plot(png_fn)
print("Figure saved to", png_fn)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
# test_clustering_tree()
test_clustering_tree_ndim()
# test_clustering_tree_maxdist()
# test_linkage_tree()
# test_controlchart()
# test_plotbug1()
# test_clustering_centroid()
| tests/test_clustering.py | 9,708 | show_ts_label = list(range(len(s))) def test_hook(from_idx, to_idx, distance): assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)] assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6} test_clustering_tree() test_clustering_tree_maxdist() test_linkage_tree() test_controlchart() test_plotbug1() test_clustering_centroid() | 344 | en | 0.318576 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import logging
import numpy as np
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.debug import log_once, summarize
from ray.rllib.utils.tracking_dict import UsageTrackingDict
tf = try_import_tf()
logger = logging.getLogger(__name__)
class DynamicTFPolicy(TFPolicy):
"""A TFPolicy that auto-defines placeholders dynamically at runtime.
Initialization of this class occurs in two phases.
* Phase 1: the model is created and model variables are initialized.
* Phase 2: a fake batch of data is created, sent to the trajectory
postprocessor, and then used to create placeholders for the loss
function. The loss and stats functions are initialized with these
placeholders.
Initialization defines the static graph.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
        model (ModelV2): TF model instance
dist_class (type): TF action distribution class
"""
def __init__(self,
obs_space,
action_space,
config,
loss_fn,
stats_fn=None,
grad_stats_fn=None,
before_loss_init=None,
make_model=None,
action_sampler_fn=None,
existing_inputs=None,
existing_model=None,
get_batch_divisibility_req=None,
obs_include_prev_action_reward=True):
"""Initialize a dynamic TF policy.
Arguments:
            obs_space (gym.Space): Observation space of the policy.
action_space (gym.Space): Action space of the policy.
config (dict): Policy-specific configuration data.
            loss_fn (func): function that returns a loss tensor for the
                policy graph, given a dict of experience tensor placeholders
stats_fn (func): optional function that returns a dict of
TF fetches given the policy and batch input tensors
grad_stats_fn (func): optional function that returns a dict of
TF fetches given the policy and loss gradient tensors
before_loss_init (func): optional function to run prior to loss
init that takes the same arguments as __init__
make_model (func): optional function that returns a ModelV2 object
given (policy, obs_space, action_space, config).
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (func): optional function that returns a
tuple of action and action logp tensors given
(policy, model, input_dict, obs_space, action_space, config).
If not specified, a default action distribution will be used.
existing_inputs (OrderedDict): when copying a policy, this
specifies an existing dict of placeholders to use instead of
defining new ones
existing_model (ModelV2): when copying a policy, this specifies
an existing model to clone and share weights with
get_batch_divisibility_req (func): optional function that returns
the divisibility requirement for sample batches
obs_include_prev_action_reward (bool): whether to include the
previous action and reward in the model input
"""
self.config = config
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._obs_include_prev_action_reward = obs_include_prev_action_reward
# Setup standard placeholders
prev_actions = None
prev_rewards = None
if existing_inputs is not None:
obs = existing_inputs[SampleBatch.CUR_OBS]
if self._obs_include_prev_action_reward:
prev_actions = existing_inputs[SampleBatch.PREV_ACTIONS]
prev_rewards = existing_inputs[SampleBatch.PREV_REWARDS]
else:
obs = tf.placeholder(
tf.float32,
shape=[None] + list(obs_space.shape),
name="observation")
if self._obs_include_prev_action_reward:
prev_actions = ModelCatalog.get_action_placeholder(
action_space)
prev_rewards = tf.placeholder(
tf.float32, [None], name="prev_reward")
self._input_dict = {
SampleBatch.CUR_OBS: obs,
SampleBatch.PREV_ACTIONS: prev_actions,
SampleBatch.PREV_REWARDS: prev_rewards,
"is_training": self._get_is_training_placeholder(),
}
self._seq_lens = tf.placeholder(
dtype=tf.int32, shape=[None], name="seq_lens")
# Setup model
if action_sampler_fn:
if not make_model:
raise ValueError(
"make_model is required if action_sampler_fn is given")
self.dist_class = None
else:
self.dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
if existing_model:
self.model = existing_model
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space,
action_space,
logit_dim,
self.config["model"],
framework="tf")
if existing_inputs:
self._state_in = [
v for k, v in existing_inputs.items()
if k.startswith("state_in_")
]
if self._state_in:
self._seq_lens = existing_inputs["seq_lens"]
else:
self._state_in = [
tf.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)
for s in self.model.get_initial_state()
]
model_out, self._state_out = self.model(self._input_dict,
self._state_in, self._seq_lens)
# Setup action sampler
if action_sampler_fn:
action_sampler, action_logp = action_sampler_fn(
self, self.model, self._input_dict, obs_space, action_space,
config)
else:
action_dist = self.dist_class(model_out, self.model)
action_sampler = action_dist.sample()
action_logp = action_dist.sampled_action_logp()
# Phase 1 init
sess = tf.get_default_session() or tf.Session()
if get_batch_divisibility_req:
batch_divisibility_req = get_batch_divisibility_req(self)
else:
batch_divisibility_req = 1
TFPolicy.__init__(
self,
obs_space,
action_space,
sess,
obs_input=obs,
action_sampler=action_sampler,
action_logp=action_logp,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_in,
state_outputs=self._state_out,
prev_action_input=prev_actions,
prev_reward_input=prev_rewards,
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req)
# Phase 2 init
before_loss_init(self, obs_space, action_space, config)
if not existing_inputs:
self._initialize_loss()
@override(TFPolicy)
def copy(self, existing_inputs):
"""Creates a copy of self using existing input placeholders."""
# Note that there might be RNN state inputs at the end of the list
if self._state_inputs:
num_state_inputs = len(self._state_inputs) + 1
else:
num_state_inputs = 0
if len(self._loss_inputs) + num_state_inputs != len(existing_inputs):
raise ValueError("Tensor list mismatch", self._loss_inputs,
self._state_inputs, existing_inputs)
for i, (k, v) in enumerate(self._loss_inputs):
if v.shape.as_list() != existing_inputs[i].shape.as_list():
raise ValueError("Tensor shape mismatch", i, k, v.shape,
existing_inputs[i].shape)
# By convention, the loss inputs are followed by state inputs and then
# the seq len tensor
rnn_inputs = []
for i in range(len(self._state_inputs)):
rnn_inputs.append(("state_in_{}".format(i),
existing_inputs[len(self._loss_inputs) + i]))
if rnn_inputs:
rnn_inputs.append(("seq_lens", existing_inputs[-1]))
input_dict = OrderedDict(
[(k, existing_inputs[i])
for i, (k, _) in enumerate(self._loss_inputs)] + rnn_inputs)
instance = self.__class__(
self.observation_space,
self.action_space,
self.config,
existing_inputs=input_dict,
existing_model=self.model)
instance._loss_input_dict = input_dict
loss = instance._do_loss_init(input_dict)
loss_inputs = [(k, existing_inputs[i])
for i, (k, _) in enumerate(self._loss_inputs)]
TFPolicy._initialize_loss(instance, loss, loss_inputs)
if instance._grad_stats_fn:
instance._stats_fetches.update(
instance._grad_stats_fn(instance, input_dict, instance._grads))
return instance
@override(Policy)
def get_initial_state(self):
if self.model:
return self.model.get_initial_state()
else:
return []
def is_recurrent(self):
return len(self._state_in) > 0
def num_state_tensors(self):
return len(self._state_in)
def _initialize_loss(self):
def fake_array(tensor):
shape = tensor.shape.as_list()
shape = [s if s is not None else 1 for s in shape]
return np.zeros(shape, dtype=tensor.dtype.as_numpy_dtype)
dummy_batch = {
SampleBatch.CUR_OBS: fake_array(self._obs_input),
SampleBatch.NEXT_OBS: fake_array(self._obs_input),
SampleBatch.DONES: np.array([False], dtype=np.bool),
SampleBatch.ACTIONS: fake_array(
ModelCatalog.get_action_placeholder(self.action_space)),
SampleBatch.REWARDS: np.array([0], dtype=np.float32),
}
if self._obs_include_prev_action_reward:
dummy_batch.update({
SampleBatch.PREV_ACTIONS: fake_array(self._prev_action_input),
SampleBatch.PREV_REWARDS: fake_array(self._prev_reward_input),
})
state_init = self.get_initial_state()
state_batches = []
for i, h in enumerate(state_init):
dummy_batch["state_in_{}".format(i)] = np.expand_dims(h, 0)
dummy_batch["state_out_{}".format(i)] = np.expand_dims(h, 0)
state_batches.append(np.expand_dims(h, 0))
if state_init:
dummy_batch["seq_lens"] = np.array([1], dtype=np.int32)
for k, v in self.extra_compute_action_fetches().items():
dummy_batch[k] = fake_array(v)
# postprocessing might depend on variable init, so run it first here
self._sess.run(tf.global_variables_initializer())
postprocessed_batch = self.postprocess_trajectory(
SampleBatch(dummy_batch))
# model forward pass for the loss (needed after postprocess to
# overwrite any tensor state from that call)
self.model(self._input_dict, self._state_in, self._seq_lens)
if self._obs_include_prev_action_reward:
train_batch = UsageTrackingDict({
SampleBatch.PREV_ACTIONS: self._prev_action_input,
SampleBatch.PREV_REWARDS: self._prev_reward_input,
SampleBatch.CUR_OBS: self._obs_input,
})
loss_inputs = [
(SampleBatch.PREV_ACTIONS, self._prev_action_input),
(SampleBatch.PREV_REWARDS, self._prev_reward_input),
(SampleBatch.CUR_OBS, self._obs_input),
]
else:
train_batch = UsageTrackingDict({
SampleBatch.CUR_OBS: self._obs_input,
})
loss_inputs = [
(SampleBatch.CUR_OBS, self._obs_input),
]
for k, v in postprocessed_batch.items():
if k in train_batch:
continue
elif v.dtype == np.object:
continue # can't handle arbitrary objects in TF
elif k == "seq_lens" or k.startswith("state_in_"):
continue
shape = (None, ) + v.shape[1:]
dtype = np.float32 if v.dtype == np.float64 else v.dtype
placeholder = tf.placeholder(dtype, shape=shape, name=k)
train_batch[k] = placeholder
for i, si in enumerate(self._state_in):
train_batch["state_in_{}".format(i)] = si
train_batch["seq_lens"] = self._seq_lens
if log_once("loss_init"):
logger.debug(
"Initializing loss function with dummy input:\n\n{}\n".format(
summarize(train_batch)))
self._loss_input_dict = train_batch
loss = self._do_loss_init(train_batch)
for k in sorted(train_batch.accessed_keys):
if k != "seq_lens" and not k.startswith("state_in_"):
loss_inputs.append((k, train_batch[k]))
TFPolicy._initialize_loss(self, loss, loss_inputs)
if self._grad_stats_fn:
self._stats_fetches.update(
self._grad_stats_fn(self, train_batch, self._grads))
self._sess.run(tf.global_variables_initializer())
def _do_loss_init(self, train_batch):
loss = self._loss_fn(self, self.model, self.dist_class, train_batch)
if self._stats_fn:
self._stats_fetches.update(self._stats_fn(self, train_batch))
# override the update ops to be those of the model
self._update_ops = self.model.update_ops()
return loss
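# Hedged usage sketch (names illustrative): concrete policies are normally
# produced via rllib's build_tf_policy rather than by instantiating this class
# directly, e.g.
#   MyTFPolicy = build_tf_policy(name="MyTFPolicy", loss_fn=my_loss_fn)
# where my_loss_fn(policy, model, dist_class, train_batch) returns a scalar
# loss tensor, matching the _loss_fn call in _do_loss_init above.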
| rllib/policy/dynamic_tf_policy.py | 14,878 | A TFPolicy that auto-defines placeholders dynamically at runtime.
Initialization of this class occurs in two phases.
* Phase 1: the model is created and model variables are initialized.
* Phase 2: a fake batch of data is created, sent to the trajectory
postprocessor, and then used to create placeholders for the loss
function. The loss and stats functions are initialized with these
placeholders.
Initialization defines the static graph.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
model (TorchModel): TF model instance
dist_class (type): TF action distribution class
Initialize a dynamic TF policy.
Arguments:
observation_space (gym.Space): Observation space of the policy.
action_space (gym.Space): Action space of the policy.
config (dict): Policy-specific configuration data.
loss_fn (func): function that returns a loss tensor the policy
graph, and dict of experience tensor placeholders
stats_fn (func): optional function that returns a dict of
TF fetches given the policy and batch input tensors
grad_stats_fn (func): optional function that returns a dict of
TF fetches given the policy and loss gradient tensors
before_loss_init (func): optional function to run prior to loss
init that takes the same arguments as __init__
make_model (func): optional function that returns a ModelV2 object
given (policy, obs_space, action_space, config).
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (func): optional function that returns a
tuple of action and action logp tensors given
(policy, model, input_dict, obs_space, action_space, config).
If not specified, a default action distribution will be used.
existing_inputs (OrderedDict): when copying a policy, this
specifies an existing dict of placeholders to use instead of
defining new ones
existing_model (ModelV2): when copying a policy, this specifies
an existing model to clone and share weights with
get_batch_divisibility_req (func): optional function that returns
the divisibility requirement for sample batches
obs_include_prev_action_reward (bool): whether to include the
previous action and reward in the model input
Creates a copy of self using existing input placeholders.
Setup standard placeholders Setup model Setup action sampler Phase 1 init dynamically initialized on run Phase 2 init Note that there might be RNN state inputs at the end of the list By convention, the loss inputs are followed by state inputs and then the seq len tensor postprocessing might depend on variable init, so run it first here model forward pass for the loss (needed after postprocess to overwrite any tensor state from that call) can't handle arbitrary objects in TF override the update ops to be those of the model | 3,080 | en | 0.732407 |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, MaxNLocator
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.colors import BoundaryNorm
import matplotlib.image as mpimg
Uinf=1
R=15
PI=np.pi
alpha = 1
w = alpha/R
gamma= -w * 2*PI* R*R
angle = np.linspace(0, 360, 360)
cp = 1 - (4*(np.sin(angle*(PI/180) )**2) + (2*gamma*np.sin(angle *(PI/180)))/(PI*R*Uinf) + (gamma/(2*PI*R*Uinf))**2 )
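# The expression above expands the potential-flow pressure coefficient for a
# rotating cylinder, Cp(theta) = 1 - (2*sin(theta) + gamma/(2*pi*R*Uinf))**2;
# the cross term of the square is the 2*gamma*sin(theta)/(pi*R*Uinf) piece.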
fig, ax = plt.subplots()
ax.plot(angle, cp, '--k')
#ax.plot(angle, Z[edge_x,edge_y], 'ok', markersize=5)
#ax.set_ylim(limits[0], limits[1])
#Grid
ax.xaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.grid(which='major', color='#CCCCCC', linestyle='-', alpha=1)
ax.grid(which='minor', color='#CCCCCC', linestyle='--', alpha=0.5)
fig.savefig(f'./cp_{alpha}.png')
plt.close() | util/unit_test/potential_test/cp_potential.py | 946 | ax.plot(angle, Z[edge_x,edge_y], 'ok', markersize=5)ax.set_ylim(limits[0], limits[1]) Grid | 90 | en | 0.218231 |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/jrdsite"
# docs_base_url = "https://[org_name].github.io/jrdsite"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "jrdsite"
| jrdsite/config/docs.py | 313 | Configuration for docs
source_link = "https://github.com/[org_name]/jrdsite" docs_base_url = "https://[org_name].github.io/jrdsite" headline = "App that does everything" sub_heading = "Yes, you got that right the first time, everything" | 238 | en | 0.734275 |
# MIT License
#
# Copyright (c) 2017 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Deep Learning Course | Fall 2018
# Date Created: 2018-09-04
################################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.nn as nn
import torch
class TextGenerationModel(nn.Module):
def __init__(self, batch_size, seq_length, vocabulary_size,
lstm_num_hidden=256, lstm_num_layers=2, device='cuda:0', input_size=1):
super(TextGenerationModel, self).__init__()
self.emb_size = 64
self.device = device
# self.emb = nn.Embedding(batch_size * seq_length, 64)
# self.lstm = nn.LSTM(64, lstm_num_hidden, num_layers=lstm_num_layers, dropout=0)
self.lstm = nn.LSTM(input_size, lstm_num_hidden, num_layers=lstm_num_layers, dropout=0)
self.linear = nn.Linear(lstm_num_hidden, vocabulary_size)
self.h = None
def forward(self, x):
# Reset hidden layer for Training
if self.training:
self.h = None
# x = self.emb(x.squeeze(-1).type(torch.LongTensor).to(self.device))
out, h = self.lstm(x.transpose(0, 1), self.h)
out = self.linear(out)
# Handle hidden layer for Inference
if not self.training:
self.h = h
return out
def reset_hidden(self):
self.h = None
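# Hedged smoke test (illustrative shapes, not part of the original file):
if __name__ == "__main__":
    model = TextGenerationModel(batch_size=4, seq_length=30, vocabulary_size=87,
                                device='cpu')
    x = torch.randn(4, 30, 1)        # (batch, seq_length, input_size)
    out = model(x)                   # the LSTM consumes (seq, batch, input_size)
    assert out.shape == (30, 4, 87)  # per-step logits over the vocabulary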
| assignment_2/part3/model.py | 1,932 | MIT License Copyright (c) 2017 Tom Runia Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to conditions. Author: Deep Learning Course | Fall 2018 Date Created: 2018-09-04 self.emb = nn.Embedding(batch_size * seq_length, 64) self.lstm = nn.LSTM(64, lstm_num_hidden, num_layers=lstm_num_layers, dropout=0) Reset hidden layer for Training x = self.emb(x.squeeze(-1).type(torch.LongTensor).to(self.device)) Handle hidden layer for Inference | 791 | en | 0.775571 |
'''
xbmcswift2.cli.cli
------------------
The main entry point for the xbmcswift2 console script. CLI commands can be
registered in this module.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
import sys
from optparse import OptionParser
from xbmcswift2.cli.app import RunCommand
from xbmcswift2.cli.create import CreateCommand
# TODO: Make an ABC for Command
COMMANDS = {
RunCommand.command: RunCommand,
CreateCommand.command: CreateCommand,
}
# TODO: Make this usage dynamic based on COMMANDS dict
USAGE = '''%prog <command>
Commands:
create
Create a new plugin project.
run
Run an xbmcswift2 plugin from the command line.
Help:
To see options for a command, run `xbmcswift2 <command> -h`
'''
def main():
'''The entry point for the console script xbmcswift2.
    The 'xbmcswift2' script is command based, so the second argument is always
the command to execute. Each command has its own parser options and usages.
If no command is provided or the -h flag is used without any other
commands, the general help message is shown.
'''
parser = OptionParser()
if len(sys.argv) == 1:
parser.set_usage(USAGE)
parser.error('At least one command is required.')
# spy sys.argv[1] in order to use correct opts/args
command = sys.argv[1]
if command == '-h':
parser.set_usage(USAGE)
opts, args = parser.parse_args()
if command not in COMMANDS.keys():
parser.error('Invalid command')
# We have a proper command, set the usage and options list according to the
# specific command
manager = COMMANDS[command]
if hasattr(manager, 'option_list'):
for args, kwargs in manager.option_list:
parser.add_option(*args, **kwargs)
if hasattr(manager, 'usage'):
parser.set_usage(manager.usage)
opts, args = parser.parse_args()
    # Since we are calling a specific command's manager, we no longer need the
# actual command in sys.argv so we slice from position 1
manager.run(opts, args[1:])
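# Example invocations, per the USAGE text above:
#   xbmcswift2 create        # scaffold a new plugin project
#   xbmcswift2 run -h        # show options for the run command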
| resources/lib/xbmcswift2/cli/cli.py | 2,208 | The entry point for the console script xbmcswift2.
The 'xbcmswift2' script is command bassed, so the second argument is always
the command to execute. Each command has its own parser options and usages.
If no command is provided or the -h flag is used without any other
commands, the general help message is shown.
xbmcswift2.cli.cli
------------------
The main entry point for the xbmcswift2 console script. CLI commands can be
registered in this module.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
TODO: Make an ABC for Command TODO: Make this usage dynamic based on COMMANDS dict spy sys.argv[1] in order to use correct opts/args We have a proper command, set the usage and options list according to the specific command Since we are calling a specific comamnd's manager, we no longer need the actual command in sys.argv so we slice from position 1 | 899 | en | 0.808266 |
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'logstash', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
def parse_pyproject_array(name):
import os
import re
from ast import literal_eval
pattern = r'^{} = (\[.*?\])$'.format(name)
with open(os.path.join(HERE, 'pyproject.toml'), 'r', encoding='utf-8') as f:
# Windows \r\n prevents match
contents = '\n'.join(line.rstrip() for line in f.readlines())
array = re.search(pattern, contents, flags=re.MULTILINE | re.DOTALL).group(1)
return literal_eval(array)
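# Hedged example (illustrative pyproject.toml content): given a line such as
#   dependencies = ["datadog-checks-base>=11.0.0"]
# parse_pyproject_array('dependencies') returns ['datadog-checks-base>=11.0.0'].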
CHECKS_BASE_REQ = parse_pyproject_array('dependencies')[0]
setup(
name='datadog-logstash',
version=ABOUT['__version__'],
description='The Logstash check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent logstash check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-extras',
# Author details
author='ervansetiawan@gmail.com',
author_email='ervansetiawan@gmail.com',
# License
license='BSD-3-Clause',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.7',
],
# The package we're going to ship
packages=['datadog_checks.logstash'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': parse_pyproject_array('deps')},
# Extra files to ship with the wheel package
include_package_data=True,
)
| logstash/setup.py | 2,325 | To use a consistent encoding Get version info Get the long description from the README file Windows \r\n prevents match The project's main homepage. Author details License See https://pypi.org/classifiers The package we're going to ship Run-time dependencies Extra files to ship with the wheel package | 301 | en | 0.816398 |
import json
import os
import pathlib
from decouple import config
LIVE_DEMO_MODE = config('DEMO_MODE', cast=bool, default=False)
PORT = config('PORT', cast=int, default=5000)
APP_URL = 'https://bachelor-thesis.herokuapp.com/'
DEBUG_MODE = config('DEBUG', cast=bool, default=False)
NO_DELAYS = config('NO_DELAYS', cast=bool, default=False)
REDIS_URL = config('REDIS_URL')
DIALOGFLOW_ACCESS_TOKEN = config('DIALOGFLOW_ACCESS_TOKEN')
FACEBOOK_ACCESS_TOKEN = config('FACEBOOK_ACCESS_TOKEN')
TELEGRAM_ACCESS_TOKEN = config('TELEGRAM_ACCESS_TOKEN')
TWILIO_ACCESS_TOKEN = config('TWILIO_ACCESS_TOKEN')
TWILIO_ACCOUNT_SID = config('TWILIO_ACCOUNT_SID')
DATABASE_URL = config('DATABASE_URL')
ENABLE_CONVERSATION_RECORDING = config('RECORD_CONVERSATIONS', cast=bool, default=True)
CONTEXT_LOOKUP_RECENCY = 15
SUPPORT_CHANNEL_ID = -1001265422831
GOOGLE_SERVICE_ACCOUNT_KEY = config('GOOGLE_SERVICE_ACCOUNT_KEY').replace("\\n", "\n")
# Insert google private key into a template of the json configuration and add it to environment vars
_root_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
if not os.path.exists('tmp'):
os.makedirs('tmp')
google_service_account_file = _root_dir / 'tmp' / 'service-account-file.json'
template = json.load(open(_root_dir / "google-service-template.json", 'r'))
template["private_key"] = GOOGLE_SERVICE_ACCOUNT_KEY
json.dump(template, open(google_service_account_file, 'w+'))
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(google_service_account_file)
# Whether to remove the ForceReply markup in Telegram for any non-keyboard message (useful for demo)
ALWAYS_REMOVE_MARKUP = LIVE_DEMO_MODE
| settings.py | 1,635 | Insert google private key into a template of the json configuration and add it to environment vars Whether to remove the ForceReply markup in Telegram for any non-keyboard message (useful for demo) | 197 | en | 0.554569 |
# Generated by Django 2.1.4 on 2018-12-29 01:40
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='bankaccount',
old_name='customer_id',
new_name='customer',
),
migrations.AlterField(
model_name='bankaccount',
name='account_opened',
field=models.DateTimeField(default=datetime.datetime(2018, 12, 28, 19, 40, 54, 177327)),
),
]
| account/migrations/0002_auto_20181228_1940.py | 604 | Generated by Django 2.1.4 on 2018-12-29 01:40 | 45 | en | 0.637675 |
import logging
import os
import types
from datetime import datetime
import pandas as pd
from sdgym.data import load_dataset
from sdgym.evaluate import compute_scores
from sdgym.synthesizers import BaseSynthesizer
LOGGER = logging.getLogger(__name__)
BASE_DIR = os.path.dirname(__file__)
LEADERBOARD_PATH = os.path.join(BASE_DIR, 'leaderboard.csv')
DEFAULT_DATASETS = [
"adult",
"alarm",
"asia",
"census",
"child",
"covtype",
"credit",
"grid",
"gridr",
"insurance",
"intrusion",
"mnist12",
"mnist28",
"news",
"ring"
]
def compute_benchmark(synthesizer, datasets=DEFAULT_DATASETS, iterations=3):
"""Compute the scores of a synthesizer over a list of datasets.
The results are returned in a raw format as a ``pandas.DataFrame`` containing:
- One row for each dataset+scoring method (for example, a classifier)
- One column for each computed metric
- The columns:
- dataset
- distance
- name (of the scoring method)
- iteration
For example, evaluating a synthesizer on the ``adult`` and ``asia`` datasets with 2
iterations produces a table similar to this::
dataset name iter distance accuracy f1 syn_likelihood test_likelihood
adult DecisionTree... 0 0.0 0.79 0.65 NaN NaN
adult AdaBoost... 0 0.0 0.85 0.67 NaN NaN
adult Logistic... 0 0.0 0.79 0.66 NaN NaN
adult MLP... 0 0.0 0.84 0.67 NaN NaN
adult DecisionTree... 1 0.0 0.80 0.66 NaN NaN
adult AdaBoost... 1 0.0 0.86 0.68 NaN NaN
adult Logistic... 1 0.0 0.79 0.65 NaN NaN
adult MLP... 1 0.0 0.84 0.64 NaN NaN
asia Bayesian ... 0 0.0 NaN NaN -2.23 -2.24
asia Bayesian ... 1 0.0 NaN NaN -2.23 -2.24
"""
results = list()
for dataset_name in datasets:
LOGGER.info('Evaluating dataset %s', dataset_name)
train, test, meta, categoricals, ordinals = load_dataset(dataset_name, benchmark=True)
for iteration in range(iterations):
try:
synthesized = synthesizer(train, categoricals, ordinals)
scores = compute_scores(train, test, synthesized, meta)
scores['dataset'] = dataset_name
scores['iteration'] = iteration
results.append(scores)
except Exception:
LOGGER.exception('Error computing scores for %s on dataset %s - iteration %s',
_get_synthesizer_name(synthesizer), dataset_name, iteration)
return pd.concat(results, sort=False)
def _dataset_summary(grouped_df):
dataset = grouped_df.name
scores = grouped_df.mean().dropna()
scores.index = dataset + '/' + scores.index
return scores
def _summarize_scores(scores):
"""Computes a summary of the scores obtained by a synthesizer.
The raw scores returned by the ``compute_benchmark`` function are summarized
by grouping them by dataset and computing the average.
The results are then put in a ``pandas.Series`` object with one value per
dataset and metric.
As an example, the summary of a synthesizer that has been evaluated on the
``adult`` and the ``asia`` datasets produces the following output::
adult/accuracy 0.8765
adult/f1_micro 0.7654
adult/f1_macro 0.7654
asia/syn_likelihood -2.5364
asia/test_likelihood -2.4321
dtype: float64
Args:
scores (pandas.DataFrame):
Raw Scores dataframe as returned by the ``compute_benchmark`` function.
Returns:
pandas.Series:
Summarized scores series in the format described above.
"""
scores = scores.drop(['distance', 'iteration', 'name'], axis=1, errors='ignore')
grouped = scores.groupby('dataset').apply(_dataset_summary)
if isinstance(grouped, pd.Series):
# If more than one dataset, grouped result is a series
# with a multilevel index.
return grouped.droplevel(0)
# Otherwise, if there is only one dataset, it is DataFrame
return grouped.iloc[0]
def _get_synthesizer_name(synthesizer):
"""Get the name of the synthesizer function or class.
If the given synthesizer is a function, return its name.
If it is a method, return the name of the class to which
the method belongs.
Args:
synthesizer (function or method):
The synthesizer function or method.
Returns:
str:
Name of the function or the class to which the method belongs.
"""
if isinstance(synthesizer, types.MethodType):
synthesizer_name = synthesizer.__self__.__class__.__name__
else:
synthesizer_name = synthesizer.__name__
return synthesizer_name
def _get_synthesizers(synthesizers):
"""Get the dict of synthesizers from the input value.
If the input is a synthesizer or an iterable of synthesizers, get their names
and put them on a dict.
Args:
synthesizers (function, class, list, tuple or dict):
A synthesizer (function or method or class) or an iterable of synthesizers
or a dict containing synthesizer names as keys and synthesizers as values.
Returns:
dict[str, function]:
dict containing synthesizer names as keys and function as values.
Raises:
TypeError:
            if neither a synthesizer, an iterable, nor a dict is passed.
"""
if callable(synthesizers):
synthesizers = {_get_synthesizer_name(synthesizers): synthesizers}
if isinstance(synthesizers, (list, tuple)):
synthesizers = {
_get_synthesizer_name(synthesizer): synthesizer
for synthesizer in synthesizers
}
elif not isinstance(synthesizers, dict):
raise TypeError('`synthesizers` can only be a function, a class, a list or a dict')
for name, synthesizer in synthesizers.items():
# If the synthesizer is one of the SDGym Synthesizer classes,
        # create an instance and replace it with its fit_sample method.
if isinstance(synthesizer, type) and issubclass(synthesizer, BaseSynthesizer):
synthesizers[name] = synthesizer().fit_sample
return synthesizers
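# Editor's addition: a minimal sketch of how ``_get_synthesizers`` normalizes
# its input. ``my_synthesizer`` is a hypothetical stand-in, not a real SDGym one.
def _demo_get_synthesizers():
    def my_synthesizer(real_data, categorical_columns, ordinal_columns):
        return real_data
    # A bare callable, a list and a dict all normalize to {name: callable}.
    assert list(_get_synthesizers(my_synthesizer)) == ['my_synthesizer']
    assert list(_get_synthesizers([my_synthesizer])) == ['my_synthesizer']
    assert list(_get_synthesizers({'mine': my_synthesizer})) == ['mine']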
def benchmark(synthesizers, datasets=DEFAULT_DATASETS, iterations=3, add_leaderboard=True,
leaderboard_path=LEADERBOARD_PATH, replace_existing=True):
"""Compute the benchmark scores for the synthesizers and return a leaderboard.
    The ``synthesizers`` object can be a single synthesizer, an iterable of
    synthesizers, or a dict containing synthesizer names as keys and synthesizers as values.
    If ``add_leaderboard`` is ``True``, append the obtained scores to the leaderboard
    stored in the ``leaderboard_path``. By default, the leaderboard used is the one which
is included in the package, which contains the scores obtained by the SDGym Synthesizers.
If ``replace_existing`` is ``True`` and any of the given synthesizers already existed
in the leaderboard, the old rows are dropped.
Args:
synthesizers (function, class, list, tuple or dict):
The synthesizer or synthesizers to evaluate. It can be a single synthesizer
(function or method or class), or an iterable of synthesizers, or a dict
containing synthesizer names as keys and synthesizers as values. If the input
is not a dict, synthesizer names will be extracted from the given object.
datasets (list[str]):
Names of the datasets to use for the benchmark. Defaults to all the ones available.
iterations (int):
Number of iterations to perform over each dataset and synthesizer. Defaults to 3.
add_leaderboard (bool):
Whether to append the obtained scores to the previous leaderboard or not. Defaults
to ``True``.
leaderboard_path (str):
Path to where the leaderboard is stored. Defaults to the leaderboard included
with the package, which contains the scores obtained by the SDGym synthesizers.
replace_existing (bool):
Whether to replace old scores or keep them in the returned leaderboard. Defaults
to ``True``.
Returns:
pandas.DataFrame:
Table containing one row per synthesizer and one column for each dataset and metric.
"""
synthesizers = _get_synthesizers(synthesizers)
scores = list()
for synthesizer_name, synthesizer in synthesizers.items():
synthesizer_scores = compute_benchmark(synthesizer, datasets, iterations)
summary_row = _summarize_scores(synthesizer_scores)
summary_row.name = synthesizer_name
scores.append(summary_row)
leaderboard = pd.DataFrame(scores)
leaderboard['timestamp'] = datetime.utcnow()
if add_leaderboard:
old_leaderboard = pd.read_csv(
leaderboard_path,
index_col=0,
parse_dates=['timestamp']
)[leaderboard.columns]
if replace_existing:
            old_leaderboard.drop(labels=leaderboard.index, errors='ignore', inplace=True)
leaderboard = old_leaderboard.append(leaderboard, sort=False)
return leaderboard
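# Editor's addition: a hedged usage sketch for ``benchmark``. The identity
# synthesizer below is a made-up baseline, and 'adult' is assumed to be one
# of the available dataset names (it appears in the docstrings above).
if __name__ == '__main__':
    import numpy as np

    def identity_synthesizer(real_data, categorical_columns, ordinal_columns):
        # A do-nothing baseline: "synthesize" by copying the real data.
        return np.copy(real_data)

    leaderboard = benchmark(
        identity_synthesizer,
        datasets=['adult'],
        iterations=1,
        add_leaderboard=False,
    )
    print(leaderboard)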
| sdgym/benchmark.py | 9,819 | Get the name of the synthesizer function or class.
If the given synthesizer is a function, return its name.
If it is a method, return the name of the class to which
the method belongs.
Args:
synthesizer (function or method):
The synthesizer function or method.
Returns:
str:
Name of the function or the class to which the method belongs.
Get the dict of synthesizers from the input value.
If the input is a synthesizer or an iterable of synthesizers, get their names
and put them into a dict.
Args:
synthesizers (function, class, list, tuple or dict):
A synthesizer (function or method or class) or an iterable of synthesizers
or a dict containing synthesizer names as keys and synthesizers as values.
Returns:
dict[str, function]:
dict containing synthesizer names as keys and function as values.
Raises:
TypeError:
if neither a synthesizer, an iterable, nor a dict is passed.
Computes a summary of the scores obtained by a synthesizer.
The raw scores returned by the ``compute_benchmark`` function are summarized
by grouping them by dataset and computing the average.
The results are then put in a ``pandas.Series`` object with one value per
dataset and metric.
As an example, the summary of a synthesizer that has been evaluated on the
``adult`` and the ``asia`` datasets produces the following output::
adult/accuracy 0.8765
adult/f1_micro 0.7654
adult/f1_macro 0.7654
asia/syn_likelihood -2.5364
asia/test_likelihood -2.4321
dtype: float64
Args:
scores (pandas.DataFrame):
Raw Scores dataframe as returned by the ``compute_benchmark`` function.
Returns:
pandas.Series:
Summarized scores series in the format described above.
Compute the benchmark scores for the synthesizers and return a leaderboard.
The ``synthesizers`` object can be a single synthesizer, an iterable of
synthesizers, or a dict containing synthesizer names as keys and synthesizers as values.
If ``add_leaderboard`` is ``True``, append the obtained scores to the leaderboard
stored in the ``leaderboard_path``. By default, the leaderboard used is the one which
is included in the package, which contains the scores obtained by the SDGym Synthesizers.
If ``replace_existing`` is ``True`` and any of the given synthesizers already existed
in the leaderboard, the old rows are dropped.
Args:
synthesizers (function, class, list, tuple or dict):
The synthesizer or synthesizers to evaluate. It can be a single synthesizer
(function or method or class), or an iterable of synthesizers, or a dict
containing synthesizer names as keys and synthesizers as values. If the input
is not a dict, synthesizer names will be extracted from the given object.
datasets (list[str]):
Names of the datasets to use for the benchmark. Defaults to all the ones available.
iterations (int):
Number of iterations to perform over each dataset and synthesizer. Defaults to 3.
add_leaderboard (bool):
Whether to append the obtained scores to the previous leaderboard or not. Defaults
to ``True``.
leaderboard_path (str):
Path to where the leaderboard is stored. Defaults to the leaderboard included
with the package, which contains the scores obtained by the SDGym synthesizers.
replace_existing (bool):
Whether to replace old scores or keep them in the returned leaderboard. Defaults
to ``True``.
Returns:
pandas.DataFrame:
Table containing one row per synthesizer and one column for each dataset and metric.
Compute the scores of a synthesizer over a list of datasets.
The results are returned in a raw format as a ``pandas.DataFrame`` containing:
- One row for each dataset+scoring method (for example, a classifier)
- One column for each computed metric
- The columns:
- dataset
- distance
- name (of the scoring method)
- iteration
For example, evaluating a synthesizer on the ``adult`` and ``asia`` datasets with 2
iterations produces a table similar to this::
dataset name iter distance accuracy f1 syn_likelihood test_likelihood
adult DecisionTree... 0 0.0 0.79 0.65 NaN NaN
adult AdaBoost... 0 0.0 0.85 0.67 NaN NaN
adult Logistic... 0 0.0 0.79 0.66 NaN NaN
adult MLP... 0 0.0 0.84 0.67 NaN NaN
adult DecisionTree... 1 0.0 0.80 0.66 NaN NaN
adult AdaBoost... 1 0.0 0.86 0.68 NaN NaN
adult Logistic... 1 0.0 0.79 0.65 NaN NaN
adult MLP... 1 0.0 0.84 0.64 NaN NaN
asia Bayesian ... 0 0.0 NaN NaN -2.23 -2.24
asia Bayesian ... 1 0.0 NaN NaN -2.23 -2.24
If there is more than one dataset, the grouped result is a Series with a multilevel index. Otherwise, if there is only one dataset, it is a DataFrame. If the synthesizer is one of the SDGym Synthesizer classes, create an instance and replace it with its fit_sample method. | 5,457 | en | 0.825382 |
"""
# Hello
Demonstrate:
* conversion of regular python script into _Jupyter notebook_
* support **Markdown**
* this is a list
"""
from __future__ import absolute_import, print_function, division
"""
## Hello
This is a *hello world* function.
"""
def hello():
"""
This is a docstring
"""
print("hello")
"""
## Another Cell 1
"""
def main():
hello()
"""
### Run this
"""
if __name__ == '__main__':
def what():
main()
print(what())
"""
## Another Cell 2
"""
| tests/example.py | 508 | This is a docstring
# Hello
Demonstrate:
* conversion of regular python script into _Jupyter notebook_
* support **Markdown**
* this is a list | 143 | en | 0.831156 |
"""Parser for envpy config parser"""
# Errors
class EnvpyError(Exception):
"""Base class for all envpy errors."""
class MissingConfigError(EnvpyError):
"""Raised when a config item is missing from the environment and has
no default.
"""
class ValueTypeError(EnvpyError):
"""Raised when a Schema is created with an invalid value type"""
class ParsingError(EnvpyError):
"""Raised when the value pulled from the environment cannot be parsed
as the given value type."""
# Parsers
def _parse_str(value):
return value
def _parse_int(value):
return int(value)
def _parse_float(value):
return float(value)
def _parse_bool(value):
is_true = (
value.upper() == "TRUE"
or value == "1"
)
is_false = (
value.upper() == "FALSE"
or value == "0"
)
if is_true:
return True
elif is_false:
return False
else:
raise ValueError()
PARSERS = {
str: _parse_str,
int: _parse_int,
float: _parse_float,
bool: _parse_bool,
}
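# Editor's addition: a hedged sketch of the parser behavior. Note that
# ``_parse_bool`` only accepts "TRUE"/"FALSE" (any case) and "1"/"0".
def _demo_parsers():
    assert PARSERS[bool]("True") is True
    assert PARSERS[bool]("0") is False
    assert PARSERS[int]("42") == 42
    assert PARSERS[float]("3.5") == 3.5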
# Parsing logic
SENTINAL = object()
class Schema: #pylint: disable=too-few-public-methods
"""Schema that describes a single environment config item
Args:
value_type (optional, default=str): The type that envpy should try to
parse the environment variable into.
default (optional): The value that should be used if the variable
cannot be found in the environment.
"""
def __init__(self, value_type=str, default=SENTINAL):
if value_type not in PARSERS:
raise ValueTypeError()
self._parser = PARSERS.get(value_type)
self._default = default
def parse(self, key, value):
"""Parse the environment value for a given key against the schema.
Args:
key: The name of the environment variable.
value: The value to be parsed.
"""
if value is not None:
try:
return self._parser(value)
except Exception:
raise ParsingError("Error parsing {}".format(key))
elif self._default is not SENTINAL:
return self._default
else:
raise KeyError(key)
def parse_env(config_schema, env):
"""Parse the values from a given environment against a given config schema
Args:
config_schema: A dict which maps the variable name to a Schema object
that describes the requested value.
env: A dict which represents the value of each variable in the
environment.
"""
try:
return {
key: item_schema.parse(key, env.get(key))
for key, item_schema in config_schema.items()
}
except KeyError as error:
raise MissingConfigError(
"Required config not set: {}".format(error.args[0])
)
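# Editor's addition: a minimal usage sketch for ``parse_env``. The keys and the
# fake environment below are hypothetical; real code would pass dict(os.environ).
def _demo_parse_env():
    schema = {
        'PORT': Schema(value_type=int, default=8080),
        'DEBUG': Schema(value_type=bool, default=False),
        'API_KEY': Schema(),  # str with no default, so it is required
    }
    env = {'PORT': '9000', 'API_KEY': 'abc123'}
    config = parse_env(schema, env)
    assert config == {'PORT': 9000, 'DEBUG': False, 'API_KEY': 'abc123'}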
| envpy/parser.py | 2,871 | Base class for all envpy errors.
Raised when a config item is missing from the environment and has
no default.
Raised when the value pulled from the environment cannot be parsed
as the given value type.
Schema that describes a single environment config item
Args:
value_type (optional, default=str): The type that envpy should try to
parse the environment variable into.
default (optional): The value that should be used if the variable
cannot be found in the environment.
Raised when a Schema is created with an invalid value type
Parse the environment value for a given key against the schema.
Args:
key: The name of the environment variable.
value: The value to be parsed.
Parse the values from a given environment against a given config schema
Args:
config_schema: A dict which maps the variable name to a Schema object
that describes the requested value.
env: A dict which represents the value of each variable in the
environment.
Parser for envpy configuration values
Errors Parsers Parsing logicpylint: disable=too-few-public-methods | 1,094 | en | 0.738606 |
# -*- coding: utf-8 -*-
"""Application configuration.
Most configuration is set via environment variables.
For local development, use a .env file to set
environment variables.
"""
from environs import Env
env = Env()
env.read_env()
ENV = env.str("FLASK_ENV", default="production")
DEBUG = ENV == "development"
SQLALCHEMY_DATABASE_URI = env.str("DATABASE_URL")
SECRET_KEY = env.str("SECRET_KEY")
SEND_FILE_MAX_AGE_DEFAULT = env.int("SEND_FILE_MAX_AGE_DEFAULT")
BCRYPT_LOG_ROUNDS = env.int("BCRYPT_LOG_ROUNDS", default=13)
DEBUG_TB_ENABLED = DEBUG
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = "simple" # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
APPLICATION_ROOT = "/"
SCRIPT_NAME = "/"
AUTH_METHOD = env.str("AUTH_METHOD") # can be 'LDAP', 'OMERO'
if AUTH_METHOD == "LDAP":
    LDAP_PORT = env.int("LDAP_PORT", 389)  # 389 is the standard LDAP port
LDAP_HOST = env.str("LDAP_HOST", "localhost")
LDAP_READONLY = env.bool("LDAP_READONLY", True)
LDAP_BASE_DN = env.str("LDAP_BASE_DN", "")
LDAP_BIND_USER_DN = env.str("LDAP_BIND_USER_DN")
LDAP_BIND_USER_PASSWORD = env.str("LDAP_BIND_USER_PASSWORD")
LDAP_BIND_DIRECT_CREDENTIALS = env.bool("LDAP_BIND_DIRECT_CREDENTIALS")
LDAP_ALWAYS_SEARCH_BIND = env.bool("LDAP_ALWAYS_SEARCH_BIND")
LDAP_USER_LOGIN_ATTR = env.str("LDAP_USER_LOGIN_ATTR", "uid")
LDAP_USER_RDN_ATTR = env.str("LDAP_USER_RDN_ATTR", "uid")
LDAP_USER_DN = env.str("LDAP_USER_DN")
LDAP_USER_SEARCH_SCOPE = env.str("LDAP_USER_SEARCH_SCOPE", "LEVEL")
LDAP_SEARCH_FOR_GROUPS = env.bool("LDAP_SEARCH_FOR_GROUPS", False)
elif AUTH_METHOD == "OMERO":
OMERO_HOST = env.str("OMERO_HOST", "localhost")
OMERO_PORT = env.int("OMERO_PORT", 4064)
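# Editor's addition: a hypothetical .env sketch for local development; every
# value below is a placeholder, not a real credential or recommendation:
#
#   FLASK_ENV=development
#   DATABASE_URL=sqlite:////tmp/dev.db
#   SECRET_KEY=change-me
#   SEND_FILE_MAX_AGE_DEFAULT=0
#   AUTH_METHOD=OMERO
#   OMERO_HOST=localhost
#   OMERO_PORT=4064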
| cataloger/settings.py | 1,712 | Application configuration.
Most configuration is set via environment variables.
For local development, use a .env file to set
environment variables.
-*- coding: utf-8 -*- Can be "memcached", "redis", etc. can be 'LDAP', 'OMERO' | 231 | en | 0.729694 |
# -*- coding: utf-8 -*-
"""Test human2bytes function."""
import pytest
from pcof import bytesconv
@pytest.mark.parametrize(
"size, unit, result",
[
(1, "KB", "1024.00"),
(1, "MB", "1048576.00"),
(1, "GB", "1073741824.00"),
(1, "TB", "1099511627776.00"),
(1, "PB", "1125899906842624.00"),
(1, "EB", "1152921504606846976.00"),
],
)
def test_human2bytes(size, unit, result):
assert bytesconv.human2bytes(size, unit) == result
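# Editor's note: as the expected values above suggest, human2bytes returns a
# *formatted string* (two decimals by default), e.g. "1024.00" rather than 1024.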
@pytest.mark.parametrize(
"size, unit, precision, result",
[
(1, "KB", 0, "1024"),
(2, "GB", 0, "2147483648"),
(2, "GB", 1, "2147483648.0"),
(2, "GB", 3, "2147483648.000"),
],
)
def test_human2bytes_precision(size, unit, precision, result):
assert bytesconv.human2bytes(size, unit, precision=precision) == result
@pytest.mark.parametrize(
"size, unit, base, result",
[
(1, "KB", 1000, "1000.00"),
(1, "MB", 1000, "1000000.00"),
(1, "GB", 1000, "1000000000.00"),
(1, "TB", 1000, "1000000000000.00"),
(4, "TB", 1000, "4000000000000.00"),
(1, "PB", 1000, "1000000000000000.00"),
(1, "EB", 1000, "1000000000000000000.00"),
],
)
def test_human2bytes_base(size, unit, base, result):
assert bytesconv.human2bytes(size, unit, base=base) == result
def test_human2bytes_raise():
with pytest.raises(ValueError, match="value is not a number"):
bytesconv.human2bytes("notnumber", "KB")
with pytest.raises(
ValueError, match="invalid unit. It must be KB, MB, GB, TB, PB, EB, ZB"
):
bytesconv.human2bytes(1, "XX")
# vim: ts=4
| tests/test_bytesconv_human2bytes.py | 1,665 | Test human2bytes function.
-*- coding: utf-8 -*- vim: ts=4 | 60 | en | 0.566855 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import multiprocessing
import sys
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .framework import Program, default_main_program, Variable
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory
__all__ = ['Executor', 'global_scope', 'scope_guard']
g_scope = core.Scope()
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
def global_scope():
"""
Get the global/default scope instance. There are a lot of APIs use
:code:`global_scope` as its default value, e.g., :code:`Executor.run`
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(fluid.global_scope().find_var("data").get_tensor())
Returns:
Scope: The global/default scope instance.
"""
return g_scope
def _switch_scope(scope):
global g_scope
ex = g_scope
g_scope = scope
return ex
@signature_safe_contextmanager
def scope_guard(scope):
"""
    Change the global/default scope instance by Python `with` statement. All
    variables in runtime will be assigned to the new scope.
Args:
scope: The new global/default scope.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(new_scope.find_var("data").get_tensor())
"""
ex = _switch_scope(scope)
yield
_switch_scope(ex)
def as_numpy(tensor):
"""
    Convert a Tensor to a numpy.ndarray. It only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
tensor = new_scope.find_var("data").get_tensor()
fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())
Args:
        tensor(Variable): an instance of Tensor
Returns:
numpy.ndarray
"""
if isinstance(tensor, core.LoDTensorArray):
return [as_numpy(t) for t in tensor]
if isinstance(tensor, list):
return [as_numpy(t) for t in tensor]
assert isinstance(tensor, core.LoDTensor)
lod = tensor.lod()
if len(lod) > 0:
raise RuntimeError("Some of your fetched tensors hold LoD information. \
They can not be completely cast to Python ndarray. \
Please set the parameter 'return_numpy' as 'False' to \
return LoDTensor itself directly.")
if tensor._is_initialized():
return np.array(tensor)
else:
return None
def has_feed_operators(block, feed_targets, feed_holder_name):
""" Check whether the block already has feed operators.
Return false if the block does not have any feed operators.
If some feed operators have been prepended to the block, check that
the info contained in these feed operators matches the feed_targets
and feed_holder_name. Raise exception when any mismatch is found.
Return true when the block has feed operators with matching info.
Args:
block: a block instance (typically global block of a program)
feed_targets: a dictionary of {feed_target_name: feed_target_data}
feed_holder_name: the name of the variable that holds the data of
all feed targets. The type of this feed_holder variable is
FEED_MINIBATCH, which is essentially vector<LoDTensor>.
Returns:
A boolean value that indicates whether a block has feed operators
that match the info contained in feed_targets and feed_holder_name.
"""
feed_count = 0
for op in block.ops:
if op.desc.type() == 'feed':
feed_count += 1
assert op.desc.input('X')[0] == feed_holder_name
feed_target_name = op.desc.output('Out')[0]
if feed_target_name not in feed_targets:
raise Exception("'feed_targets' does not have {} variable".
format(feed_target_name))
else:
break
if feed_count > 0 and feed_count != len(feed_targets):
raise Exception(
"Feed operators in program desc do not match 'feed_targets'")
return feed_count > 0
def has_fetch_operators(block, fetch_targets, fetch_holder_name):
""" Check whether the block already has fetch operators.
Return false if the block does not have any fetch operators.
If some fetch operators have been appended to the block, check that
the info contained in these fetch operators matches the fetch_targets
and fetch_holder_name. Raise exception when any mismatch is found.
Return true when the block has fetch operators with matching info.
Args:
block: a block instance (typically global block of a program)
fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
fetch_holder_name: the name of the variable that holds the data of
all fetch targets. The type of this fetch_holder variable is
FETCH_LIST, which is essentially vector<LoDTensor>.
Return:
A boolean value that indicates whether a block has fetch operators
that match the info contained in fetch_targets and fetch_holder_name.
"""
fetch_count = 0
for op in block.ops:
if op.desc.type() == 'fetch':
fetch_count += 1
assert op.desc.output('Out')[0] == fetch_holder_name
fetch_target_name = op.desc.input('X')[0]
if fetch_target_name not in [
var.desc.name() for var in fetch_targets
]:
raise Exception("'fetch_targets' does not have {} variable".
format(fetch_target_name))
idx = op.desc.attr('col')
assert fetch_target_name == fetch_targets[idx].desc.name()
if fetch_count > 0 and fetch_count != len(fetch_targets):
raise Exception(
"Fetch operators in program desc do not match 'fetch_targets'")
return fetch_count > 0
def _fetch_var(name, scope=None, return_numpy=True):
"""
Fetch the value of the variable with the given name from the
given scope.
Args:
name(str): name of the variable. Typically, only persistable variables
can be found in the scope used for running the program.
scope(core.Scope|None): scope object. It should be the scope where
you pass to Executor.run() when running your program.
If None, global_scope() will be used. Default None.
return_numpy(bool): whether convert the tensor to numpy.ndarray.
Default True.
Returns:
LodTensor|numpy.ndarray
"""
assert isinstance(name, str)
if scope is None:
scope = global_scope()
assert isinstance(scope, core._Scope)
var = scope.find_var(name)
assert var is not None, (
"Cannot find " + name + " in scope. Perhaps you need to make the"
" variable persistable by using var.persistable = True in your"
" program.")
tensor = var.get_tensor()
if return_numpy:
tensor = as_numpy(tensor)
return tensor
def _to_name_str(var):
if isinstance(var, Variable):
return var.desc.name()
elif isinstance(var, str):
return var
elif isinstance(var, six.string_types):
return str(var)
else:
raise TypeError(str(var) + " should be Variable or str")
def _get_strong_program_cache_key(program, feed, fetch_list):
return str(id(program)) + _get_program_cache_key(feed, fetch_list)
def _get_program_cache_key(feed, fetch_list):
feed_var_names = list(feed.keys())
fetch_var_names = list(map(_to_name_str, fetch_list))
return str(feed_var_names + fetch_var_names)
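# Editor's addition: a hedged sketch of the cache-key format. The variable
# names are hypothetical; real keys are built from the feed/fetch names.
def _demo_program_cache_key():
    feed = {'X': None, 'Y': None}
    fetch_list = ['loss']
    return _get_program_cache_key(feed, fetch_list)  # e.g. "['X', 'Y', 'loss']"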
def _as_lodtensor(data, place):
"""
    Convert numpy.ndarray to Tensor. It only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
>>> import paddle.fluid as fluid
>>> place = fluid.CPUPlace()
>>> exe = fluid.executor(place)
>>> data = np.array(size=(100, 200, 300))
>>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
>>> ...
Args:
        data(numpy.ndarray): an instance of array
Returns:
LoDTensor
"""
if isinstance(data, list):
raise RuntimeError("Some of your feed data hold LoD information. \
They can not be completely cast from a list of Python \
ndarray to LoDTensor. Please convert data to LoDTensor \
directly before feeding the data.\
")
# single tensor case
tensor = core.LoDTensor()
tensor.set(data, place)
return tensor
class Executor(object):
"""
An Executor in Python, supports single/multiple-GPU running,
and single/multiple-CPU running. Python executor takes a program,
adds feed operators and fetch operators to this program according
to feed map and fetch_list. Feed map provides input data for the
program. fetch_list provides the variables(or names) that user wants
to get after program runs. Note: the executor will run all operators
in the program but not only the operators dependent by the fetch_list.
It stores the global variables into the global scope, and creates a
local scope for the temporary variables. The contents in local scope
may be discarded after every minibatch forward/backward finished.
But the global scope variables will be persistent through different runs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import numpy
import os
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
# Run the startup program once and only once.
# Not need to optimize/compile the startup program.
startup_program.random_seed=1
exe.run(startup_program)
# Run the main program directly without compile.
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(train_program,
feed={"X": x},
fetch_list=[loss.name])
# Or, compiled the program and run. See `CompiledProgram`
# for more detail.
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, fluid will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
# greater than CPU_NUM, if not, the process will be
# failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel(
loss_name=loss.name)
loss_data, = exe.run(compiled_prog,
feed={"X": x},
fetch_list=[loss.name])
Args:
place(fluid.CPUPlace|fluid.CUDAPlace(n)): indicate the executor run on which device.
"""
def __init__(self, place):
self.place = place
self.program_caches = dict()
self.ctx_caches = dict()
self.scope_caches = dict()
self.var_caches = dict()
p = core.Place()
p.set_place(self.place)
self._default_executor = core.Executor(p)
self._closed = False
def _get_var_cache(self, program_cache_key):
return self.var_caches.get(program_cache_key, None)
def _get_scope_cache(self, program_cache_key):
return self.scope_caches.get(program_cache_key, None)
def _get_ctx_cache(self, program_cache_key):
return self.ctx_caches.get(program_cache_key, None)
def _get_program_cache(self, program_cache_key):
return self.program_caches.get(program_cache_key, None)
def _add_program_cache(self, program_cache_key, program):
self.program_caches[program_cache_key] = program
def _add_ctx_cache(self, ctx_cache_key, ctx):
self.ctx_caches[ctx_cache_key] = ctx
def _add_scope_cache(self, scope_cache_key, scope):
self.scope_caches[scope_cache_key] = scope
def _add_var_cache(self, var_cache_key, var):
self.var_caches[var_cache_key] = var
def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
fetch_var_name):
tmp_program = program.clone()
global_block = tmp_program.global_block()
if feed_var_name in global_block.vars:
feed_var = global_block.var(feed_var_name)
else:
feed_var = global_block.create_var(
name=feed_var_name,
type=core.VarDesc.VarType.FEED_MINIBATCH,
persistable=True)
if fetch_var_name in global_block.vars:
fetch_var = global_block.var(fetch_var_name)
else:
fetch_var = global_block.create_var(
name=fetch_var_name,
type=core.VarDesc.VarType.FETCH_LIST,
persistable=True)
# prepend feed operators
if not has_feed_operators(global_block, feed, feed_var_name):
for i, name in enumerate(feed):
out = global_block.var(name)
global_block._prepend_op(
type='feed',
inputs={'X': [feed_var]},
outputs={'Out': [out]},
attrs={'col': i})
# append fetch_operators
if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
for i, var in enumerate(fetch_list):
assert isinstance(var, Variable) or isinstance(
var, six.string_types), (
"Wrong type for fetch_list[%s]: %s" % (i, type(var)))
global_block.append_op(
type='fetch',
inputs={'X': [var]},
outputs={'Out': [fetch_var]},
attrs={'col': i})
return tmp_program
def _feed_data(self, program, feed, feed_var_name, scope):
# feed var to framework
for op in program.global_block().ops:
if op.desc.type() == 'feed':
feed_target_name = op.desc.output('Out')[0]
cur_feed = feed[feed_target_name]
if not isinstance(cur_feed, core.LoDTensor):
cur_feed = _as_lodtensor(cur_feed, self.place)
idx = op.desc.attr('col')
core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
else:
break
def _fetch_data(self, fetch_list, fetch_var_name, scope):
outs = [
core.get_fetch_variable(scope, fetch_var_name, i)
for i in six.moves.range(len(fetch_list))
]
return outs
    # TODO(typhoonzero): Define "no longer use" meaning? Can a user create
    # a new Executor for the same program and run?
    # TODO(panyx0718): Why doesn't ParallelExecutor have close?
def close(self):
"""
Close this executor.
You can no longer use this executor after calling this method.
For the distributed training, this method would free the resource
on PServers related to the current Trainer.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu = fluid.CPUPlace()
exe = fluid.Executor(cpu)
# execute training or testing
exe.close()
"""
if not self._closed:
self._default_executor.close()
self._closed = True
def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
return_numpy):
exe = program._executor
if isinstance(feed, dict):
feed_tensor_dict = dict()
for feed_name in feed:
feed_tensor = feed[feed_name]
if not isinstance(feed_tensor, core.LoDTensor):
feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor needs to be split;
                    # this is fast on CPU
feed_tensor.set(feed[feed_name], core.CPUPlace())
feed_tensor_dict[feed_name] = feed_tensor
exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
elif isinstance(feed, list) or isinstance(feed, tuple):
if len(feed) != len(program._places):
raise ValueError(
"Feed a list of tensor, the list should be the same size as places"
)
res = list()
for i, each in enumerate(feed):
if not isinstance(each, dict):
raise TypeError(
"Each element of feed list should be a dict")
res_dict = dict()
for feed_name in each:
tensor = each[feed_name]
if not isinstance(tensor, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(tensor, program._places[i])
tensor = tmp
res_dict[feed_name] = tensor
res.append(res_dict)
exe.feed_tensors_into_local_scopes(res)
fetch_var_names = list(map(_to_name_str, fetch_list))
exe.run(fetch_var_names, fetch_var_name)
arr = scope.find_var(fetch_var_name).get_lod_tensor_array()
if return_numpy:
return as_numpy(arr)
return [arr[i] for i in range(len(arr))]
def _check_fetch_vars_persistable(self, program, fetch_list):
for var in fetch_list:
if isinstance(var, Variable):
persistable = var.persistable
else:
block_num = program.desc.num_blocks()
persistable = None
var_name = cpt.to_bytes(var)
for i in six.moves.range(block_num):
var_desc = program.desc.block(i).find_var(var_name)
if var_desc:
persistable = var_desc.persistable()
break
assert persistable is not None, "Variable {} is not found".format(
var)
if not persistable:
                logging.warning("""
                Detected that build_strategy.memory_optimize = True, but some variables in the fetch
                list are not persistable. You may get wrong fetched values, or an exception may be
                thrown about not being able to find a variable from the fetch list.
                TO FIX this:
                    # Sample
                    conv1 = fluid.layers.conv2d(data, 4, 5, 1, act=None)
                    # if you need to fetch conv1, then:
                    conv1.persistable = True
                """)
def run(self,
program=None,
feed=None,
fetch_list=None,
feed_var_name='feed',
fetch_var_name='fetch',
scope=None,
return_numpy=True,
use_program_cache=False):
"""
Run program by this Executor. Feed data by feed map, fetch result by
fetch_list. Python executor takes a program, add feed operators and
fetch operators to this program according to feed map and fetch_list.
Feed map provides input data for the program. fetch_list provides
        the variables (or names) that the user wants to get after the program runs.
Note: the executor will run all operators in the program but not
only the operators dependent by the fetch_list.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# First create the Executor.
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(feed={'X': x},
fetch_list=[loss.name])
Args:
program(Program|CompiledProgram): the program that need to run,
if not provided, then default_main_program (not compiled) will be used.
feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
fetch_list(list): a list of variable or variable names that user
wants to get, this method will return them according to this list.
feed_var_name(str): the name for the input variable of
feed Operator.
fetch_var_name(str): the name for the output variable of
fetch Operator.
scope(Scope): the scope used to run this program, you can switch
it to different scope. default is global_scope
return_numpy(bool): if convert the fetched tensor to numpy
use_program_cache(bool): whether to use the cached program
settings across batches. Setting it be true would be faster
only when (1) the program is not compiled with data parallel,
and (2) program, feed variable names and fetch_list variable
names do not changed compared to the last step.
Returns:
list(numpy.array): fetch result according to fetch_list.
"""
try:
return self._run_impl(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache)
except Exception as e:
if not isinstance(e, core.EOFException):
print("An exception was thrown!\n {}".format(str(e)))
raise e
def _run_impl(self, program, feed, fetch_list, feed_var_name,
fetch_var_name, scope, return_numpy, use_program_cache):
if self._closed:
raise RuntimeError("Attempted to use a closed Executor")
if scope is None:
scope = global_scope()
if fetch_list is None:
fetch_list = []
compiled = isinstance(program, compiler.CompiledProgram)
# For backward compatibility, run directly.
if not compiled:
return self._run_program(
program,
self._default_executor,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache)
else:
if fetch_list and program._is_data_parallel and program._program and \
program._build_strategy._use_legacy_memory_optimize_strategy:
self._check_fetch_vars_persistable(program._program, fetch_list)
program._compile(scope, self.place)
if program._is_data_parallel:
return self._run_parallel(
program,
scope=scope,
feed=feed,
fetch_list=fetch_list,
fetch_var_name=fetch_var_name,
return_numpy=return_numpy)
elif program._is_inference:
return self._run_inference(program._executor, feed)
else:
# TODO(panyx0718): Can compile program to optimize executor
# performance.
# TODO(panyx0718): executor should be able to run graph.
assert program._program, "CompiledProgram is compiled from graph, can only run with_data_parallel."
# use_program_cache is not valid with CompiledProgram
return self._run_program(
program._program,
self._default_executor,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=False)
def _run_program(self, program, exe, feed, fetch_list, feed_var_name,
fetch_var_name, scope, return_numpy, use_program_cache):
if feed is None:
feed = {}
elif isinstance(feed, (list, tuple)):
assert len(feed) == 1, "Not compiled with data parallel"
feed = feed[0]
if not isinstance(feed, dict):
raise TypeError(
"feed requires dict as its Parameter. But you passed in %s" %
(type(feed)))
if program is None:
program = default_main_program()
if not isinstance(program, Program):
raise TypeError(
"Executor requires Program as its Parameter. But you passed in %s"
% (type(program)))
if use_program_cache:
cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
cached_program = self._get_program_cache(cache_key)
cached_ctx = self._get_ctx_cache(cache_key)
cached_scope = self._get_scope_cache(cache_key)
cached_var = self._get_var_cache(cache_key)
if cached_program is None:
cached_program = self._add_feed_fetch_ops(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name)
self._add_program_cache(cache_key, cached_program)
fetch_list_str = list(map(_to_name_str, fetch_list))
cached_ctx = self._default_executor.prepare_ctx_cache(
cached_program.desc, 0, fetch_list_str, False)
cached_var = self._default_executor.create_variables(
cached_program.desc, scope, 0)
# currently, we cache program, vars, sub_scope here
# we suppose that in a life cycle of training, a user
# will not create many programs. So, here the basic
# rule of caching is to cache all unseen (program, var, scope)
                # when a user uses use_program_cache.
cached_scope = scope.new_scope()
self._add_ctx_cache(cache_key, cached_ctx)
self._add_var_cache(cache_key, cached_var)
self._add_scope_cache(cache_key, cached_scope)
program = cached_program
ctx = cached_ctx
scope = cached_scope
var = cached_var
else:
program = self._add_feed_fetch_ops(
program=program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name)
self._feed_data(program, feed, feed_var_name, scope)
if not use_program_cache:
exe.run(program.desc, scope, 0, True, True, fetch_var_name)
else:
exe.run_cached_prepared_ctx(ctx, scope, False, False, False)
outs = self._fetch_data(fetch_list, fetch_var_name, scope)
if return_numpy:
outs = as_numpy(outs)
return outs
def _run_inference(self, exe, feed):
return exe.run(feed)
def _dump_debug_info(self, program=None, trainer=None):
with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
fout.write(str(trainer))
if program._fleet_opt:
with open("fleet_desc.prototxt", "w") as fout:
fout.write(str(program._fleet_opt["fleet_desc"]))
def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):
filelist_length = len(dataset.dataset.get_filelist())
if filelist_length < pipeline_num:
pipeline_num = filelist_length
print(
"Pipeline training: setting the pipeline num to %d is enough because there are only %d files"
% (filelist_length, filelist_length))
if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]:
print(
"Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
% (filelist_length // pipeline_num, filelist_length))
pipeline_opt["concurrency_list"][
0] = filelist_length // pipeline_num
dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num)
return pipeline_num
def _prepare_trainer(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
if scope is None:
scope = global_scope()
if fetch_list is None:
fetch_list = []
if fetch_info is None:
fetch_info = []
assert len(fetch_list) == len(fetch_info)
compiled = isinstance(program, compiler.CompiledProgram)
if not compiled:
# TODO: Need a better way to distinguish and specify different execution mode
if program._pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program._pipeline_opt)
else:
trainer = TrainerFactory()._create_trainer(program._fleet_opt)
trainer._set_program(program)
else:
if program._pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program.program._pipeline_opt)
else:
trainer = TrainerFactory()._create_trainer(
program.program._fleet_opt)
trainer._set_program(program.program)
# The following thread_num-determined logic will be deprecated
if thread <= 0:
if dataset.thread_num <= 0:
raise RuntimeError(
"You should set thread num first, either in Dataset"
"or in Executor.train_from_dataset")
else:
trainer._set_thread(dataset.thread_num)
else:
trainer._set_thread(thread)
trainer._set_debug(debug)
trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
return scope, trainer
def infer_from_dataset(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
"""
The document of infer_from_dataset is almost the same as
train_from_dataset, except that in distributed training,
push gradients will be disabled in infer_from_dataset.
infer_from_dataset() can be used for evaluation in multi-thread
very easily.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed. default is None
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. The actual number
of thread will be min(Dataset.thread_num, thread) if thread > 0, default is 0
debug(bool): whether a user wants to run infer_from_dataset, default is False
fetch_list(Variable List): fetch variable list, each variable
will be printed during training, default is None
fetch_info(String List): print information for each variable, default is None
print_period(int): the number of mini-batches for each print, default is 100
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.infer_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
"""
        if dataset is None:
raise RuntimeError("dataset is needed and should be initialized")
dataset._prepare_to_run()
scope, trainer = self._prepare_trainer(
program=program,
dataset=dataset,
scope=scope,
thread=thread,
debug=debug,
fetch_list=fetch_list,
fetch_info=fetch_info,
print_period=print_period)
trainer._set_infer(True)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
self._default_executor.run_from_dataset(program.desc, scope,
dataset.dataset,
trainer._desc())
dataset._finish_to_run()
return None
def train_from_dataset(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
"""
Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
Given a program, either a program or compiled program, train_from_dataset will
consume all data samples in dataset. Input scope can be given by users. By default,
scope is global_scope(). The total number of thread run in training is `thread`.
Thread number used in training will be minimum value of threadnum in Dataset and
the value of thread in this interface. Debug can be set so that executor will display
Run-Time for all operators and the throughputs of current training task.
Note: train_from_dataset will destroy all resources created within executor for each run.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed.
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. The actual number
of thread will be min(Dataset.thread_num, thread)
debug(bool): whether a user wants to run train_from_dataset
fetch_list(Variable List): fetch variable list, each variable
will be printed during training
fetch_info(String List): print information for each variable
print_period(int): the number of mini-batches for each print
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
"""
        if dataset is None:
            raise RuntimeError("dataset is needed and should be initialized")
if program._pipeline_opt:
thread = self._adjust_pipeline_resource(program._pipeline_opt,
dataset, thread)
dataset._prepare_to_run()
scope, trainer = self._prepare_trainer(
program=program,
dataset=dataset,
scope=scope,
thread=thread,
debug=debug,
fetch_list=fetch_list,
fetch_info=fetch_info,
print_period=print_period)
trainer._gen_trainer_desc()
self._dump_debug_info(program=program, trainer=trainer)
self._default_executor.run_from_dataset(program.desc, scope,
dataset.dataset,
trainer._desc())
dataset._finish_to_run()
return None
| python/paddle/fluid/executor.py | 40,765 | An Executor in Python, supports single/multiple-GPU running,
and single/multiple-CPU running. Python executor takes a program,
adds feed operators and fetch operators to this program according
to feed map and fetch_list. Feed map provides input data for the
program. fetch_list provides the variables(or names) that user wants
to get after program runs. Note: the executor will run all operators
in the program but not only the operators dependent by the fetch_list.
It stores the global variables into the global scope, and creates a
local scope for the temporary variables. The contents in local scope
may be discarded after every minibatch forward/backward finished.
But the global scope variables will be persistent through different runs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.compiler as compiler
import numpy
import os
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
# Run the startup program once and only once.
# Not need to optimize/compile the startup program.
startup_program.random_seed=1
exe.run(startup_program)
# Run the main program directly without compile.
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = exe.run(train_program,
feed={"X": x},
fetch_list=[loss.name])
# Or, compiled the program and run. See `CompiledProgram`
# for more detail.
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, fluid will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
# greater than CPU_NUM, if not, the process will be
# failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
compiled_prog = compiler.CompiledProgram(
train_program).with_data_parallel(
loss_name=loss.name)
loss_data, = exe.run(compiled_prog,
feed={"X": x},
fetch_list=[loss.name])
Args:
place(fluid.CPUPlace|fluid.CUDAPlace(n)): indicate the executor run on which device.
Convert numpy.ndarray to Tensor. It only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
>>> import paddle.fluid as fluid
>>> place = fluid.CPUPlace()
>>> exe = fluid.executor(place)
>>> data = np.array(size=(100, 200, 300))
>>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
>>> ...
Args:
data(numpy.ndarray): an instance of array
Returns:
LoDTensor
Fetch the value of the variable with the given name from the
given scope.
Args:
name(str): name of the variable. Typically, only persistable variables
can be found in the scope used for running the program.
scope(core.Scope|None): scope object. It should be the scope where
you pass to Executor.run() when running your program.
If None, global_scope() will be used. Default None.
return_numpy(bool): whether convert the tensor to numpy.ndarray.
Default True.
Returns:
LodTensor|numpy.ndarray
Convert a Tensor to a numpy.ndarray. It only supports Tensors without LoD information.
For higher dimensional sequence data, please use LoDTensor directly.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
tensor = new_scope.find_var("data").get_tensor()
fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())
Args:
tensor(Variable): an instance of Tensor
Returns:
numpy.ndarray
Close this executor.
You can no longer use this executor after calling this method.
For the distributed training, this method would free the resource
on PServers related to the current Trainer.
Examples:
.. code-block:: python
import paddle.fluid as fluid
cpu = fluid.CPUPlace()
exe = fluid.Executor(cpu)
# execute training or testing
exe.close()
Get the global/default scope instance. There are a lot of APIs use
:code:`global_scope` as its default value, e.g., :code:`Executor.run`
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(fluid.global_scope().find_var("data").get_tensor())
Returns:
Scope: The global/default scope instance.
Check whether the block already has feed operators.
Return false if the block does not have any feed operators.
If some feed operators have been prepended to the block, check that
the info contained in these feed operators matches the feed_targets
and feed_holder_name. Raise exception when any mismatch is found.
Return true when the block has feed operators with matching info.
Args:
block: a block instance (typically global block of a program)
feed_targets: a dictionary of {feed_target_name: feed_target_data}
feed_holder_name: the name of the variable that holds the data of
all feed targets. The type of this feed_holder variable is
FEED_MINIBATCH, which is essentially vector<LoDTensor>.
Returns:
A boolean value that indicates whether a block has feed operators
that match the info contained in feed_targets and feed_holder_name.
Check whether the block already has fetch operators.
Return false if the block does not have any fetch operators.
If some fetch operators have been appended to the block, check that
the info contained in these fetch operators matches the fetch_targets
and fetch_holder_name. Raise exception when any mismatch is found.
Return true when the block has fetch operators with matching info.
Args:
block: a block instance (typically global block of a program)
fetch_targets: a dictionary of {fetch_target_name: fetch_target_data}
fetch_holder_name: the name of the variable that holds the data of
all fetch targets. The type of this fetch_holder variable is
FETCH_LIST, which is essentially vector<LoDTensor>.
Return:
A boolean value that indicates whether a block has fetch operators
that match the info contained in fetch_targets and fetch_holder_name.
The document of infer_from_dataset is almost the same as
train_from_dataset, except that in distributed training,
push gradients will be disabled in infer_from_dataset.
infer_from_dataset() can be used for evaluation in multi-thread
very easily.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the document of Dataset if needed. default is None
scope(Scope): the scope used to run this program, you can switch it to different scope
for each run. default is global_scope
thread(int): number of thread a user wants to run in this function. The actual number
of thread will be min(Dataset.thread_num, thread) if thread > 0, default is 0
debug(bool): whether a user wants to run infer_from_dataset, default is False
fetch_list(Variable List): fetch variable list, each variable
will be printed during training, default is None
fetch_info(String List): print information for each variable, default is None
print_period(int): the number of mini-batches for each print, default is 100
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.infer_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
Run program by this Executor. Feed data by feed map, fetch result by
fetch_list. Python executor takes a program, add feed operators and
fetch operators to this program according to feed map and fetch_list.
Feed map provides input data for the program. fetch_list provides
the variables (or names) that the user wants to get after the program runs.
Note: the executor will run all operators in the program but not
only the operators dependent by the fetch_list.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
# First create the Executor.
place = fluid.CPUPlace() # fluid.CUDAPlace(0)
exe = fluid.Executor(place)
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
adam = fluid.optimizer.Adam()
adam.minimize(loss)
# Run the startup program once and only once.
exe.run(fluid.default_startup_program())
x = numpy.random.random(size=(10, 1)).astype('float32')
outs = exe.run(feed={'X': x},
fetch_list=[loss.name])
Args:
program(Program|CompiledProgram): the program that need to run,
if not provided, then default_main_program (not compiled) will be used.
feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
fetch_list(list): a list of variable or variable names that user
wants to get, this method will return them according to this list.
feed_var_name(str): the name for the input variable of
feed Operator.
fetch_var_name(str): the name for the output variable of
fetch Operator.
scope(Scope): the scope used to run this program, you can switch
it to different scope. default is global_scope
return_numpy(bool): if convert the fetched tensor to numpy
use_program_cache(bool): whether to use the cached program
settings across batches. Setting it be true would be faster
only when (1) the program is not compiled with data parallel,
and (2) program, feed variable names and fetch_list variable
names do not changed compared to the last step.
Returns:
list(numpy.array): fetch result according to fetch_list.
Change the global/default scope instance by Python `with` statement. All
variables in runtime will be assigned to the new scope.
Args:
scope: The new global/default scope.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
new_scope = fluid.Scope()
with fluid.scope_guard(new_scope):
fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
numpy.array(new_scope.find_var("data").get_tensor())
Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
Given a program (either a plain or a compiled program), train_from_dataset will
consume all data samples in dataset. The input scope can be given by users; by default,
scope is global_scope(). The total number of threads run in training is `thread`.
The thread number used in training will be the minimum of thread_num in Dataset and
the value of thread in this interface. Debug can be set so that the executor will display
the run-time for all operators and the throughput of the current training task.
Note: train_from_dataset will destroy all resources created within the executor for each run.
Args:
program(Program|CompiledProgram): the program that needs to be run,
if not provided, then default_main_program (not compiled) will be used.
dataset(paddle.fluid.Dataset): dataset created outside this function,
a user should provide a well-defined dataset before calling this function.
Please check the documentation of Dataset if needed.
scope(Scope): the scope used to run this program; you can switch it to a different scope
for each run. Default is global_scope
thread(int): number of threads a user wants to run in this function. The actual number
of threads will be min(Dataset.thread_num, thread)
debug(bool): whether a user wants to run train_from_dataset in debug mode
fetch_list(Variable List): fetch variable list; each variable
will be printed during training
fetch_info(String List): print information for each variable
print_period(int): the number of mini-batches between each print
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
dataset.set_filelist(filelist)
exe.run(fluid.default_startup_program())
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset)
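A hedged variant of the example above showing the monitoring arguments; the tiny embedding/fc model and the names used here are illustrative assumptions, and the exact print format is decided by the runtime:
import paddle.fluid as fluid
place = fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
emb = fluid.layers.embedding(input=x, size=[100, 8])  # assumed toy model
loss = fluid.layers.mean(fluid.layers.fc(input=emb, size=1))
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_use_var([x, y])
dataset.set_thread(1)
dataset.set_filelist(["dataA.txt"])  # placeholder filelist
exe.run(fluid.default_startup_program())
# Print `loss` every 50 mini-batches and report per-operator run-time stats.
exe.train_from_dataset(program=fluid.default_main_program(),
                       dataset=dataset,
                       fetch_list=[loss],
                       fetch_info=["loss"],
                       print_period=50,
                       debug=True)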
Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. single tensor case prepend feed operators append fetch_operators feed var to framework always set to CPU place, since the tensor need to be splitted it is fast in CPU For backward compatibility, run directly. TODO(panyx0718): Can compile program to optimize executor performance. TODO(panyx0718): executor should be able to run graph. use_program_cache is not valid with CompiledProgram currently, we cache program, vars, sub_scope here we suppose that in a life cycle of training, a user will not create many programs. So, here the basic rule of caching is to cache all unseen (program, var, scope) when a user use use_program_cache. TODO: Need a better way to distinguish and specify different execution mode The following thread_num-determined logic will be deprecated | 15,609 | en | 0.708901 |
import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.compat import lrange
from pandas.util.testing import assert_frame_equal, assert_series_equal
from .common import MixIn
class TestNth(MixIn):
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
assert isnull(grouped['B'].first()['foo'])
assert isnull(grouped['B'].last()['foo'])
assert isnull(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
assert s.dtype == 'int64'
f = s.groupby(level=0).first()
assert f.dtype == 'int64'
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.loc[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
        assert expected.name == 1
        assert expected2.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = (df.index.year, df.index.month)
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = self.three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame(
{'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_nth_empty():
# GH 16064
df = DataFrame(index=[0], columns=['a', 'b', 'c'])
result = df.groupby('a').nth(10)
expected = DataFrame(index=Index([], name='a'), columns=['b', 'c'])
assert_frame_equal(result, expected)
result = df.groupby(['a', 'b']).nth(10)
expected = DataFrame(index=MultiIndex([[], []], [[], []],
names=['a', 'b']),
columns=['c'])
assert_frame_equal(result, expected)
| lib/python3.6/site-packages/pandas/tests/groupby/test_nth.py | 9,976 | tests for first / last / nth it works! v0.14.0 whatsnew tests for first / last / nth GH 2763, first/last shifting dtypes out of bounds, regression from 0.13.1 GH 6621 GH 7559 from the vbench validate first this is NOT the same as .first (as sorted is default!) as it keeps the order in the series (and not the group order) related GH 7287 doc example test multiple nth values get the first, fourth and last two business days for each month PR 9090, related to issue 8979 test nth on MultiIndex, should match .first() PR 9090, related to issue 8979 test nth on MultiIndex GH 16064 | 579 | en | 0.733399 |
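A standalone sketch of the behaviour these tests pin down: `nth(0)` keeps each group's first row as-is (NaN included), while `first()` and `nth(0, dropna='any')` skip missing values.
import numpy as np
import pandas as pd

df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
print(g.nth(0))                # group A=1 keeps B=NaN
print(g.first())               # group A=1 yields B=4.0
print(g.nth(0, dropna='any'))  # matches first() here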
import tensorflow as tf
class Layers(object):
def __init__(self):
self.name_bank, self.params_trainable = [], []
self.num_params = 0
self.initializer_xavier = tf.initializers.glorot_normal()
def elu(self, inputs): return tf.nn.elu(inputs)
def relu(self, inputs): return tf.nn.relu(inputs)
def sigmoid(self, inputs): return tf.nn.sigmoid(inputs)
def softmax(self, inputs): return tf.nn.softmax(inputs)
def swish(self, inputs): return tf.nn.swish(inputs)
def relu6(self, inputs): return tf.nn.relu6(inputs)
def dropout(self, inputs, rate): return tf.nn.dropout(inputs, rate=rate)
def maxpool(self, inputs, pool_size, stride_size):
return tf.nn.max_pool2d(inputs, ksize=[1, pool_size, pool_size, 1], \
padding='VALID', strides=[1, stride_size, stride_size, 1])
def avgpool(self, inputs, pool_size, stride_size):
return tf.nn.avg_pool2d(inputs, ksize=[1, pool_size, pool_size, 1], \
padding='VALID', strides=[1, stride_size, stride_size, 1])
def get_weight(self, vshape, transpose=False, bias=True, name=""):
try:
idx_w = self.name_bank.index("%s_w" %(name))
if(bias): idx_b = self.name_bank.index("%s_b" %(name))
except:
w = tf.Variable(self.initializer_xavier(vshape), \
name="%s_w" %(name), trainable=True, dtype=tf.float32)
self.name_bank.append("%s_w" %(name))
self.params_trainable.append(w)
tmpparams = 1
for d in vshape: tmpparams *= d
self.num_params += tmpparams
            if(bias):
                # bias length follows the output dimension: vshape[-2] when
                # transposed, vshape[-1] otherwise
                if(transpose): b = tf.Variable(self.initializer_xavier([vshape[-2]]), \
                    name="%s_b" %(name), trainable=True, dtype=tf.float32)
                else: b = tf.Variable(self.initializer_xavier([vshape[-1]]), \
                    name="%s_b" %(name), trainable=True, dtype=tf.float32)
                self.name_bank.append("%s_b" %(name))
                self.params_trainable.append(b)
                self.num_params += vshape[-2] if(transpose) else vshape[-1]
else:
w = self.params_trainable[idx_w]
if(bias): b = self.params_trainable[idx_b]
if(bias): return w, b
else: return w
    def fullcon(self, inputs, variables):
        [weights, biases] = variables
        out = tf.matmul(inputs, weights) + biases
        return out
    def conv2d(self, inputs, variables, stride_size, padding):
        [weights, biases] = variables
        out = tf.nn.conv2d(inputs, weights, \
            strides=[1, stride_size, stride_size, 1], padding=padding) + biases
        return out
    def dwconv2d(self, inputs, variables, stride_size, padding):
        [weights, biases] = variables
        out = tf.nn.depthwise_conv2d(inputs, weights, \
            strides=[1, stride_size, stride_size, 1], padding=padding) + biases
        return out
def batch_norm(self, inputs, name=""):
# https://arxiv.org/pdf/1502.03167.pdf
mean = tf.reduce_mean(inputs)
std = tf.math.reduce_std(inputs)
var = std**2
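        # NOTE: mean/var are reduced over the whole tensor (not per channel);
        # the call below computes scale * (x - mean) / sqrt(var + eps) + offset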
try:
idx_offset = self.name_bank.index("%s_offset" %(name))
idx_scale = self.name_bank.index("%s_scale" %(name))
except:
offset = tf.Variable(0, \
name="%s_offset" %(name), trainable=True, dtype=tf.float32)
self.name_bank.append("%s_offset" %(name))
self.params_trainable.append(offset)
self.num_params += 1
scale = tf.Variable(1, \
name="%s_scale" %(name), trainable=True, dtype=tf.float32)
self.name_bank.append("%s_scale" %(name))
self.params_trainable.append(scale)
self.num_params += 1
else:
offset = self.params_trainable[idx_offset]
scale = self.params_trainable[idx_scale]
        # offset starts at zero and scale at one; both are trainable
out = tf.nn.batch_normalization(
x = inputs,
mean=mean,
variance=var,
offset=offset,
scale=scale,
variance_epsilon=1e-12,
name=name
)
return out
| source/layers.py | 4,209 | https://arxiv.org/pdf/1502.03167.pdf zero one | 45 | en | 0.394813 |
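A minimal usage sketch for the Layers helper above (shapes and layer names are illustrative assumptions; TF2 eager mode):
import tensorflow as tf

lay = Layers()
x = tf.random.normal([8, 32, 32, 3])  # NHWC toy batch
# get_weight caches variables in name_bank, so repeated calls with the
# same name reuse the same parameters.
w1 = lay.get_weight(vshape=[3, 3, 3, 16], name="conv1")
h = lay.relu(lay.conv2d(x, w1, stride_size=1, padding="SAME"))
h = lay.maxpool(h, pool_size=2, stride_size=2)  # -> [8, 16, 16, 16]
h = tf.reshape(h, [8, -1])                      # -> [8, 4096]
w2 = lay.get_weight(vshape=[4096, 10], name="fc1")
probs = lay.softmax(lay.fullcon(h, w2))
print(probs.shape, lay.num_params)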
# python3 imports
from re import compile as compile_regex
from gettext import gettext as _
# project imports
from wintersdeep_postcode.postcode import Postcode
from wintersdeep_postcode.exceptions.validation_fault import ValidationFault
## A wrapper for validation of standard postcodes
# @remarks see \ref wintersdeep_postcode.postcode_types.standard_postcode
class StandardPostcodeValidator(object):
## Areas that only have single digit districts (ignoring sub-divisions)
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithOnlySingleDigitDistricts = []
## Checks if a postcode is in an area with only single digit districts and if
# so - that the district specified is only a single digit.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithOnlySingleDigitDistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_district >= 10:
single_digit_districts = cls.AreasWithOnlySingleDigitDistricts
impacted_by_rule = postcode.outward_area in single_digit_districts
return impacted_by_rule
## Areas that only have double digit districts (ignoring sub-divisions)
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithOnlyDoubleDigitDistricts = []
## Checks if a postcode is in an area with only double digit districts and
# if so - that the district specified has two digits as required.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithOnlyDoubleDigitDistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_district <= 9:
double_digit_districts = cls.AreasWithOnlyDoubleDigitDistricts
impacted_by_rule = postcode.outward_area in double_digit_districts
return impacted_by_rule
## Areas that have a district zero.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithDistrictZero = []
## Checks if a postcode has a district zero if it specified one.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithDistrictZero(cls, postcode):
impacted_by_rule = False
if postcode.outward_district == 0:
areas_with_district_zero = cls.AreasWithDistrictZero
impacted_by_rule = not postcode.outward_area in areas_with_district_zero
return impacted_by_rule
## Areas that do not have a district 10
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithoutDistrictTen = []
## Checks if a postcode has a district ten if it specified one.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithoutDistrictTen(cls, postcode):
impacted_by_rule = False
if postcode.outward_district == 10:
areas_without_district_ten = cls.AreasWithoutDistrictTen
impacted_by_rule = postcode.outward_area in areas_without_district_ten
return impacted_by_rule
## Only a few areas have subdivided districts
# @remarks loaded from JSON file 'standard_postcode_validator.json'
AreasWithSubdistricts = {}
## If a postcode has subdistricts, check its supposed to.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
areas_with_subdistricts = cls.AreasWithSubdistricts
impacted_by_rule = not postcode.outward_area in areas_with_subdistricts
if not impacted_by_rule:
subdivided_districts_in_area = areas_with_subdistricts[postcode.outward_area]
if subdivided_districts_in_area:
impacted_by_rule = not postcode.outward_district in subdivided_districts_in_area
return impacted_by_rule
## If a postcode has a limited selection of subdistricts, makes sure any set are in scope.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckAreasWithSpecificSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
areas_with_subdistricts = cls.AreasWithSubdistricts
subdivided_districts_in_area = areas_with_subdistricts.get(postcode.outward_area, {})
specific_subdistrict_codes = subdivided_districts_in_area.get(postcode.outward_district, None)
impacted_by_rule = specific_subdistrict_codes and \
not postcode.outward_subdistrict in specific_subdistrict_codes
return impacted_by_rule
    ## Characters that are not used in the first position.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
FirstPositionExcludes = []
    ## Checks that a postcode does not include unused characters in the first position.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckFirstPositionExcludes(cls, postcode):
        first_position_char = postcode.outward_area[0]
        impacted_by_rule = first_position_char in cls.FirstPositionExcludes
return impacted_by_rule
    ## Characters that are not used in the second position.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
SecondPositionExcludes = []
    ## Checks that a postcode does not include unused characters in the second position.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSecondPositionExcludes(cls, postcode):
impacted_by_rule = False
if len(postcode.outward_area) > 1:
            second_position_char = postcode.outward_area[1]
            impacted_by_rule = second_position_char in cls.SecondPositionExcludes
return impacted_by_rule
    ## Characters that are used in the third alpha position (for single digit areas).
# @remarks loaded from JSON file 'standard_postcode_validator.json'
SingleDigitAreaSubdistricts = []
## Checks that a postcode does not include unused subdistricts for single digit areas.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSingleDigitAreaSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
if len(postcode.outward_area) == 1:
allowed_subdistricts = cls.SingleDigitAreaSubdistricts
subdistrict = postcode.outward_subdistrict
impacted_by_rule = not subdistrict in allowed_subdistricts
return impacted_by_rule
    ## Characters that are used in the fourth alpha position (for double digit areas).
# @remarks loaded from JSON file 'standard_postcode_validator.json'
DoubleDigitAreaSubdistricts = []
## Checks that a postcode does not include unused subdistricts for double digit areas.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckDoubleDigitAreaSubdistricts(cls, postcode):
impacted_by_rule = False
if postcode.outward_subdistrict:
if len(postcode.outward_area) == 2:
allowed_subdistricts = cls.DoubleDigitAreaSubdistricts
subdistrict = postcode.outward_subdistrict
impacted_by_rule = not subdistrict in allowed_subdistricts
return impacted_by_rule
    ## Characters that are not used in the unit string.
# @remarks loaded from JSON file 'standard_postcode_validator.json'
UnitExcludes = []
## Checks that a postcode does not include characters in the first character of the unit string that are unused.
    # @remarks we check the first/second unit character separately to provide more comprehensive errors.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckFirstUnitCharacterExcludes(cls, postcode):
character = postcode.inward_unit[0]
impacted_by_rule = character in cls.UnitExcludes
return impacted_by_rule
## Checks that a postcode does not include characters in the second character of the unit string that are unused.
    # @remarks we check the first/second unit character separately to provide more comprehensive errors.
# @param cls the type of class that is invoking this method.
# @param postcode the postcode to check for conformance to this rule.
# @returns True if the postcode violates this rule, else False.
@classmethod
def CheckSecondUnitCharacterExcludes(cls, postcode):
character = postcode.inward_unit[1]
impacted_by_rule = character in cls.UnitExcludes
return impacted_by_rule
## Loads various static members used for validation of standard postcodes from
# a JSON file - this is expected to be co-located with this class.
def load_validator_params_from_json():
from json import load
from os.path import dirname, join
json_configuration_file = join( dirname(__file__), "standard_postcode_validator.json" )
with open(json_configuration_file, 'r') as file_handle:
config_json = load(file_handle)
StandardPostcodeValidator.AreasWithDistrictZero = config_json['has-district-zero']
StandardPostcodeValidator.AreasWithoutDistrictTen = config_json['no-district-ten']
StandardPostcodeValidator.AreasWithOnlyDoubleDigitDistricts = config_json['double-digit-districts']
StandardPostcodeValidator.AreasWithOnlySingleDigitDistricts = config_json['single-digit-districts']
StandardPostcodeValidator.SingleDigitAreaSubdistricts = config_json['single-digit-area-subdistricts']
StandardPostcodeValidator.DoubleDigitAreaSubdistricts = config_json['double-digit-area-subdistricts']
StandardPostcodeValidator.SecondPositionExcludes = config_json['second-position-excludes']
StandardPostcodeValidator.FirstPositionExcludes = config_json['first-position-excludes']
StandardPostcodeValidator.UnitExcludes = config_json['unit-excludes']
subdivision_map = config_json["subdivided-districts"]
StandardPostcodeValidator.AreasWithSubdistricts = { k: {
int(k1): v1 for k1, v1 in v.items()
} for k, v in subdivision_map.items() }
load_validator_params_from_json()
if __name__ == "__main__":
##
## If this is the main entry point - someone might be a little lost?
##
print(f"{__file__} ran, but doesn't do anything on its own.")
print(f"Check 'https://www.github.com/wintersdeep/wintersdeep_postcode' for usage.") | wintersdeep_postcode/postcode_types/standard_postcode/standard_postcode_validator.py | 12,360 | python3 imports project imports A wrapper for validation of standard postcodes @remarks see \ref wintersdeep_postcode.postcode_types.standard_postcode Areas that only have single digit districts (ignoring sub-divisions) @remarks loaded from JSON file 'standard_postcode_validator.json' Checks if a postcode is in an area with only single digit districts and if so - that the district specified is only a single digit. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Areas that only have double digit districts (ignoring sub-divisions) @remarks loaded from JSON file 'standard_postcode_validator.json' Checks if a postcode is in an area with only double digit districts and if so - that the district specified has two digits as required. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Areas that have a district zero. @remarks loaded from JSON file 'standard_postcode_validator.json' Checks if a postcode has a district zero if it specified one. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Areas that do not have a district 10 @remarks loaded from JSON file 'standard_postcode_validator.json' Checks if a postcode has a district ten if it specified one. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Only a few areas have subdivided districts @remarks loaded from JSON file 'standard_postcode_validator.json' If a postcode has subdistricts, check its supposed to. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. If a postcode has a limited selection of subdistricts, makes sure any set are in scope. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Charactesr that are not used in the first position. @remarks loaded from JSON file 'standard_postcode_validator.json' Checks that a postcode does not include usued characters in the first postition. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Charactesr that are not used in the second position. @remarks loaded from JSON file 'standard_postcode_validator.json' Checks that a postcode does not include unused characters in the second postition. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Charactesr that are used in the third apha position (for single digit areas). 
@remarks loaded from JSON file 'standard_postcode_validator.json' Checks that a postcode does not include unused subdistricts for single digit areas. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Charactesr that are used in the fourth apha position (for double digit areas). @remarks loaded from JSON file 'standard_postcode_validator.json' Checks that a postcode does not include unused subdistricts for double digit areas. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Charactesr that are not used in the unit string. @remarks loaded from JSON file 'standard_postcode_validator.json' Checks that a postcode does not include characters in the first character of the unit string that are unused. @remarks we check the first/second unit character seperately to provide more comprehensive errors. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Checks that a postcode does not include characters in the second character of the unit string that are unused. @remarks we check the first/second unit character seperately to provide more comprehensive errors. @param cls the type of class that is invoking this method. @param postcode the postcode to check for conformance to this rule. @returns True if the postcode violates this rule, else False. Loads various static members used for validation of standard postcodes from a JSON file - this is expected to be co-located with this class. If this is the main entry point - someone might be a little lost? | 5,188 | en | 0.812558 |
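A hedged sketch of how these checks are consumed; the parsed-postcode object here is a stand-in with just the attributes the validators read, not the project's real Postcode type:
from types import SimpleNamespace

# Stand-in for a parsed postcode such as "BR0 4RN" (attribute names match
# what the Check* classmethods read; values are illustrative).
candidate = SimpleNamespace(
    outward_area="BR",
    outward_district=0,
    outward_subdistrict=None,
    inward_unit="RN",
)

faults = []
# Each Check* classmethod returns True when the postcode VIOLATES its rule;
# which rules fire depends on the JSON configuration that was loaded.
if StandardPostcodeValidator.CheckAreasWithDistrictZero(candidate):
    faults.append("district zero is not used in this area")
if StandardPostcodeValidator.CheckFirstUnitCharacterExcludes(candidate):
    faults.append("first unit character is never used")
print(faults or "no rule violations detected")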
# Generated by Django 2.2.8 on 2019-12-20 17:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('m2mbasic', '0002_auto_20191220_1716'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('similar_products', models.ManyToManyField(related_name='_product_similar_products_+', to='m2mbasic.Product')),
],
),
]
| django/models/m2mbasic/migrations/0003_product.py | 644 | Generated by Django 2.2.8 on 2019-12-20 17:32 | 45 | en | 0.718499 |
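For orientation, a models.py sketch (an assumption, not recovered from this repo) that produces a migration of this shape; the mangled related_name '_product_similar_products_+' is what Django writes when the reverse accessor is hidden:
from django.db import models

class Product(models.Model):
    name = models.CharField(max_length=50)
    # Hiding the reverse relation makes Django generate the internal
    # related_name '_product_similar_products_+' seen in the migration.
    similar_products = models.ManyToManyField("Product", related_name="+")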
"""
Tests for the :mod:`fiftyone.utils.cvat` module.
You must run these tests interactively as follows::
python tests/intensive/cvat_tests.py
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from bson import ObjectId
from collections import defaultdict
import numpy as np
import os
import unittest
import eta.core.utils as etau
import fiftyone as fo
import fiftyone.utils.cvat as fouc
import fiftyone.zoo as foz
from fiftyone.core.expressions import ViewField as F
def _find_shape(anno_json, label_id):
shape = _parse_shapes(anno_json["shapes"], label_id)
if shape is not None:
return shape
for track in anno_json["tracks"]:
shape = _parse_shapes(track["shapes"], label_id)
if shape is not None:
return shape
def _parse_shapes(shapes, label_id):
for shape in shapes:
for attr in shape["attributes"]:
if attr["value"] == label_id:
return shape
def _get_shape(api, task_id, label_id):
anno_json = api.get(api.task_annotation_url(task_id)).json()
return _find_shape(anno_json, label_id)
def _delete_shape(api, task_id, label_id):
anno_json = api.get(api.task_annotation_url(task_id)).json()
shape = _find_shape(anno_json, label_id)
if shape is not None:
del_json = {"version": 1, "tags": [], "shapes": [shape], "tracks": []}
del_url = api.task_annotation_url(task_id) + "?action=delete"
api.patch(del_url, json=del_json)
def _get_label(api, task_id, label=None):
attr_id_map, class_id_map = api._get_attr_class_maps(task_id)
if isinstance(label, str):
label = class_id_map[label]
else:
label = list(class_id_map.values())[0]
return label
def _create_annotation(
api, task_id, shape=None, tag=None, track=None, points=None, _type=None
):
if points is None:
points = [10, 20, 30, 40]
if _type is None:
_type = "rectangle"
shapes = []
tags = []
tracks = []
if shape is not None:
if not isinstance(shape, dict):
label = _get_label(api, task_id, label=shape)
shape = {
"type": _type,
"frame": 0,
"label_id": label,
"group": 0,
"attributes": [],
"points": points,
"occluded": False,
}
shapes = [shape]
if tag is not None:
if not isinstance(tag, dict):
label = _get_label(api, task_id, label=tag)
tag = {
"frame": 0,
"label_id": label,
"group": 0,
"attributes": [],
}
tags = [tag]
if track is not None:
if not isinstance(track, dict):
label = _get_label(api, task_id, label=track)
if isinstance(track, tuple):
start, end = track
else:
start, end = 0, -1
track = {
"frame": start,
"label_id": label,
"group": 0,
"shapes": [
{
"type": _type,
"occluded": False,
"points": points,
"frame": start,
"outside": False,
"attributes": [],
"z_order": 0,
}
],
"attributes": [],
}
if end > start:
track["shapes"].append(
{
"type": _type,
"occluded": False,
"points": points,
"frame": end,
"outside": True,
"attributes": [],
"z_order": 0,
}
)
tracks.append(track)
create_json = {
"version": 1,
"tags": tags,
"shapes": shapes,
"tracks": tracks,
}
create_url = api.task_annotation_url(task_id) + "?action=create"
api.patch(create_url, json=create_json)
def _update_shape(
api,
task_id,
label_id,
label=None,
points=None,
attributes=None,
occluded=None,
):
anno_json = api.get(api.task_annotation_url(task_id)).json()
shape = _find_shape(anno_json, label_id)
if shape is not None:
if points is not None:
shape["points"] = points
if occluded is not None:
shape["occluded"] = occluded
if attributes is not None:
attr_id_map, class_id_map = api._get_attr_class_maps(task_id)
if label is None:
label_id = shape["label_id"]
attr_id_map = attr_id_map[label_id]
else:
label_id = class_id_map[label]
prev_attr_id_map = attr_id_map[shape["label_id"]]
prev_attr_id_map = {v: k for k, v in prev_attr_id_map.items()}
attr_id_map = attr_id_map[label_id]
shape["label_id"] = label_id
for attr in shape["attributes"]:
spec = prev_attr_id_map[attr["spec_id"]]
attr["spec_id"] = attr_id_map[spec]
for attr_name, attr_val in attributes:
if attr_name in attr_id_map:
shape["attributes"].append(
{"spec_id": attr_id_map[attr_name], "value": attr_val}
)
update_json = {
"version": 1,
"tags": [],
"shapes": [shape],
"tracks": [],
}
update_url = api.task_annotation_url(task_id) + "?action=update"
api.patch(update_url, json=update_json)
class CVATTests(unittest.TestCase):
def test_upload(self):
# Test images
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
self.assertIsNotNone(_get_shape(api, task_id, shape_id))
sample_id = list(list(results.frame_id_map.values())[0].values())[0][
"sample_id"
]
self.assertEqual(sample_id, dataset.first().id)
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
prev_ids,
dataset.values("ground_truth.detections.id", unwind=True),
)
# Test Videos
dataset = foz.load_zoo_dataset(
"quickstart-video", max_samples=1
).clone()
prev_ids = dataset.values(
"frames.detections.detections.id", unwind=True
)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="frames.detections",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().frames[1].detections.detections[0].id
self.assertIsNotNone(_get_shape(api, task_id, shape_id))
sample_id = list(list(results.frame_id_map.values())[0].values())[0][
"sample_id"
]
self.assertEqual(sample_id, dataset.first().id)
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
prev_ids,
dataset.values("frames.detections.detections.id", unwind=True),
)
def test_detection_labelling(self):
dataset = (
foz.load_zoo_dataset("quickstart")
.select_fields("ground_truth")
.clone()
)
# Get a subset that contains at least 2 objects
dataset = dataset.match(F("ground_truth.detections").length() > 1)[
:2
].clone()
previous_dataset = dataset.clone()
previous_label_ids = dataset.values(
"ground_truth.detections.id", unwind=True
)
anno_key = "anno_key"
attributes = {"test": {"type": "text"}}
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
attributes=attributes,
)
api = results.connect_to_api()
task_id = results.task_ids[0]
deleted_label_id = previous_label_ids[0]
updated_label_id = previous_label_ids[1]
_delete_shape(api, task_id, deleted_label_id)
_create_annotation(api, task_id, shape=True)
_update_shape(
api, task_id, updated_label_id, attributes=[("test", "1")]
)
dataset.load_annotations(anno_key, cleanup=True)
label_ids = dataset.values("ground_truth.detections.id", unwind=True)
self.assertEqual(len(label_ids), len(previous_label_ids))
added_label_ids = list(set(label_ids) - set(previous_label_ids))
self.assertEqual(len(added_label_ids), 1)
deleted_label_ids = list(set(previous_label_ids) - set(label_ids))
self.assertEqual(len(deleted_label_ids), 1)
updated_sample = dataset.filter_labels(
"ground_truth", F("_id") == ObjectId(updated_label_id)
).first()
prev_updated_sample = previous_dataset.filter_labels(
"ground_truth", F("_id") == ObjectId(updated_label_id)
).first()
self.assertEqual(len(updated_sample.ground_truth.detections), 1)
self.assertEqual(len(prev_updated_sample.ground_truth.detections), 1)
self.assertEqual(
updated_sample.ground_truth.detections[0].id,
prev_updated_sample.ground_truth.detections[0].id,
)
self.assertEqual(updated_sample.ground_truth.detections[0].test, 1)
api.close()
def test_multiple_fields(self):
dataset = foz.load_zoo_dataset(
"open-images-v6",
split="validation",
label_types=["detections", "segmentations", "classifications"],
classes=["Person"],
max_samples=10,
).clone()
prev_dataset = dataset.clone()
anno_key = "anno_key"
label_schema = {
"detections": {},
"segmentations": {"type": "instances"},
"positive_labels": {},
"negative_labels": {},
}
results = dataset.annotate(
anno_key,
backend="cvat",
label_schema=label_schema,
classes=["Person"],
)
api = results.connect_to_api()
task_id = results.task_ids[0]
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def _remove_bbox(dataset, label_field):
view = dataset.set_field(
"%s.detections" % label_field,
F("detections").map(
F().set_field("bounding_box", []).set_field("mask", None)
),
)
return view
# Ensure ids and attrs are equal
view = _remove_bbox(dataset, "detections")
prev_view = _remove_bbox(prev_dataset, "detections")
self.assertListEqual(
view.values("detections", unwind=True),
prev_view.values("detections", unwind=True),
)
view = _remove_bbox(dataset, "segmentations")
prev_view = _remove_bbox(prev_dataset, "segmentations")
self.assertListEqual(
view.values("segmentations", unwind=True),
prev_view.values("segmentations", unwind=True),
)
self.assertListEqual(
dataset.values("positive_labels", unwind=True),
prev_dataset.values("positive_labels", unwind=True),
)
self.assertListEqual(
dataset.values("negative_labels", unwind=True),
prev_dataset.values("negative_labels", unwind=True),
)
def test_task_creation_arguments(self):
dataset = (
foz.load_zoo_dataset("quickstart", max_samples=4)
.select_fields("ground_truth")
.clone()
)
user = fo.annotation_config.backends.get("cvat", {})
user = user.get("username", None)
users = [user] if user is not None else None
anno_key = "anno_key"
bug_tracker = "test_tracker"
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
task_size=2,
segment_size=1,
task_assignee=users,
job_assignees=users,
job_reviewers=users,
issue_tracker=bug_tracker,
)
task_ids = results.task_ids
api = results.connect_to_api()
self.assertEqual(len(task_ids), 2)
for task_id in task_ids:
task_json = api.get(api.task_url(task_id)).json()
self.assertEqual(task_json["bug_tracker"], bug_tracker)
self.assertEqual(task_json["segment_size"], 1)
if user is not None:
self.assertEqual(task_json["assignee"]["username"], user)
for job in api.get(api.jobs_url(task_id)).json():
job_json = api.get(job["url"]).json()
if user is not None:
self.assertEqual(job_json["assignee"]["username"], user)
if api.server_version == 1:
self.assertEqual(
job_json["reviewer"]["username"], user
)
results.print_status()
status = results.get_status()
self.assertEqual(
status["ground_truth"][task_ids[0]]["assignee"]["username"], user,
)
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def test_project(self):
dataset = (
foz.load_zoo_dataset("quickstart", max_samples=2)
.select_fields("ground_truth")
.clone()
)
anno_key = "anno_key"
project_name = "cvat_unittest_project"
results = dataset.annotate(
anno_key,
backend="cvat",
label_field="ground_truth",
project_name=project_name,
)
api = results.connect_to_api()
project_id = api.get_project_id(project_name)
self.assertIsNotNone(project_id)
self.assertIn(project_id, results.project_ids)
anno_key2 = "anno_key2"
results2 = dataset.annotate(
anno_key2,
backend="cvat",
label_field="ground_truth",
project_name=project_name,
)
self.assertNotIn(project_id, results2.project_ids)
self.assertIsNotNone(api.get_project_id(project_name))
dataset.load_annotations(anno_key, cleanup=True)
self.assertIsNotNone(api.get_project_id(project_name))
dataset.load_annotations(anno_key2, cleanup=True)
self.assertIsNotNone(api.get_project_id(project_name))
api.delete_project(project_id)
api.close()
api = results.connect_to_api()
self.assertIsNone(api.get_project_id(project_name))
api.close()
def test_example_add_new_label_fields(self):
# Test label field arguments
dataset = foz.load_zoo_dataset("quickstart", max_samples=10).clone()
view = dataset.take(1)
anno_key = "cvat_new_field"
results = view.annotate(
anno_key,
label_field="new_classifications",
label_type="classifications",
classes=["dog", "cat", "person"],
)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(api, task_id, tag="dog")
dataset.load_annotations(anno_key, cleanup=True)
tags = view.first().new_classifications.classifications
num_tags = len(tags)
self.assertEqual(num_tags, 1)
self.assertEqual(tags[0].label, "dog")
# Test label schema
anno_key = "cvat_new_field_schema"
label_schema = {
"new_classifications_2": {
"type": "classifications",
"classes": ["dog", "cat", "person"],
}
}
results = view.annotate(anno_key, label_schema=label_schema)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
api.close()
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(api, task_id, tag="person")
dataset.load_annotations(anno_key, cleanup=True)
tags = view.first().new_classifications_2.classifications
num_tags = len(tags)
self.assertEqual(num_tags, 1)
self.assertEqual(tags[0].label, "person")
dataset.load_annotations(anno_key, cleanup=True)
api.close()
def test_example_restricting_label_edits(self):
dataset = foz.load_zoo_dataset("quickstart").clone()
# Grab a sample that contains at least 2 people
view = dataset.match(
F("ground_truth.detections")
.filter(F("label") == "person")
.length()
> 1
).limit(1)
previous_labels = view.values("ground_truth.detections", unwind=True)
previous_person_labels = view.filter_labels(
"ground_truth", F("label") == "person"
).values("ground_truth.detections", unwind=True)
anno_key = "cvat_edit_restrictions"
# The new attributes that we want to populate
attributes = {
"sex": {"type": "select", "values": ["male", "female"],},
"age": {"type": "text",},
}
results = view.annotate(
anno_key,
label_field="ground_truth",
classes=["person", "test"],
attributes=attributes,
allow_additions=False,
allow_deletions=False,
allow_label_edits=False,
allow_spatial_edits=False,
)
self.assertIsNotNone(dataset.get_annotation_info(anno_key))
task_id = results.task_ids[0]
api = results.connect_to_api()
# Delete label
deleted_id = previous_person_labels[0].id
_delete_shape(api, task_id, deleted_id)
# Add label
_create_annotation(api, task_id, shape="person")
# Edit label and bounding box
edited_id = previous_person_labels[1].id
_update_shape(
api,
task_id,
edited_id,
label="test",
points=[10, 20, 30, 40],
attributes=[("sex", "male")],
)
dataset.load_annotations(anno_key, cleanup=True)
api.close()
labels = view.values("ground_truth.detections", unwind=True)
person_labels = view.filter_labels(
"ground_truth", F("label") == "person"
).values("ground_truth.detections", unwind=True)
self.assertListEqual(
[d.label for d in labels], [d.label for d in previous_labels],
)
self.assertListEqual(
[d.bounding_box for d in labels],
[d.bounding_box for d in previous_labels],
)
self.assertListEqual(
[d.id for d in labels], [d.id for d in previous_labels],
)
self.assertEqual(
len(dataset.filter_labels("ground_truth", F("sex") == "male")), 1,
)
def test_issue_1634(self):
# tests: https://github.com/voxel51/fiftyone/issues/1634
dataset = (
foz.load_zoo_dataset("quickstart-video", max_samples=1)
.select_fields("frames.detections")
.clone()
)
anno_key = "issue_1634_test"
results = dataset.annotate(
anno_key,
label_field="frames.ground_truth",
label_type="detections",
classes=["test"],
)
task_id = results.task_ids[0]
api = results.connect_to_api()
        # Create overlapping tracks of different types
_create_annotation(
api,
task_id,
track=(0, 30),
_type="polygon",
points=[10, 20, 40, 30, 50, 60],
)
_create_annotation(
api, task_id, track=(20, 40),
)
api.close()
imported_dataset = fo.Dataset()
with etau.TempDir() as tmp:
fouc.import_annotations(
imported_dataset,
task_ids=[task_id],
data_path=tmp,
download_media=True,
)
imported_dataset.compute_metadata()
self.assertEqual(
imported_dataset.first().metadata.total_frame_count,
dataset.first().metadata.total_frame_count,
)
imported_dataset.export(
export_dir=tmp, dataset_type=fo.types.CVATVideoDataset
)
filename = os.path.splitext(
os.path.basename(imported_dataset.first().filepath)
)[0]
labels_filepath = os.path.join(tmp, "labels", "%s.xml" % filename)
with open(labels_filepath, "r") as f:
label_file_info = f.read()
track_1 = '<track id="1" label="test">'
track_2 = '<track id="2" label="test">'
polygon_frame_0 = '<polygon frame="0"'
polygon_frame_30 = '<polygon frame="30"'
box_frame_20 = '<box frame="20"'
box_frame_40 = '<box frame="40"'
self.assertTrue(track_1 in label_file_info)
self.assertTrue(track_2 in label_file_info)
self.assertTrue(polygon_frame_0 in label_file_info)
self.assertTrue(polygon_frame_30 in label_file_info)
self.assertTrue(box_frame_20 in label_file_info)
self.assertTrue(box_frame_40 in label_file_info)
cvat_video_dataset = fo.Dataset.from_dir(
dataset_dir=tmp, dataset_type=fo.types.CVATVideoDataset,
)
detections = cvat_video_dataset.values(
"frames.detections", unwind=True
)
detections = [i for i in detections if i is not None]
self.assertEqual(len(detections), 20)
polylines = cvat_video_dataset.values(
"frames.polylines", unwind=True
)
polylines = [i for i in polylines if i is not None]
self.assertEqual(len(polylines), 30)
dataset.load_annotations(anno_key, cleanup=True)
def test_deleted_tasks(self):
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "anno_key"
results = dataset.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
api.delete_task(task_id)
status = results.get_status()
api.close()
dataset.load_annotations(anno_key, cleanup=True)
self.assertListEqual(
dataset.values("ground_truth.detections.id", unwind=True),
prev_ids,
)
def test_occluded_attr(self):
dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
anno_key = "cvat_occluded_widget"
# Populate a new `occluded` attribute on the existing `ground_truth` labels
# using CVAT's occluded widget
label_schema = {
"ground_truth": {"attributes": {"occluded": {"type": "occluded",}}}
}
results = dataset.annotate(
anno_key, label_schema=label_schema, backend="cvat"
)
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
_update_shape(api, task_id, shape_id, occluded=True)
dataset.load_annotations(anno_key, cleanup=True)
id_occ_map = dict(
zip(
*dataset.values(
[
"ground_truth.detections.id",
"ground_truth.detections.occluded",
],
unwind=True,
)
)
)
self.assertTrue(id_occ_map.pop(shape_id))
self.assertFalse(any(id_occ_map.values()))
def test_map_view_stage(self):
dataset = (
foz.load_zoo_dataset("quickstart")
.select_fields("ground_truth")
.clone()
)
# Get a subset that contains at least 2 objects
dataset = dataset.match(F("ground_truth.detections").length() > 1)[
:1
].clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
# Set one of the detections to upper case
sample = dataset.first()
label = sample.ground_truth.detections[0].label
sample.ground_truth.detections[0].label = label.upper()
sample.save()
prev_unchanged_label = dataset.select_labels(ids=prev_ids[1]).values(
"ground_truth.detections.label", unwind=True
)[0]
labels = dataset.distinct("ground_truth.detections.label")
label_map = {l: l.upper() for l in labels}
view = dataset.map_labels("ground_truth", label_map)
anno_key = "anno_key"
results = view.annotate(
anno_key, backend="cvat", label_field="ground_truth",
)
api = results.connect_to_api()
task_id = results.task_ids[0]
deleted_id = prev_ids[0]
self.assertIsNotNone(_get_shape(api, task_id, deleted_id))
_create_annotation(api, task_id, shape=labels[0].upper())
_delete_shape(api, task_id, deleted_id)
dataset.load_annotations(anno_key, cleanup=True)
loaded_ids = dataset.values("ground_truth.detections.id", unwind=True)
self.assertEqual(len(loaded_ids), len(prev_ids))
# We expect existing labels to have been updated according to the
# mapping
unchanged_label = dataset.select_labels(ids=prev_ids[1]).values(
"ground_truth.detections.label", unwind=True
)[0]
self.assertNotEqual(unchanged_label, prev_unchanged_label)
# Expect newly created labels to retain whatever class they were
# annotated as
new_id = list(set(loaded_ids) - set(prev_ids))[0]
new_label = dataset.select_labels(ids=new_id).values(
"ground_truth.detections.label", unwind=True
)[0]
self.assertEqual(labels[0].upper(), new_label)
def test_dest_field(self):
# Test images
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_labels = dataset.values("ground_truth", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="ground_truth")
dataset.load_annotations(
anno_key, cleanup=True, dest_field="test_field",
)
self.assertListEqual(
prev_labels, dataset.values("ground_truth", unwind=True),
)
self.assertListEqual(
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
sorted(dataset.values("test_field.detections.id", unwind=True)),
)
# Test dict
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_labels = dataset.values("ground_truth", unwind=True)
anno_key = "test_dest_field"
label_schema = {
"ground_truth": {},
"new_points": {"type": "keypoints", "classes": ["test"],},
"new_polygon": {"type": "polygons", "classes": ["test2"],},
}
results = dataset.annotate(anno_key, label_schema=label_schema)
api = results.connect_to_api()
task_id = results.task_ids[0]
_create_annotation(
api,
task_id,
shape="test",
_type="points",
points=[10, 20, 40, 30, 50, 60],
)
_create_annotation(
api,
task_id,
shape="test2",
_type="polygon",
points=[10, 20, 40, 30, 50, 60],
)
dest_field = {
"ground_truth": "test_field_1",
"new_points": "test_field_2",
}
dataset.load_annotations(
anno_key, cleanup=True, dest_field=dest_field,
)
self.assertFalse(dataset.has_sample_field("new_points"))
self.assertTrue(dataset.has_sample_field("new_polygon"))
self.assertTrue(dataset.has_sample_field("test_field_1"))
self.assertTrue(dataset.has_sample_field("test_field_2"))
self.assertListEqual(
prev_labels, dataset.values("ground_truth", unwind=True),
)
self.assertListEqual(
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
sorted(dataset.values("test_field_1.detections.id", unwind=True)),
)
self.assertEqual(
len(dataset.values("test_field_2.keypoints.id", unwind=True)), 1,
)
self.assertEqual(
len(dataset.values("new_polygon.polylines.id", unwind=True)), 1,
)
# Test modification
dataset = foz.load_zoo_dataset("quickstart", max_samples=2).clone()
prev_ids = dataset.values("ground_truth.detections.id", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="ground_truth")
api = results.connect_to_api()
task_id = results.task_ids[0]
shape_id = dataset.first().ground_truth.detections[0].id
_delete_shape(api, task_id, shape_id)
_create_annotation(api, task_id, shape=True)
_create_annotation(
api,
task_id,
shape=True,
_type="points",
points=[10, 20, 40, 30, 50, 60],
)
dataset.load_annotations(
anno_key, cleanup=True, dest_field="test_field", unexpected="keep",
)
self.assertListEqual(
sorted(prev_ids),
sorted(dataset.values("ground_truth.detections.id", unwind=True)),
)
test_ids = dataset.values("test_field.detections.id", unwind=True)
self.assertEqual(len(set(test_ids) - set(prev_ids)), 1)
self.assertEqual(len(set(prev_ids) - set(test_ids)), 1)
# Test videos
dataset = foz.load_zoo_dataset(
"quickstart-video", max_samples=1
).clone()
prev_labels = dataset.values("frames.detections", unwind=True)
anno_key = "test_dest_field"
results = dataset.annotate(anno_key, label_field="frames.detections")
dataset.load_annotations(
anno_key, cleanup=True, dest_field="frames.test_field",
)
self.assertListEqual(
prev_labels, dataset.values("frames.detections", unwind=True),
)
self.assertListEqual(
sorted(
dataset.values("frames.detections.detections.id", unwind=True)
),
sorted(
dataset.values("frames.test_field.detections.id", unwind=True)
),
)
if __name__ == "__main__":
fo.config.show_progress_bars = False
unittest.main(verbosity=2)
| tests/intensive/cvat_tests.py | 31,748 | Tests for the :mod:`fiftyone.utils.cvat` module.
You must run these tests interactively as follows::
python tests/intensive/cvat_tests.py
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
Test images Test Videos Get a subset that contains at least 2 objects Ensure ids and attrs are equal Test label field arguments Test label schema Grab a sample that contains at least 2 people The new attributes that we want to populate Delete label Add label Edit label and bounding box tests: https://github.com/voxel51/fiftyone/issues/1634 Create overlapping tracks of different type Populate a new `occluded` attribute on the existing `ground_truth` labels using CVAT's occluded widget Get a subset that contains at least 2 objects Set one of the detections to upper case We expect existing labels to have been updated according to the mapping Expect newly created labels to retain whatever class they were annotated as Test images Test dict Test modification Test videos | 1,000 | en | 0.867601 |
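The round-trip every test above exercises, reduced to a hedged sketch (dataset and key names are placeholders):
import fiftyone as fo
import fiftyone.zoo as foz

dataset = foz.load_zoo_dataset("quickstart", max_samples=1).clone()
anno_key = "demo_run"
# Upload labels to CVAT, edit them in the UI (or via the API helpers used
# by the tests), then merge the edits back and delete the CVAT tasks.
results = dataset.annotate(anno_key, backend="cvat", label_field="ground_truth")
results.print_status()
dataset.load_annotations(anno_key, cleanup=True)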
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
class StringProto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = ""
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.value_))
return n + 1
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_value(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatString(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.STRING,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class Integer32Proto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.value_)
return n + 1
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt32(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatInt32(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class Integer64Proto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.value_)
return n + 1
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatInt64(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class BoolProto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
return n + 2
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putBoolean(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_value(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormatBool(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class DoubleProto(ProtocolBuffer.ProtocolMessage):
has_value_ = 0
value_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def value(self): return self.value_
def set_value(self, x):
self.has_value_ = 1
self.value_ = x
def clear_value(self):
if self.has_value_:
self.has_value_ = 0
self.value_ = 0.0
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_value()): self.set_value(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
return initialized
def ByteSize(self):
n = 0
return n + 9
def Clear(self):
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(9)
out.putDouble(self.value_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 9:
self.set_value(d.getDouble())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_value_: res+=prefix+("value: %s\n" % self.DebugFormat(self.value_))
return res
kvalue = 1
_TEXT = (
"ErrorCode",
"value",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.DOUBLE,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class VoidProto(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n + 0
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
_TEXT = (
"ErrorCode",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
__all__ = ['StringProto','Integer32Proto','Integer64Proto','BoolProto','DoubleProto','VoidProto']
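# Illustrative round-trip (a sketch under assumptions, not part of the generated
# file; `Encode` is the legacy ProtocolMessage serialization entry point):
#   msg = StringProto()
#   msg.set_value("hello")
#   data = msg.Encode()
#   clone = StringProto(data)   # __init__ parses via MergeFromString
#   assert clone.value() == "hello"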
| google/appengine/api/api_base_pb.py | 10,601 | !/usr/bin/env python Copyright 2007 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 569 | en | 0.841457 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import input_data
import c3d_model
import numpy as np
# Basic model parameters as external flags.
flags = tf.app.flags
gpu_num = 1
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,c3d_model.NUM_FRAMES_PER_CLIP,c3d_model.CROP_SIZE,
c3d_model.CROP_SIZE,c3d_model.CHANNELS))
labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
return images_placeholder, labels_placeholder
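# Illustrative feed (assumed shapes follow the c3d_model constants above):
#   images_ph, labels_ph = placeholder_inputs(batch_size)
#   sess.run(fetches, feed_dict={images_ph: clip_batch, labels_ph: label_batch})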
def _variable_on_cpu(name, shape, initializer):
#with tf.device('/cpu:%d' % cpu_id):
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
var = _variable_on_cpu(name, shape, tf.truncated_normal_initializer(stddev=stddev))
if wd is not None:
weight_decay = tf.nn.l2_loss(var) * wd
tf.add_to_collection('losses', weight_decay)
return var
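# Illustrative note (an assumption about the intended training setup): the decay
# terms collected above are typically summed into the total objective, e.g.:
#   total_loss = task_loss + tf.add_n(tf.compat.v1.get_collection('losses'))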
def run_test(ds_dir, mean_file, model_name, test_list_file, batch_size):
tf.reset_default_graph()
try:
FLAGS = flags.FLAGS
FLAGS.batch_size = batch_size
except:
flags.DEFINE_integer('batch_size', batch_size, 'Batch size.')
FLAGS = flags.FLAGS
#model_name = "./models-5sec/c3d_ucf_model-4999"
#model_name = "./models.5sec/c3d_ucf_model-75450"
#model_name = "./models-1sec/c3d_ucf_model-4999"
#model_name = "./models.5sec.summarized.1sec/c3d_ucf_model-4999"
#model_name = "./models-multi-5sec-5sec_sum_1/c3d_ucf_model-4999"
#model_name = "./models-multi-5-5sum1/c3d_ucf_model-9999"
num_test_videos = len(list(open(test_list_file,'r')))
print("Number of test videos={}".format(num_test_videos))
  # max_bt_sz = -1
#
# for factor in range(1, 31):
# if num_test_videos%factor==0:
# max_bt_sz=factor
# if max_bt_sz == 1:
# print("no good batchsize available, setting to 25")
# max_bt_sz = 20
# FLAGS.batch_size = max_bt_sz
# print("batch size:", FLAGS.batch_size)
# Get the sets of images and labels for testing
images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size * gpu_num)
with tf.variable_scope('var_name') as var_scope:
weights = {
'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.04, 0.00),
'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.04, 0.00),
'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.04, 0.00),
'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.04, 0.00),
'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.04, 0.00),
'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.04, 0.00),
'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.04, 0.00),
'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.04, 0.00),
'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.04, 0.001),
'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.04, 0.002),
'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.04, 0.005)
}
biases = {
'bc1': _variable_with_weight_decay('bc1', [64], 0.04, 0.0),
'bc2': _variable_with_weight_decay('bc2', [128], 0.04, 0.0),
'bc3a': _variable_with_weight_decay('bc3a', [256], 0.04, 0.0),
'bc3b': _variable_with_weight_decay('bc3b', [256], 0.04, 0.0),
'bc4a': _variable_with_weight_decay('bc4a', [512], 0.04, 0.0),
'bc4b': _variable_with_weight_decay('bc4b', [512], 0.04, 0.0),
'bc5a': _variable_with_weight_decay('bc5a', [512], 0.04, 0.0),
'bc5b': _variable_with_weight_decay('bc5b', [512], 0.04, 0.0),
'bd1': _variable_with_weight_decay('bd1', [4096], 0.04, 0.0),
'bd2': _variable_with_weight_decay('bd2', [4096], 0.04, 0.0),
'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.04, 0.0),
}
logits = []
for gpu_index in range(0, gpu_num):
with tf.device('/gpu:%d' % gpu_index):
logit = c3d_model.inference_c3d(images_placeholder[gpu_index * FLAGS.batch_size:(gpu_index + 1)
* FLAGS.batch_size,:,:,:,:],
0,
FLAGS.batch_size,
weights,
biases)
logits.append(logit)
logits = tf.concat(logits,0)
norm_score = tf.nn.softmax(logits)
saver = tf.train.Saver()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
# Restoring a saved model.
  if ".meta" not in model_name:
    saver = tf.train.import_meta_graph(model_name + '.meta')
  else:
    # saver = tf.train.import_meta_graph(model_name)
    saver = tf.train.Saver(list(weights.values()) + list(biases.values()))
  saver.restore(sess, model_name)
# And then after everything is built, start the testing loop.
  # Line-buffered so partial results survive interruption; buffering=0 is
  # invalid for text-mode files on Python 3.
  write_file = open("predict_ret.txt", "w+", buffering=1)
next_start_pos = 0
all_steps = int((num_test_videos - 1) / (FLAGS.batch_size * gpu_num) + 1)
print ("num_test_videos, batch_size, gpu_num,all steps", num_test_videos, FLAGS.batch_size, gpu_num, all_steps)
total_testing_duration = 0
for step in range(all_steps):
# Fill a feed dictionary with the actual set of images and labels
# for this particular testing step.
start_time = time.time()
# try:
test_images, test_labels, next_start_pos, _, valid_len = \
input_data.read_clip_and_label(
ds_dir,
mean_file,
test_list_file,
FLAGS.batch_size * gpu_num,
start_pos=next_start_pos,
num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP
)
# except:
# print("exception occured loading at step:", step)
# try:
predict_score = norm_score.eval(
session=sess,
feed_dict={images_placeholder: test_images}
)
# except:
# print("exception occured prediction at step:", step)
duration = time.time() - start_time
print('Step %d: %.3f sec' % (step, duration), 'next start index:', next_start_pos)
total_testing_duration += duration
# try:
for i in range(0, valid_len):
      true_label = test_labels[i]
      top1_predicted_label = np.argmax(predict_score[i])
      # Write results: true label, class prob for true label, predicted label, class prob for predicted label
      write_file.write('{}, {}, {}, {}\n'.format(
        true_label,
        predict_score[i][true_label],
        top1_predicted_label,
        predict_score[i][top1_predicted_label]))
# except:
# print ("exception occured saving predictions at step:", step)
# break # test only 1 batch
print('Prediction time taken =', total_testing_duration)
import datetime
now = datetime.datetime.now()
with open('stats.txt', 'a') as f:
f.write(now.strftime("%Y-%m-%d %H:%M\n"))
f.write(" testing time:"+ str(total_testing_duration) + "\n")
write_file.close()
print("done")
import sys
def main(_):
# run_test(sys.argv[1])
ds_dir = "/home/bassel/data/office-actions/office_actions_19/short_clips/resized_frms"
mean_file = "../c3d_data_preprocessing/oa_kinetics_calculated_mean.npy"
model_name = "c3d_ucf_model-14698"
testing_file = ""
TESTING_BATCH_SIZE = 16
run_test(ds_dir, mean_file, "model/" + model_name, testing_file, TESTING_BATCH_SIZE)
if __name__ == '__main__':
tf.app.run()
| c3d_model/predict_c3d_ucf101.py | 10,602 | Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
Evaluates the C3D network on video clips using a feed dictionary.
Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=missing-docstring pylint: disable=redefined-builtin Basic model parameters as external flags. Note that the shapes of the placeholders match the shapes of the full image and label tensors, except the first dimension is now batch_size rather than the full size of the train or test data sets.with tf.device('/cpu:%d' % cpu_id):model_name = "./models-5sec/c3d_ucf_model-4999"model_name = "./models.5sec/c3d_ucf_model-75450"model_name = "./models-1sec/c3d_ucf_model-4999"model_name = "./models.5sec.summarized.1sec/c3d_ucf_model-4999"model_name = "./models-multi-5sec-5sec_sum_1/c3d_ucf_model-4999"model_name = "./models-multi-5-5sum1/c3d_ucf_model-9999" max_bt_sz = -1;min for factor in range(1, 31): if num_test_videos%factor==0: max_bt_sz=factor if max_bt_sz == 1: print("no good batchsize available, setting to 25") max_bt_sz = 20 FLAGS.batch_size = max_bt_sz print("batch size:", FLAGS.batch_size) Get the sets of images and labels for testing Restoring a saved model. saver = tf.train.import_meta_graph(model_name) And then after everything is built, start the testing loop. Fill a feed dictionary with the actual set of images and labels for this particular testing step. try: except: print("exception occured loading at step:", step) try: except: print("exception occured prediction at step:", step) try: Write results: true label, class prob for true label, predicted label, class prob for predicted label except: print ("exception occured saving predictions at step:", step) break test only 1 batch run_test(sys.argv[1]) | 2,699 | en | 0.757486 |
#!/usr/bin/env python
"""GRR HTTP server implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
import hashlib
import hmac
import logging
import os
import string
from cryptography.hazmat.primitives import constant_time
from future.builtins import int
from future.builtins import str
import jinja2
import psutil
from typing import Text
from werkzeug import exceptions as werkzeug_exceptions
from werkzeug import routing as werkzeug_routing
from werkzeug import wrappers as werkzeug_wrappers
from werkzeug import wsgi as werkzeug_wsgi
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.util import precondition
from grr_response_server import access_control
from grr_response_server import server_logging
from grr_response_server.gui import http_api
from grr_response_server.gui import webauth
CSRF_DELIMITER = b":"
CSRF_TOKEN_DURATION = rdfvalue.Duration("10h")
def GenerateCSRFToken(user_id, time):
"""Generates a CSRF token based on a secret key, id and time."""
precondition.AssertType(user_id, Text)
precondition.AssertOptionalType(time, int)
time = time or rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
secret = config.CONFIG.Get("AdminUI.csrf_secret_key", None)
if secret is None:
raise ValueError("CSRF secret not available.")
digester = hmac.new(secret.encode("ascii"), digestmod=hashlib.sha256)
digester.update(user_id.encode("ascii"))
digester.update(CSRF_DELIMITER)
digester.update(str(time).encode("ascii"))
digest = digester.digest()
token = base64.urlsafe_b64encode(b"%s%s%d" % (digest, CSRF_DELIMITER, time))
return token.rstrip(b"=")
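# Illustrative verification sketch (hypothetical user "alice"; mirrors the check
# performed in ValidateCSRFTokenOrRaise below):
#   token = GenerateCSRFToken(u"alice", None)
#   decoded = base64.urlsafe_b64decode(token + b"==")
#   _digest, token_time = decoded.rsplit(CSRF_DELIMITER, 1)
#   assert constant_time.bytes_eq(token, GenerateCSRFToken(u"alice", int(token_time)))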
def StoreCSRFCookie(user, response):
"""Decorator for WSGI handler that inserts CSRF cookie into response."""
csrf_token = GenerateCSRFToken(user, None)
response.set_cookie(
"csrftoken", csrf_token, max_age=CSRF_TOKEN_DURATION.seconds)
def ValidateCSRFTokenOrRaise(request):
"""Decorator for WSGI handler that checks CSRF cookie against the request."""
# CSRF check doesn't make sense for GET/HEAD methods, because they can
# (and are) used when downloading files through <a href> links - and
# there's no way to set X-CSRFToken header in this case.
if request.method in ("GET", "HEAD"):
return
# In the ideal world only JavaScript can be used to add a custom header, and
# only within its origin. By default, browsers don't allow JavaScript to
# make cross origin requests.
#
# Unfortunately, in the real world due to bugs in browsers plugins, it can't
# be guaranteed that a page won't set an HTTP request with a custom header
# set. That's why we also check the contents of a header via an HMAC check
# with a server-stored secret.
#
# See for more details:
# https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet
# (Protecting REST Services: Use of Custom Request Headers).
csrf_token = request.headers.get("X-CSRFToken", "").encode("ascii")
if not csrf_token:
logging.info("Did not find headers CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("CSRF token is missing")
try:
decoded = base64.urlsafe_b64decode(csrf_token + b"==")
digest, token_time = decoded.rsplit(CSRF_DELIMITER, 1)
token_time = int(token_time)
except (TypeError, ValueError):
logging.info("Malformed CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Malformed CSRF token")
if len(digest) != hashlib.sha256().digest_size:
logging.info("Invalid digest size for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Malformed CSRF token digest")
expected = GenerateCSRFToken(request.user, token_time)
if not constant_time.bytes_eq(csrf_token, expected):
logging.info("Non-matching CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Non-matching CSRF token")
current_time = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
if current_time - token_time > CSRF_TOKEN_DURATION.microseconds:
logging.info("Expired CSRF token for: %s", request.path)
raise werkzeug_exceptions.Forbidden("Expired CSRF token")
class RequestHasNoUser(AttributeError):
"""Error raised when accessing a user of an unautenticated request."""
class HttpRequest(werkzeug_wrappers.Request):
"""HTTP request object to be used in GRR."""
def __init__(self, *args, **kwargs):
super(HttpRequest, self).__init__(*args, **kwargs)
self._user = None
self.token = None
self.timestamp = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
self.method_metadata = None
self.parsed_args = None
@property
def user(self):
if self._user is None:
raise RequestHasNoUser(
"Trying to access Request.user while user is unset.")
if not self._user:
raise RequestHasNoUser(
"Trying to access Request.user while user is empty.")
return self._user
@user.setter
def user(self, value):
if not isinstance(value, Text):
message = "Expected instance of '%s' but got value '%s' of type '%s'"
message %= (Text, value, type(value))
raise TypeError(message)
self._user = value
def LogAccessWrapper(func):
"""Decorator that ensures that HTTP access is logged."""
def Wrapper(request, *args, **kwargs):
"""Wrapping function."""
try:
response = func(request, *args, **kwargs)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
except Exception: # pylint: disable=g-broad-except
# This should never happen: wrapped function is supposed to handle
# all possible exceptions and generate a proper Response object.
# Still, handling exceptions here to guarantee that the access is logged
# no matter what.
response = werkzeug_wrappers.Response("", status=500)
server_logging.LOGGER.LogHttpAdminUIAccess(request, response)
raise
return response
return Wrapper
def EndpointWrapper(func):
return webauth.SecurityCheck(LogAccessWrapper(func))
class AdminUIApp(object):
"""Base class for WSGI GRR app."""
def __init__(self):
self.routing_map = werkzeug_routing.Map()
self.routing_map.add(
werkzeug_routing.Rule(
"/",
methods=["HEAD", "GET"],
endpoint=EndpointWrapper(self._HandleHomepage)))
self.routing_map.add(
werkzeug_routing.Rule(
"/api/<path:path>",
methods=["HEAD", "GET", "POST", "PUT", "PATCH", "DELETE"],
endpoint=EndpointWrapper(self._HandleApi)))
self.routing_map.add(
werkzeug_routing.Rule(
"/help/<path:path>",
methods=["HEAD", "GET"],
endpoint=EndpointWrapper(self._HandleHelp)))
def _BuildRequest(self, environ):
return HttpRequest(environ)
def _BuildToken(self, request, execution_time):
"""Build an ACLToken from the request."""
token = access_control.ACLToken(
username=request.user,
reason=request.args.get("reason", ""),
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime.Now() + execution_time)
for field in ["Remote_Addr", "X-Forwarded-For"]:
remote_addr = request.headers.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token
def _HandleHomepage(self, request):
"""Renders GRR home page by rendering base.html Jinja template."""
_ = request
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config.CONFIG["AdminUI.template_root"]),
autoescape=True)
create_time = psutil.Process(os.getpid()).create_time()
context = {
"heading":
config.CONFIG["AdminUI.heading"],
"report_url":
config.CONFIG["AdminUI.report_url"],
"help_url":
config.CONFIG["AdminUI.help_url"],
"timestamp":
utils.SmartStr(create_time),
"use_precompiled_js":
config.CONFIG["AdminUI.use_precompiled_js"],
# Used in conjunction with FirebaseWebAuthManager.
"firebase_api_key":
config.CONFIG["AdminUI.firebase_api_key"],
"firebase_auth_domain":
config.CONFIG["AdminUI.firebase_auth_domain"],
"firebase_auth_provider":
config.CONFIG["AdminUI.firebase_auth_provider"],
"grr_version":
config.CONFIG["Source.version_string"]
}
template = env.get_template("base.html")
response = werkzeug_wrappers.Response(
template.render(context), mimetype="text/html")
# For a redirect-based Firebase authentication scheme we won't have any
# user information at this point - therefore checking if the user is
# present.
try:
StoreCSRFCookie(request.user, response)
except RequestHasNoUser:
pass
return response
def _HandleApi(self, request):
"""Handles API requests."""
# Checks CSRF token. CSRF token cookie is updated when homepage is visited
# or via GetPendingUserNotificationsCount API call.
ValidateCSRFTokenOrRaise(request)
response = http_api.RenderHttpResponse(request)
# GetPendingUserNotificationsCount is an API method that is meant
# to be invoked very often (every 10 seconds). So it's ideal
# for updating the CSRF token.
# We should also store the CSRF token if it wasn't yet stored at all.
if (("csrftoken" not in request.cookies) or response.headers.get(
"X-API-Method", "") == "GetPendingUserNotificationsCount"):
StoreCSRFCookie(request.user, response)
return response
def _RedirectToRemoteHelp(self, path):
"""Redirect to GitHub-hosted documentation."""
allowed_chars = set(string.ascii_letters + string.digits + "._-/")
if not set(path) <= allowed_chars:
raise RuntimeError("Unusual chars in path %r - "
"possible exploit attempt." % path)
target_path = os.path.join(config.CONFIG["AdminUI.docs_location"], path)
# We have to redirect via JavaScript to have access to and to preserve the
# URL hash. We don't know the hash part of the url on the server.
return werkzeug_wrappers.Response(
"""
<script>
var friendly_hash = window.location.hash;
window.location = '%s' + friendly_hash;
</script>
""" % target_path,
mimetype="text/html")
def _HandleHelp(self, request):
"""Handles help requests."""
help_path = request.path.split("/", 2)[-1]
if not help_path:
raise werkzeug_exceptions.Forbidden("Error: Invalid help path.")
# Proxy remote documentation.
return self._RedirectToRemoteHelp(help_path)
@werkzeug_wsgi.responder
def __call__(self, environ, start_response):
"""Dispatches a request."""
request = self._BuildRequest(environ)
matcher = self.routing_map.bind_to_environ(environ)
try:
endpoint, _ = matcher.match(request.path, request.method)
return endpoint(request)
except werkzeug_exceptions.NotFound as e:
logging.info("Request for non existent url: %s [%s]", request.path,
request.method)
return e
except werkzeug_exceptions.HTTPException as e:
logging.exception("http exception: %s [%s]", request.path, request.method)
return e
def WSGIHandler(self):
"""Returns GRR's WSGI handler."""
sdm = werkzeug_wsgi.SharedDataMiddleware(self, {
"/": config.CONFIG["AdminUI.document_root"],
})
# Use DispatcherMiddleware to make sure that SharedDataMiddleware is not
# used at all if the URL path doesn't start with "/static". This is a
# workaround for cases when unicode URLs are used on systems with
# non-unicode filesystems (as detected by Werkzeug). In this case
# SharedDataMiddleware may fail early while trying to convert the
# URL into the file path and not dispatch the call further to our own
# WSGI handler.
return werkzeug_wsgi.DispatcherMiddleware(self, {
"/static": sdm,
})
| grr/server/grr_response_server/gui/wsgiapp.py | 12,098 | Base class for WSGI GRR app.
HTTP request object to be used in GRR.
Error raised when accessing a user of an unauthenticated request.
Generates a CSRF token based on a secret key, id and time.
Decorator that ensures that HTTP access is logged.
Decorator for WSGI handler that inserts CSRF cookie into response.
Decorator for WSGI handler that checks CSRF cookie against the request.
Returns GRR's WSGI handler.
Wrapping function.
Build an ACLToken from the request.
Handles API requests.
Handles help requests.
Renders GRR home page by rendering base.html Jinja template.
Redirect to GitHub-hosted documentation.
Dispatches a request.
GRR HTTP server implementation.
!/usr/bin/env python CSRF check doesn't make sense for GET/HEAD methods, because they can (and are) used when downloading files through <a href> links - and there's no way to set X-CSRFToken header in this case. In the ideal world only JavaScript can be used to add a custom header, and only within its origin. By default, browsers don't allow JavaScript to make cross origin requests. Unfortunately, in the real world due to bugs in browsers plugins, it can't be guaranteed that a page won't set an HTTP request with a custom header set. That's why we also check the contents of a header via an HMAC check with a server-stored secret. See for more details: https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet (Protecting REST Services: Use of Custom Request Headers). pylint: disable=g-broad-except This should never happen: wrapped function is supposed to handle all possible exceptions and generate a proper Response object. Still, handling exceptions here to guarantee that the access is logged no matter what. Used in conjunction with FirebaseWebAuthManager. For a redirect-based Firebase authentication scheme we won't have any user information at this point - therefore checking if the user is present. Checks CSRF token. CSRF token cookie is updated when homepage is visited or via GetPendingUserNotificationsCount API call. GetPendingUserNotificationsCount is an API method that is meant to be invoked very often (every 10 seconds). So it's ideal for updating the CSRF token. We should also store the CSRF token if it wasn't yet stored at all. We have to redirect via JavaScript to have access to and to preserve the URL hash. We don't know the hash part of the url on the server. Proxy remote documentation. Use DispatcherMiddleware to make sure that SharedDataMiddleware is not used at all if the URL path doesn't start with "/static". This is a workaround for cases when unicode URLs are used on systems with non-unicode filesystems (as detected by Werkzeug). In this case SharedDataMiddleware may fail early while trying to convert the URL into the file path and not dispatch the call further to our own WSGI handler. | 2,834 | en | 0.863156 |
# coding: utf8
# try something like
def index():
rows = db((db.activity.type=='stand')&(db.activity.status=='accepted')).select()
if rows:
return dict(projects=rows)
else:
return plugin_flatpage()
| controllers/stands.py | 262 | coding: utf8 try something like | 63 | en | 0.858722 |
"""Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to choose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
    if set to False all the actions are always deterministic (default: True)
update_eps_ph: float
    update epsilon to a new value, if negative no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to choose an action given an observation
Parameters
----------
observation: object
    Observation that can be fed into the output of make_obs_ph
stochastic: bool
    if set to False all the actions are always deterministic (default: True)
update_eps_ph: float
update epsilon to a new value, if negative no update happens
(default: no update)
reset_ph: bool
reset the perturbed policy by sampling a new perturbation
update_param_noise_threshold_ph: float
the desired threshold for the difference between non-perturbed and perturbed policy
update_param_noise_scale_ph: bool
whether or not to update the scale of the noise for the next time it is re-perturbed
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
    importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example for Atari
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
"""
return tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
)
def scope_name():
"""Returns the name of current scope as a string, e.g. deepq/q_func"""
return tf.compat.v1.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
"""Appends parent scope name to `relative_scope_name`"""
return scope_name() + "/" + relative_scope_name
def default_param_noise_filter(var):
if var not in tf.compat.v1.trainable_variables():
# We never perturb non-trainable vars.
return False
if "fully_connected" in var.name:
# We perturb fully-connected layers.
return True
# The remaining layers are likely conv or layer norm layers, which we do not wish to
# perturb (in the former case because they only extract features, in the latter case because
# we use them for normalization purposes). If you change your network, you will likely want
# to re-consider which layers to perturb and which to keep untouched.
return False
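# Illustrative alternative filter (an assumption, not upstream behavior): a
# custom predicate can be passed via `param_noise_filter_func` to change which
# layers get perturbed, e.g. perturbing every trainable variable:
#   build_train(..., param_noise=True,
#               param_noise_filter_func=lambda v: v in tf.compat.v1.trainable_variables())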
def build_act(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None):
"""Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name="update_eps")
eps = tf.compat.v1.get_variable("eps", (), initializer=tf.constant_initializer(0))
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
deterministic_actions = tf.argmax(q_values, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(ob, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps)
return act
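# Minimal usage sketch (illustrative; `obs`, `make_obs_ph`, `q_func` and
# `num_actions` are assumed to be provided by the caller):
#   act = build_act(make_obs_ph, q_func, num_actions)
#   action = act(obs[None], update_eps=0.1)[0]   # batched observation in, action out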
def build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope="deepq", reuse=None,
param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.compat.v1.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.compat.v1.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.compat.v1.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.compat.v1.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.compat.v1.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.compat.v1.placeholder(tf.bool, (), name="reset")
eps = tf.compat.v1.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.compat.v1.get_variable("param_noise_scale", (),
initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.compat.v1.get_variable("param_noise_threshold", (),
initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.compat.v1.assign(perturbed_var, var + tf.compat.v1.random_normal(shape=tf.shape(var), mean=0.,
stddev=param_noise_scale))
else:
# Do not perturb, just assign.
                    op = tf.compat.v1.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(
tf.nn.softmax(q_values) * (tf.compat.v1.log(tf.nn.softmax(q_values)) - tf.compat.v1.log(tf.nn.softmax(q_values_adaptive))),
axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph,
lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.compat.v1.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"),
lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(
inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph,
update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False,
update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True,
update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
def build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,
double_q=True, scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
        function to select an action given an observation.
        See the top of the file for details.
    train: (object, np.array, np.array, object, np.array, np.array) -> np.array
        optimize the error in Bellman's equation.
        See the top of the file for details.
    update_target: () -> ()
        copy the parameters from optimized Q function to the target Q function.
        See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
if param_noise:
act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,
param_noise_filter_func=param_noise_filter_func)
else:
act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)
with tf.compat.v1.variable_scope(scope, reuse=reuse):
# set up placeholders
obs_t_input = make_obs_ph("obs_t")
act_t_ph = tf.compat.v1.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.compat.v1.placeholder(tf.float32, [None], name="reward")
obs_tp1_input = make_obs_ph("obs_tp1")
done_mask_ph = tf.compat.v1.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.compat.v1.placeholder(tf.float32, [None], name="weight")
# q network evaluation
q_t = q_func(obs_t_input.get(), num_actions, scope="q_func", reuse=True) # reuse parameters from act
q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=tf.compat.v1.get_variable_scope().name + "/q_func")
        # target q network evaluation
q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope="target_q_func")
target_q_func_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
scope=tf.compat.v1.get_variable_scope().name + "/target_q_func")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope="q_func", reuse=True)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)
else:
q_tp1_best = tf.reduce_max(q_tp1, 1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = U.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
# compute optimization op (potentially with gradient clipping)
if grad_norm_clipping is not None:
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
optimize_expr = optimizer.apply_gradients(gradients)
else:
optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# Create callable functions
train = U.function(
inputs=[
obs_t_input,
act_t_ph,
rew_t_ph,
obs_tp1_input,
done_mask_ph,
importance_weights_ph
],
outputs=td_error,
updates=[optimize_expr]
)
update_target = U.function([], [], updates=[update_target_expr])
q_values = U.function([obs_t_input], q_t)
return act_f, train, update_target, {'q_values': q_values}
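# Minimal end-to-end sketch (illustrative only; `env`, `model` and
# `ObservationInput` are assumptions about the caller, not part of this module):
#   from baselines.deepq.utils import ObservationInput
#   act, train, update_target, debug = build_train(
#       make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
#       q_func=model,
#       num_actions=env.action_space.n,
#       optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=5e-4),
#       gamma=0.99,
#   )
#   update_target()  # sync the target network after variable initialization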
| baselines/deepq/build_graph.py | 21,701 | Appends parent scope name to `relative_scope_name`
Creates the act function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
    function to select an action given an observation.
    See the top of the file for details.
Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
    function to select an action given an observation.
    See the top of the file for details.
Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.compat.v1.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select and action given observation.
` See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
` See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
Returns the name of the current scope as a string, e.g. deepq/q_func
Get variables inside a scope
The scope can be specified as a string
Parameters
----------
scope: str or VariableScope
scope in which the variables reside.
trainable_only: bool
whether or not to return only the variables that were marked as trainable.
Returns
-------
vars: [tf.Variable]
list of variables in `scope`.
Deep Q learning graph
The functions in this file can be used to create the following functions:
======= act ========
Function to choose an action given an observation
Parameters
----------
observation: object
Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
update epsilon to a new value; if negative, no update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to choose an action given an observation
Parameters
----------
observation: object
Observation that can be fed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
update epsilon to a new value, if negative no update happens
(default: no update)
reset_ph: bool
reset the perturbed policy by sampling a new perturbation
update_param_noise_threshold_ph: float
the desired threshold for the difference between non-perturbed and perturbed policy
update_param_noise_scale_ph: bool
whether or not to update the scale of the noise for the next time it is re-perturbed
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stabilize the learning. For example, for Atari,
Q' is set to Q once every 10000 training steps.
We never perturb non-trainable vars. We perturb fully-connected layers.
The remaining layers are likely conv or layer-norm layers, which we do not wish to perturb
(in the former case because they only extract features, in the latter case because we use
them for normalization purposes). If you change your network, you will likely want to
reconsider which layers to perturb and which to keep untouched.
Unmodified Q. Perturbable Q used for the actual rollout.
We have to wrap this code into a function due to the way tf.cond() works. See
https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond
for a more detailed discussion.
Perturb this variable. Do not perturb, just assign.
Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
of the network and measures the effect of that perturbation in action space. If the
perturbation is too big, reduce the scale of the perturbation; otherwise increase it.
Functionality to update the threshold for parameter space noise.
Put everything together. Set up placeholders. Q network evaluation (reuse parameters
from act). Target Q network evaluation. Q scores for the actions which we know were
selected in the given state. Compute the estimate of the best possible value starting
from the state at t + 1. Compute the RHS of the Bellman equation. Compute the error
(potentially clipped). Compute the optimization op (potentially with gradient clipping).
update_target_fn will be called periodically to copy the Q network to the target Q network.
Create callable functions. | 10,006 | en | 0.744969 |
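Taken together, the docstrings above describe the four callables returned by the graph builder. Below is a minimal driver-loop sketch, assuming the baselines `deepq.build_train` factory with exactly the documented signature; the network, hyperparameters, and CartPole environment are illustrative choices, not part of the file itself.

import gym
import numpy as np
import tensorflow as tf

import baselines.common.tf_util as U
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer


def model(inpt, num_actions, scope, reuse=False):
    # q_func with the documented (observation, num_actions, scope, reuse) signature.
    with tf.compat.v1.variable_scope(scope, reuse=reuse):
        hidden = tf.compat.v1.layers.dense(inpt, 64, activation=tf.nn.tanh)
        return tf.compat.v1.layers.dense(hidden, num_actions, activation=None)


with U.make_session(num_cpu=1):
    env = gym.make("CartPole-v0")
    # The docstring allows make_obs_ph to return a raw tf.compat.v1.placeholder.
    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=lambda name: tf.compat.v1.placeholder(
            tf.float32, [None] + list(env.observation_space.shape), name=name),
        q_func=model,
        num_actions=env.action_space.n,
        optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=5e-4),
        gamma=0.99,
        double_q=True,
    )
    U.initialize()
    update_target()  # start with the target network Q' equal to Q

    replay_buffer = ReplayBuffer(50000)
    obs = env.reset()
    for t in range(10000):
        # stochastic=True plus update_eps >= 0 first updates epsilon, then acts.
        action = act(obs[None], stochastic=True, update_eps=0.1)[0]
        new_obs, rew, done, _ = env.step(action)
        replay_buffer.add(obs, action, rew, new_obs, float(done))
        obs = env.reset() if done else new_obs
        if t > 1000:
            obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
            # Uniform replay, so every importance weight is 1.0.
            train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))
        if t % 500 == 0:
            update_target()  # periodically copy Q into the lagging target Q'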
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import copy
from auto_gen import DBVistrail as _DBVistrail
from auto_gen import DBAdd, DBChange, DBDelete, DBAbstraction, DBGroup, \
DBModule
from id_scope import IdScope
class DBVistrail(_DBVistrail):
def __init__(self, *args, **kwargs):
_DBVistrail.__init__(self, *args, **kwargs)
self.idScope = IdScope(remap={DBAdd.vtType: 'operation',
DBChange.vtType: 'operation',
DBDelete.vtType: 'operation',
DBAbstraction.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
self.idScope.setBeginId('action', 1)
self.db_objects = {}
# keep a reference to the current logging information here
self.log_filename = None
self.log = None
def __copy__(self):
return DBVistrail.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBVistrail.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBVistrail
cp.idScope = copy.copy(self.idScope)
cp.db_objects = copy.copy(self.db_objects)
cp.log_filename = self.log_filename
if self.log is not None:
cp.log = copy.copy(self.log)
else:
cp.log = None
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBVistrail()
new_obj = _DBVistrail.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
if hasattr(old_obj, 'log_filename'):
new_obj.log_filename = old_obj.log_filename
if hasattr(old_obj, 'log'):
new_obj.log = old_obj.log
return new_obj
def update_id_scope(self):
def getOldObjId(operation):
if operation.vtType == 'change':
return operation.db_oldObjId
return operation.db_objectId
def getNewObjId(operation):
if operation.vtType == 'change':
return operation.db_newObjId
return operation.db_objectId
for action in self.db_actions:
self.idScope.updateBeginId('action', action.db_id+1)
if action.db_session is not None:
self.idScope.updateBeginId('session', action.db_session + 1)
for operation in action.db_operations:
self.idScope.updateBeginId('operation', operation.db_id+1)
if operation.vtType == 'add' or operation.vtType == 'change':
# update ids of data
self.idScope.updateBeginId(operation.db_what,
getNewObjId(operation)+1)
if operation.db_data is None:
if operation.vtType == 'change':
operation.db_objectId = operation.db_oldObjId
self.db_add_object(operation.db_data)
for annotation in action.db_annotations:
self.idScope.updateBeginId('annotation', annotation.db_id+1)
def db_add_object(self, obj):
self.db_objects[(obj.vtType, obj.db_id)] = obj
def db_get_object(self, type, id):
return self.db_objects.get((type, id), None)
def db_update_object(self, obj, **kwargs):
# want to swap out old object with a new version
# need this for updating aliases...
# hack it using setattr...
real_obj = self.db_objects[(obj.vtType, obj.db_id)]
for (k, v) in kwargs.iteritems():
if hasattr(real_obj, k):
setattr(real_obj, k, v)
| vistrails/db/versions/v0_9_4/domain/vistrail.py | 5,665 | Copyright (C) 2014-2016, New York University. Copyright (C) 2011-2014, NYU-Poly. Copyright (C) 2006-2011, University of Utah. All rights reserved. Contact: contact@vistrails.org This file is part of VisTrails. "Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the New York University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." keep a reference to the current logging information here update ids of data want to swap out old object with a new version need this for updating aliases... hack it using setattr... | 1,835 | en | 0.870224 |
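One detail worth calling out in the class above: `__copy__` delegates to `do_copy`, so the stdlib `copy.copy` path and the explicit id-remapping path share a single implementation. A minimal sketch of the two call sites follows; the exact semantics of `new_ids` are inferred from the signature, not verified against the rest of VisTrails.

import copy

v = DBVistrail()

# copy.copy(v) dispatches to DBVistrail.__copy__, which forwards to do_copy()
# with defaults -- ids are preserved.
v2 = copy.copy(v)

# Presumably, passing new_ids=True allocates fresh ids from the given id_scope
# and records the old -> new mapping in id_remap.
id_remap = {}
v3 = v.do_copy(new_ids=True, id_scope=v.idScope, id_remap=id_remap)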
# Copyright (c) 2013, TeamPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from six.moves import range
from six import string_types
import frappe
import json
from frappe.utils import (getdate, cint, add_months, date_diff, add_days,
nowdate, get_datetime_str, cstr, get_datetime, now_datetime, format_datetime)
from datetime import datetime
from calendar import monthrange
from frappe import _, msgprint
from frappe.utils import flt
def execute(filters=None):
if not filters:
filters = {}
columns = get_columns()
data = []
row = []
conditions, filters = get_conditions(filters)
attendance = get_attendance(conditions,filters)
for att in attendance:
data.append(att)
return columns, data
def get_columns():
columns = [
_("ID") + ":Data:200",
_("Attendance Date") + ":Data:200",
_("Employee") + ":Data:120",
_("Employee Name") + ":Data:120",
_("Department") + ":Data:120",
_("Status") + ":Data:120",
# _("Present Shift") + ":Data:120"
]
return columns
def get_attendance(conditions, filters):
    attendance = frappe.db.sql("""Select name, employee, employee_name, department, attendance_date, shift, status
                    From `tabAttendance` Where status = "Absent" and docstatus = 1 and %s group by employee, attendance_date""" % conditions, filters, as_dict=1)
    employees = frappe.db.get_all("Employee", {"status": "Active"}, ["name"])
    row = []
    import pandas as pd
    mydates = pd.date_range(filters.from_date, filters.to_date).tolist()
    for emp in employees:
        # Collect this employee's absences in the period; the list is reset
        # per employee so counts do not leak across employees.
        absences = []
        for date in mydates:
            for att in attendance:
                if emp.name == att.employee and att.attendance_date == date.date():
                    absences.append((att, date.date()))
        # Only employees absent on three or more days are reported.
        if len(absences) >= 3:
            for att, ab_date in absences:
                row.append((att.name, ab_date, att.employee, att.employee_name, att.department, att.status))
    return row
def get_conditions(filters):
conditions = ""
if filters.get("from_date"): conditions += " attendance_date >= %(from_date)s"
if filters.get("to_date"): conditions += " and attendance_date <= %(to_date)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
if filters.get("department"): conditions += " and department = %(department)s"
return conditions, filters | hrpro/hrpro/report/continuous_absent_report/continuous_absent_report.py | 2,718 | Copyright (c) 2013, TeamPRO and contributors For license information, please see license.txt _("Present Shift") + ":Data:120" | 125 | en | 0.658433 |
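If the intent of the report is strictly consecutive absences, as the name continuous_absent_report suggests, rather than a total count, the per-employee check could be tightened to look for runs of three or more calendar days. A hedged sketch of that variant, reusing the absences list built above; the helper name has_consecutive_run is hypothetical, not part of the report.

from datetime import timedelta

def has_consecutive_run(dates, run_length=3):
    # dates: datetime.date values on which the employee was absent.
    # Returns True if any run_length consecutive calendar days are present.
    dates = sorted(set(dates))
    run = 1
    for prev, cur in zip(dates, dates[1:]):
        run = run + 1 if (cur - prev).days == 1 else 1
        if run >= run_length:
            return True
    return run_length == 1 and bool(dates)

# In get_attendance, the threshold test would then become:
#     if has_consecutive_run([d for _, d in absences]):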
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
from useradmin.models import HuePermission
try:
perm = HuePermission.objects.get(app='metastore', action='read_only_access')
perm.delete()
except HuePermission.DoesNotExist:
pass
def backwards(self, orm):
perm, created = HuePermission.objects.get_or_create(app='metastore', action='read_only_access')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'useradmin.grouppermission': {
'Meta': {'object_name': 'GroupPermission'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'hue_permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['useradmin.HuePermission']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'useradmin.huepermission': {
'Meta': {'object_name': 'HuePermission'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'app': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'through': "orm['useradmin.GroupPermission']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'useradmin.ldapgroup': {
'Meta': {'object_name': 'LdapGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group'", 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'useradmin.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'HUE'", 'max_length': '64'}),
'home_directory': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['useradmin']
symmetrical = True
| apps/useradmin/src/useradmin/old_migrations/0003_remove_metastore_readonly_huepermission.py | 5,620 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
"""
.. module: lemur.users.models
:platform: unix
:synopsis: This module contains all of the models need to create a user within
lemur
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from sqlalchemy.orm import relationship
from sqlalchemy import Integer, String, Column, Boolean
from sqlalchemy.event import listen
from sqlalchemy_utils.types.arrow import ArrowType
from lemur.database import db
from lemur.models import roles_users
from lemur.extensions import bcrypt
def hash_password(mapper, connect, target):
"""
Helper function that is a listener and hashes passwords before
insertion into the database.
:param mapper:
:param connect:
:param target:
"""
target.hash_password()
class User(db.Model):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
password = Column(String(128))
active = Column(Boolean())
confirmed_at = Column(ArrowType())
username = Column(String(255), nullable=False, unique=True)
email = Column(String(128), unique=True)
profile_picture = Column(String(255))
roles = relationship('Role', secondary=roles_users, passive_deletes=True, backref=db.backref('user'), lazy='dynamic')
certificates = relationship('Certificate', backref=db.backref('user'), lazy='dynamic')
pending_certificates = relationship('PendingCertificate', backref=db.backref('user'), lazy='dynamic')
authorities = relationship('Authority', backref=db.backref('user'), lazy='dynamic')
keys = relationship('ApiKey', backref=db.backref('user'), lazy='dynamic')
logs = relationship('Log', backref=db.backref('user'), lazy='dynamic')
sensitive_fields = ('password',)
def check_password(self, password):
"""
Hash a given password and check it against the stored value
to determine its validity.
:param password:
:return:
"""
if self.password:
return bcrypt.check_password_hash(self.password, password)
def hash_password(self):
"""
Generate the secure hash for the password.
:return:
"""
if self.password:
self.password = bcrypt.generate_password_hash(self.password).decode('utf-8')
@property
def is_admin(self):
"""
Determine if the current user has the 'admin' role associated
with it.
:return:
"""
for role in self.roles:
if role.name == 'admin':
return True
def __repr__(self):
return "User(username={username})".format(username=self.username)
listen(User, 'before_insert', hash_password)
| lemur/users/models.py | 2,766 | Hash a given password and check it against the stored value
to determine its validity.
:param password:
:return:
Helper function that is a listener and hashes passwords before
insertion into the database.
:param mapper:
:param connect:
:param target:
Generate the secure hash for the password.
:return:
Determine if the current user has the 'admin' role associated
with it.
:return:
.. module: lemur.users.models
:platform: unix
:synopsis: This module contains all of the models need to create a user within
lemur
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com> | 701 | en | 0.766496 |
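Because of the before_insert listener above, callers never hash explicitly: assigning a plaintext password and committing is enough. A minimal round-trip sketch, assuming an application context in which db.session from lemur.database is usable; the user values are illustrative.

from lemur.database import db
from lemur.users.models import User

user = User(username="alice", email="alice@example.com",
            password="correct horse", active=True)
db.session.add(user)
db.session.commit()  # before_insert fires -> hash_password() bcrypt-hashes it

assert user.check_password("correct horse")        # True
assert not user.check_password("wrong password")   # False
# Note: check_password returns None (falsy) when no password is set at all.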
# SPDX-License-Identifier: MIT
# (c) 2019 The TJHSST Director 4.0 Development Team & Contributors
import asyncio
import json
from typing import Any, Dict
import websockets
from docker.models.services import Service
from ..docker.services import get_director_service_name, get_service_by_name
from ..docker.utils import create_client
from ..logs import DirectorSiteLogFollower
from .utils import mainloop_auto_cancel, wait_for_event
async def logs_handler(
websock: websockets.client.WebSocketClientProtocol,
params: Dict[str, Any],
stop_event: asyncio.Event,
) -> None:
client = create_client()
site_id = int(params["site_id"])
service: Service = get_service_by_name(client, get_director_service_name(site_id))
if service is None:
await websock.close()
return
async def echo_loop() -> None:
while True:
try:
msg = json.loads(await websock.recv())
except (websockets.exceptions.ConnectionClosed, asyncio.CancelledError):
break
if isinstance(msg, dict) and "heartbeat" in msg:
try:
await websock.send(json.dumps(msg))
except (websockets.exceptions.ConnectionClosed, asyncio.CancelledError):
break
async def log_loop(log_follower: DirectorSiteLogFollower) -> None:
try:
async for line in log_follower.iter_lines():
if not line:
break
await websock.send(json.dumps({"line": line}))
except (websockets.exceptions.ConnectionClosed, asyncio.CancelledError):
pass
async with DirectorSiteLogFollower(client, site_id) as log_follower:
await log_follower.start(last_n=10)
await mainloop_auto_cancel(
[echo_loop(), log_loop(log_follower), wait_for_event(stop_event)]
)
await websock.close()
| orchestrator/orchestrator/consumers/logs.py | 1,930 | SPDX-License-Identifier: MIT (c) 2019 The TJHSST Director 4.0 Development Team & Contributors | 93 | de | 0.332441 |
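mainloop_auto_cancel and wait_for_event come from this package's .utils module, whose bodies are not shown here. The usual shape of such helpers is asyncio.wait with FIRST_COMPLETED followed by cancelling the stragglers; the sketch below is an assumption about that behavior, not the project's actual code.

import asyncio
from typing import Awaitable, Iterable

async def mainloop_auto_cancel(coros: Iterable[Awaitable[None]]) -> None:
    # Run the coroutines until the first one completes, then cancel the rest.
    tasks = [asyncio.ensure_future(c) for c in coros]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    # Let the cancellations settle without raising from this helper.
    await asyncio.gather(*pending, return_exceptions=True)
    # Surface any exception from the task(s) that actually finished.
    for task in done:
        task.result()

async def wait_for_event(event: asyncio.Event) -> None:
    # Complete once the stop event is set, unblocking the main loop above.
    await event.wait()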