hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e875f9798e9734b835ace23874ebe746b1524a9f | 5,040 | py | Python | mp3tagger.py | ctrl-escp/mp3tagger | 8589279babceea36b187f86062cbc9a1bb7a9d07 | [
"MIT"
] | 1 | 2021-02-22T15:39:21.000Z | 2021-02-22T15:39:21.000Z | mp3tagger.py | ctrl-escp/mp3tagger | 8589279babceea36b187f86062cbc9a1bb7a9d07 | [
"MIT"
] | null | null | null | mp3tagger.py | ctrl-escp/mp3tagger | 8589279babceea36b187f86062cbc9a1bb7a9d07 | [
"MIT"
] | null | null | null | import os
import re
from mutagen.id3 import ID3, TIT2, TALB, TPE1, TRCK, TDRC, TXXX, ID3NoHeaderError
class MP3Tagger:
    """
    Iterate over music folders and tag mp3s by extracting the details from
    the structure of the folder and the files.
    Note: The structure has to be very specific. See example for structure info.
    E.g.
    Music Band - Album Name (year)/
        01 - First Song.mp3
        02 - Second Song.mp3
        ...
    Another Band - Different Albums/      # 2 cds
        01 - 01 - Song One.mp3            # Track number will appear as 0101
        01 - 02 - Song Two.mp3
        ...
        02 - 01 - Song One, Second CD.mp3
        02 - 02 - Song Two, Second CD.mp3
        ...
    """
    # ID3 frame name -> mutagen frame class used to build each tag
    tag_fields = {
        "TIT2": TIT2,               # Track title
        "TALB": TALB,               # Album name
        "TPE1": TPE1,               # Artist
        "TRCK": TRCK,               # Track number
        "TDRC": TDRC,               # Year
        "TXXX:TRACKTOTAL": TXXX     # Other
    }
    # Regex to verify if year is appended to the end of the album name
    year_regex = re.compile(r".* \(\d{4}\)$")
    # Regex to verify if track number is prefixed by album number
    double_album_regex = re.compile(r"\d{2} - \d{2} - .*")
    # The separator between the track number and the song name
    song_split = " - "

    def run(self, root_folder):
        """
        Entry point for running the MP3Tagger.

        :param str root_folder: folder whose sub-folders are album folders
        """
        for folder in os.listdir(root_folder):
            folder_path = os.path.join(root_folder, folder)
            # BUG FIX: os.listdir returns bare names; isdir must be checked on
            # the joined path, otherwise it only works when CWD == root_folder.
            if os.path.isdir(folder_path):
                self.parse_folder(folder_path)

    def replace_metadata(self, filename, data):
        """
        Remove any previous tags from the file and write new ones

        :param str filename: The full path for the mp3 file to be tagged
        :param dict data: The tag data in the form of valid_tag_field_name:value
        """
        try:
            tags = ID3(filename)
            tags.delete()  # Remove all previous tags
        except ID3NoHeaderError:
            # File has no ID3 header yet - start from an empty tag set
            tags = ID3()
        for field in data:
            tags[field] = self.tag_fields[field](encoding=3, text=data[field])
        tags.save(filename)
        # BUG FIX: previously printed the literal "(unknown)" instead of the
        # file that was actually updated.
        print(f"Updated {filename}")

    def parse_folder(self, folder_full_path):
        """
        Extract band and album names from the folder name, then tag every
        .mp3 file directly inside the folder.
        Optionally, extract the year if one can be found at the end of the
        folder name in parenthesis.

        :param str folder_full_path: Absolute full path of the target folder
        """
        current_album_data = {}
        try:
            folder_name = os.path.split(folder_full_path)[-1]
            # maxsplit=1 so album names that themselves contain " - " still parse
            artist_name, album_name = folder_name.split(" - ", 1)
            if self.year_regex.match(album_name):
                year_index = album_name.rfind("(")
                current_album_data["TDRC"] = album_name[year_index + 1: -1]
                # BUG FIX: strip the separator space left before "(year)" so
                # the album tag doesn't end with a trailing space.
                album_name = album_name[:year_index].rstrip()
            current_album_data["TALB"] = album_name
            current_album_data["TPE1"] = artist_name
            for f in os.listdir(folder_full_path):
                full_file_name = os.path.join(folder_full_path, f)
                if os.path.isfile(full_file_name) and f.endswith(".mp3"):
                    self.update_file_in_folder(folder_full_path, f, current_album_data)
            print(f"Finished parsing {folder_name}")
        except Exception as exp:
            # Best effort per folder: report and keep going with the next one
            print(f"Error parsing {folder_full_path}: {exp}")

    def update_file_in_folder(self, full_path, file_name, album_data):
        """
        Prepare the tag data with the currently available info from the folder
        and the filename, then write the tags to the file.

        :param str full_path: The path the file is located in
        :param str file_name: The name of the target file
        :param dict album_data: Data already extracted from the folder name
        """
        # Handle case where the track number is prefixed by the album number
        if self.double_album_regex.match(file_name):
            album_data["TRCK"] = "".join(file_name.split(self.song_split)[:2])
        else:
            album_data["TRCK"] = file_name.split(self.song_split)[0]
        album_data["TIT2"] = file_name[file_name.find(self.song_split) + len(self.song_split):]
        self.replace_metadata(os.path.join(full_path, file_name), album_data)
if __name__ == '__main__':
    from sys import argv
    try:
        if len(argv) == 2:
            start_folder = argv[1]
            if os.path.isdir(start_folder):
                mp3tagger = MP3Tagger()
                # BUG FIX: previously ran on the script's own directory
                # (os.path.dirname(__file__)) and ignored start_folder.
                mp3tagger.run(start_folder)
            else:
                print(f"{start_folder} isn't a valid folder to start with")
        else:
            # No/extra arguments: print the usage banner
            script_name = os.path.split(__file__)[-1]
            print(f"{script_name.split('.')[0]}\n"
                  f"{MP3Tagger.__doc__}\n"
                  f"Usage:\n\tpython {script_name} /path/to/target/folder")
    except Exception as e:
        print(f"Encountered a problem: {e}")
| 42 | 101 | 0.588294 |
96a9700b886e63b59b5dcee5c52aba8df5e5c99c | 18,357 | py | Python | openpype/hosts/maya/api/setdress.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | null | null | null | openpype/hosts/maya/api/setdress.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | null | null | null | openpype/hosts/maya/api/setdress.py | jonclothcat/OpenPype | d1208cbebc0a7f378de0062ccd653295c6399195 | [
"MIT"
] | null | null | null | import logging
import json
import os
import contextlib
import copy
import six
from maya import cmds
from avalon import io
from openpype.pipeline import (
discover_loader_plugins,
loaders_from_representation,
load_container,
update_container,
remove_container,
get_representation_path,
)
from openpype.hosts.maya.api.lib import (
matrix_equals,
unique_namespace
)
log = logging.getLogger("PackageLoader")
def to_namespace(node, namespace):
    """Return ``node`` renamed as if every path level lived in ``namespace``.

    Args:
        node (str): Node name / path (``|`` separated).
        namespace (str): Namespace to inject in front of every component.

    Returns:
        str: The node path with the namespace applied to each level.

    """
    prefix = "|{}:".format(namespace)
    return prefix.join(node.split("|"))
@contextlib.contextmanager
def namespaced(namespace, new=True):
    """Work inside namespace during context

    Args:
        namespace (str): The namespace to enter (and create when `new`)
        new (bool): When enabled this will rename the namespace to a unique
            namespace if the input namespace already exists.

    Yields:
        str: The namespace that is used during the context

    """
    original = cmds.namespaceInfo(cur=True)
    if new:
        # Ensure a non-clashing namespace and create it before entering
        namespace = unique_namespace(namespace)
        cmds.namespace(add=namespace)
    try:
        cmds.namespace(set=namespace)
        yield namespace
    finally:
        # Always restore the namespace that was current on entry
        cmds.namespace(set=original)
@contextlib.contextmanager
def unlocked(nodes):
    """Temporarily unlock `nodes` for the duration of the context.

    The original lock state of every node is restored on exit, even when the
    body raises. Nodes are tracked by Maya UUID so renames inside the context
    are handled; the original node name is used as a fallback when the UUID
    can no longer be resolved.

    Args:
        nodes (list): Node names to unlock.

    Yields:
        None
    """
    # Get node state by Maya's uuid
    nodes = cmds.ls(nodes, long=True)
    uuids = cmds.ls(nodes, uuid=True)
    states = cmds.lockNode(nodes, query=True, lock=True)
    states = {uuid: state for uuid, state in zip(uuids, states)}
    originals = {uuid: node for uuid, node in zip(uuids, nodes)}
    try:
        cmds.lockNode(nodes, lock=False)
        yield
    finally:
        # Reapply original states. (dict.items() works on both Py2 and Py3,
        # so the old `iteritems` compatibility shim is unnecessary.)
        for uuid, state in states.items():
            nodes_from_id = cmds.ls(uuid, long=True)
            if nodes_from_id:
                node = nodes_from_id[0]
            else:
                # BUG FIX: the original logged `node` *before* assigning it,
                # which is unbound (or stale from the previous iteration) on
                # the first fallback.
                node = originals[uuid]
                log.debug("Falling back to node name: %s", node)

            if not cmds.objExists(node):
                log.warning("Unable to find: %s", node)
                continue

            cmds.lockNode(node, lock=state)
def load_package(filepath, name, namespace=None):
    """Load a package that was gathered elsewhere.

    A package is a group of published instances, possibly with additional data
    in a hierarchy.

    Args:
        filepath (str): Path to the package .json build file. An .abc file
            with the same base name must live next to it.
        name (str): Name for the package root group.
        namespace (str, optional): Namespace to load the package into. When
            None, a unique namespace is derived from the file name.

    Returns:
        list: The created containers plus the hierarchy reference nodes.
    """
    if namespace is None:
        # Define a unique namespace for the package
        namespace = os.path.basename(filepath).split(".")[0]
        # BUG FIX: `unique_namespace` *returns* the uniquified name; the
        # original call discarded the return value, so the namespace was
        # never actually made unique.
        namespace = unique_namespace(namespace)
    assert isinstance(namespace, six.string_types)

    # Load the setdress package data
    with open(filepath, "r") as fp:
        data = json.load(fp)

    # Load the setdress alembic hierarchy.
    # We import this into the namespace in which we'll load the package's
    # instances into afterwards.
    alembic = filepath.replace(".json", ".abc")
    hierarchy = cmds.file(alembic,
                          reference=True,
                          namespace=namespace,
                          returnNewNodes=True,
                          groupReference=True,
                          groupName="{}:{}".format(namespace, name),
                          typ="Alembic")

    # Get the top root node (the reference group)
    root = "{}:{}".format(namespace, name)

    containers = []
    all_loaders = discover_loader_plugins()
    for representation_id, instances in data.items():
        # Find the compatible loaders
        loaders = loaders_from_representation(
            all_loaders, representation_id
        )
        for instance in instances:
            container = _add(instance=instance,
                             representation_id=representation_id,
                             loaders=loaders,
                             namespace=namespace,
                             root=root)
            containers.append(container)

    # TODO: Do we want to cripple? Or do we want to add a 'parent' parameter?
    # Cripple the original avalon containers so they don't show up in the
    # manager
    # for container in containers:
    #     cmds.setAttr("%s.id" % container,
    #                  "setdress.container",
    #                  type="string")

    # TODO: Lock all loaded nodes
    # This is to ensure the hierarchy remains unaltered by the artists
    # for node in nodes:
    #     cmds.lockNode(node, lock=True)

    return containers + hierarchy
def _add(instance, representation_id, loaders, namespace, root="|"):
    """Add an item from the package

    Args:
        instance (dict): Instance data from the package build file (must
            carry at least 'loader', 'namespace' and 'parent' keys).
        representation_id (str): Database id of the representation to load.
        loaders (list): Loader plug-in classes compatible with the
            representation.
        namespace (str): Package namespace to load the instance into.
        root (str): Root node to parent the loaded content under.

    Returns:
        str: The created Avalon container.

    """
    from openpype.hosts.maya.lib import get_container_transforms

    # Process within the namespace
    with namespaced(namespace, new=False) as namespace:

        # Get the used loader
        Loader = next((x for x in loaders if
                       x.__name__ == instance['loader']),
                      None)

        if Loader is None:
            log.warning("Loader is missing: %s. Skipping %s",
                        instance['loader'], instance)
            raise RuntimeError("Loader is missing.")

        container = load_container(
            Loader,
            representation_id,
            namespace=instance['namespace']
        )

        # Get the root from the loaded container
        loaded_root = get_container_transforms({"objectName": container},
                                               root=True)

        # Apply matrix to root node (if any matrix edits)
        matrix = instance.get("matrix", None)
        if matrix:
            cmds.xform(loaded_root, objectSpace=True, matrix=matrix)

        # Parent into the setdress hierarchy
        # Namespace is missing from parent node(s), add namespace
        # manually
        parent = root + to_namespace(instance["parent"], namespace)
        cmds.parent(loaded_root, parent, relative=True)

    return container
# Store root nodes based on representation and namespace
def _instances_by_namespace(data):
"""Rebuild instance data so we can look it up by namespace.
Note that the `representation` is added into the instance's
data with a `representation` key.
Args:
data (dict): scene build data
Returns:
dict
"""
result = {}
# Add new assets
for representation_id, instances in data.items():
# Ensure we leave the source data unaltered
instances = copy.deepcopy(instances)
for instance in instances:
instance['representation'] = representation_id
result[instance['namespace']] = instance
return result
def get_contained_containers(container):
    """Get the Avalon containers in this container

    Args:
        container (dict): The container dict.

    Returns:
        list: A list of member container dictionaries.

    """
    import avalon.schema
    from .pipeline import parse_container

    # Get avalon containers in this package setdress container
    containers = []
    members = cmds.sets(container['objectName'], query=True)
    for node in cmds.ls(members, type="objectSet"):
        try:
            member_container = parse_container(node)
            containers.append(member_container)
        except avalon.schema.ValidationError:
            # Object set is not a valid Avalon container; skip it
            pass

    return containers
def update_package_version(container, version):
    """
    Update package by version number

    Args:
        container (dict): container data of the container node
        version (int): the new version number of the package.
            Pass -1 to update to the latest available version.

    Returns:
        None
    """
    # Versioning (from `core.maya.pipeline`)
    current_representation = io.find_one({
        "_id": io.ObjectId(container["representation"])
    })

    assert current_representation is not None, "This is a bug"

    version_, subset, asset, project = io.parenthood(current_representation)

    if version == -1:
        # Latest version: sort by version name descending, take the first
        new_version = io.find_one({
            "type": "version",
            "parent": subset["_id"]
        }, sort=[("name", -1)])
    else:
        new_version = io.find_one({
            "type": "version",
            "parent": subset["_id"],
            "name": version,
        })

    assert new_version is not None, "This is a bug"

    # Get the new representation (new file) matching the current one's name
    new_representation = io.find_one({
        "type": "representation",
        "parent": new_version["_id"],
        "name": current_representation["name"]
    })

    update_package(container, new_representation)
def update_package(set_container, representation):
    """Update any matrix changes in the scene based on the new data

    Args:
        set_container (dict): container data from `ls()`
        representation (dict): the representation document from the database

    Returns:
        None
    """
    # Load the original package data
    current_representation = io.find_one({
        "_id": io.ObjectId(set_container['representation']),
        "type": "representation"
    })

    current_file = get_representation_path(current_representation)
    assert current_file.endswith(".json")
    with open(current_file, "r") as fp:
        current_data = json.load(fp)

    # Load the new package data
    new_file = get_representation_path(representation)
    assert new_file.endswith(".json")
    with open(new_file, "r") as fp:
        new_data = json.load(fp)

    # Update scene content
    containers = get_contained_containers(set_container)
    update_scene(set_container, containers, current_data, new_data, new_file)

    # TODO: This should be handled by the pipeline itself
    # Point the container at the new representation id
    cmds.setAttr(set_container['objectName'] + ".representation",
                 str(representation['_id']), type="string")
def update_scene(set_container, containers, current_data, new_data, new_file):
    """Updates the hierarchy, assets and their matrix

    Updates the following within the scene:
        * Setdress hierarchy alembic
        * Matrix
        * Parenting
        * Representations

    It removes any assets which are not present in the new build data

    Args:
        set_container (dict): the setdress container of the scene
        containers (list): the list of containers under the setdress container
        current_data (dict): the current build data of the setdress
        new_data (dict): the new build data of the setdress
        new_file (str): path to the new setdress .json build file (the
            matching .abc file must live next to it)

    Returns:
        processed_containers (list): all new and updated containers

    """
    from openpype.hosts.maya.lib import DEFAULT_MATRIX, get_container_transforms

    set_namespace = set_container['namespace']

    # Update the setdress hierarchy alembic
    set_root = get_container_transforms(set_container, root=True)
    set_hierarchy_root = cmds.listRelatives(set_root, fullPath=True)[0]
    set_hierarchy_reference = cmds.referenceQuery(set_hierarchy_root,
                                                  referenceNode=True)
    new_alembic = new_file.replace(".json", ".abc")
    assert os.path.exists(new_alembic), "%s does not exist." % new_alembic
    # Nodes must be unlocked for the reference swap to succeed
    with unlocked(cmds.listRelatives(set_root, ad=True, fullPath=True)):
        cmds.file(new_alembic,
                  loadReference=set_hierarchy_reference,
                  type="Alembic")

    identity = DEFAULT_MATRIX[:]

    processed_namespaces = set()
    processed_containers = list()

    new_lookup = _instances_by_namespace(new_data)
    old_lookup = _instances_by_namespace(current_data)
    for container in containers:
        container_ns = container['namespace']

        # Consider it processed here, even if it fails we want to store that
        # the namespace was already available.
        processed_namespaces.add(container_ns)
        processed_containers.append(container['objectName'])

        if container_ns in new_lookup:
            root = get_container_transforms(container, root=True)
            if not root:
                log.error("Can't find root for %s", container['objectName'])
                continue

            old_instance = old_lookup.get(container_ns, {})
            new_instance = new_lookup[container_ns]

            # Update the matrix
            # check matrix against old_data matrix to find local overrides
            current_matrix = cmds.xform(root,
                                        query=True,
                                        matrix=True,
                                        objectSpace=True)
            original_matrix = old_instance.get("matrix", identity)
            has_matrix_override = not matrix_equals(current_matrix,
                                                    original_matrix)
            if has_matrix_override:
                # Artist moved the asset locally; keep their placement
                log.warning("Matrix override preserved on %s", container_ns)
            else:
                new_matrix = new_instance.get("matrix", identity)
                cmds.xform(root, matrix=new_matrix, objectSpace=True)

            # Update the parenting
            if old_instance.get("parent", None) != new_instance["parent"]:
                parent = to_namespace(new_instance['parent'], set_namespace)
                if not cmds.objExists(parent):
                    log.error("Can't find parent %s", parent)
                    continue

                # Set the new parent
                cmds.lockNode(root, lock=False)
                root = cmds.parent(root, parent, relative=True)
                cmds.lockNode(root, lock=True)

            # Update the representation
            representation_current = container['representation']
            representation_old = old_instance['representation']
            representation_new = new_instance['representation']
            has_representation_override = (representation_current !=
                                           representation_old)

            if representation_new != representation_current:

                if has_representation_override:
                    log.warning("Your scene had local representation "
                                "overrides within the set. New "
                                "representations not loaded for %s.",
                                container_ns)
                    continue

                # We check it against the current 'loader' in the scene instead
                # of the original data of the package that was loaded because
                # an Artist might have made scene local overrides
                if new_instance['loader'] != container['loader']:
                    log.warning("Loader is switched - local edits will be "
                                "lost. Removing: %s",
                                container_ns)

                    # Remove this from the "has been processed" list so it's
                    # considered as new element and added afterwards.
                    processed_containers.pop()
                    processed_namespaces.remove(container_ns)
                    remove_container(container)
                    continue

                # Check whether the conversion can be done by the Loader.
                # They *must* use the same asset, subset and Loader for
                # `update_container` to make sense.
                old = io.find_one({
                    "_id": io.ObjectId(representation_current)
                })
                new = io.find_one({
                    "_id": io.ObjectId(representation_new)
                })
                is_valid = compare_representations(old=old, new=new)
                if not is_valid:
                    log.error("Skipping: %s. See log for details.",
                              container_ns)
                    continue

                new_version = new["context"]["version"]
                update_container(container, version=new_version)
        else:
            # Remove this container because it's not in the new data
            log.warning("Removing content: %s", container_ns)
            remove_container(container)

    # Add new assets
    all_loaders = discover_loader_plugins()
    for representation_id, instances in new_data.items():

        # Find the compatible loaders
        loaders = loaders_from_representation(
            all_loaders, representation_id
        )
        for instance in instances:

            # Already processed in update functionality
            if instance['namespace'] in processed_namespaces:
                continue

            container = _add(instance=instance,
                             representation_id=representation_id,
                             loaders=loaders,
                             namespace=set_container['namespace'],
                             root=set_root)

            # Add to the setdress container
            cmds.sets(container,
                      addElement=set_container['objectName'])
            processed_containers.append(container)

    return processed_containers
def compare_representations(old, new):
    """Check if the old representation given can be updated

    Due to limitations of the `update_container` function we cannot allow
    differences in the following data:
        * Representation name (extension)
        * Asset name
        * Subset name (variation)

    Args:
        old (dict): representation data from the database
        new (dict): representation data from the database

    Returns:
        bool: False if the update is not allowed, else True.
    """
    # DOC FIX: the original docstring claimed a RuntimeError is raised on
    # mismatch; the function actually reports via the log and returns False.
    if new["name"] != old["name"]:
        log.error("Cannot switch extensions")
        return False

    new_context = new["context"]
    old_context = old["context"]

    if new_context["asset"] != old_context["asset"]:
        log.error("Changing assets between updates is "
                  "not supported.")
        return False

    if new_context["subset"] != old_context["subset"]:
        log.error("Changing subsets between updates is "
                  "not supported.")
        return False

    return True
| 32.318662 | 80 | 0.60402 |
ed35571053a0ce06541f5834b7022475a44234f7 | 5,226 | py | Python | matplotlib/tutorials_python/advanced/patheffects_guide.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z | matplotlib/tutorials_python/advanced/patheffects_guide.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z | matplotlib/tutorials_python/advanced/patheffects_guide.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z | """
==================
Path effects guide
==================
Defining paths that objects follow on a canvas.
.. py:currentmodule:: matplotlib.patheffects
Matplotlib's :mod:`~matplotlib.patheffects` module provides functionality to
apply a multiple draw stage to any Artist which can be rendered via a
:class:`~matplotlib.path.Path`.
Artists which can have a path effect applied to them include :class:`~matplotlib.patches.Patch`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.collections.Collection` and even
:class:`~matplotlib.text.Text`. Each artist's path effects can be controlled via the
``set_path_effects`` method (:class:`~matplotlib.artist.Artist.set_path_effects`), which takes
an iterable of :class:`AbstractPathEffect` instances.
The simplest path effect is the :class:`Normal` effect, which simply
draws the artist without any effect:
"""
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects

# Render text through the path-effects framework using only Normal(),
# which draws the artist exactly as it would be drawn without any effect.
fig = plt.figure(figsize=(5, 1.5))
text = fig.text(0.5, 0.5, 'Hello path effects world!\nThis is the normal '
                          'path effect.\nPretty dull, huh?',
                ha='center', va='center', size=20)
text.set_path_effects([path_effects.Normal()])
plt.show()
###############################################################################
# Whilst the plot doesn't look any different to what you would expect without any path
# effects, the drawing of the text has now been changed to use the path effects
# framework, opening up the possibilities for more interesting examples.
#
# Adding a shadow
# ---------------
#
# A far more interesting path effect than :class:`Normal` is the
# drop-shadow, which we can apply to any of our path based artists. The classes
# :class:`SimplePatchShadow` and
# :class:`SimpleLineShadow` do precisely this by drawing either a filled
# patch or a line patch below the original artist:
import matplotlib.patheffects as path_effects

# withSimplePatchShadow draws a drop-shadow *and* the original text.
text = plt.text(0.5, 0.5, 'Hello path effects world!',
                path_effects=[path_effects.withSimplePatchShadow()])

# For the line: explicitly draw the shadow first, then the normal line on top.
plt.plot([0, 3, 2, 5], linewidth=5, color='blue',
         path_effects=[path_effects.SimpleLineShadow(),
                       path_effects.Normal()])
plt.show()
###############################################################################
# Notice the two approaches to setting the path effects in this example. The
# first uses the ``with*`` classes to include the desired functionality automatically
# followed by the "normal" effect, whereas the latter explicitly defines the two path
# effects to draw.
#
# Making an artist stand out
# --------------------------
#
# One nice way of making artists visually stand out is to draw an outline in a bold
# color below the actual artist. The :class:`Stroke` path effect
# makes this a relatively simple task:
fig = plt.figure(figsize=(7, 1))
text = fig.text(0.5, 0.5, 'This text stands out because of\n'
                          'its black border.', color='white',
                ha='center', va='center', size=30)
# Draw a thick black outline first, then the white text on top of it.
text.set_path_effects([path_effects.Stroke(linewidth=3, foreground='black'),
                       path_effects.Normal()])
plt.show()
###############################################################################
# It is important to note that this effect only works because we have drawn the text
# path twice; once with a thick black line, and then once with the original text
# path on top.
#
# You may have noticed that the keywords to :class:`Stroke` and
# :class:`SimplePatchShadow` and :class:`SimpleLineShadow` are not the usual Artist
# keywords (such as ``facecolor`` and ``edgecolor`` etc.). This is because with these
# path effects we are operating at a lower level of matplotlib. In fact, the keywords
# which are accepted are those for a :class:`matplotlib.backend_bases.GraphicsContextBase`
# instance, which have been designed for making it easy to create new backends - and not
# for its user interface.
#
#
# Greater control of the path effect artist
# -----------------------------------------
#
# As already mentioned, some of the path effects operate at a lower level than most users
# will be used to, meaning that setting keywords such as ``facecolor`` and ``edgecolor``
# raise an AttributeError. Luckily there is a generic :class:`PathPatchEffect` path effect
# which creates a :class:`~matplotlib.patches.PathPatch` class with the original path.
# The keywords to this effect are identical to those of :class:`~matplotlib.patches.PathPatch`:
fig = plt.figure(figsize=(8, 1))
t = fig.text(0.02, 0.5, 'Hatch shadow', fontsize=75, weight=1000, va='center')
# PathPatchEffect accepts regular Patch keywords (hatch, facecolor, ...):
# first an offset hatched "shadow", then the solid black text on top.
t.set_path_effects([path_effects.PathPatchEffect(offset=(4, -4), hatch='xxxx',
                                                 facecolor='gray'),
                    path_effects.PathPatchEffect(edgecolor='white', linewidth=1.1,
                                                 facecolor='black')])
plt.show()
###############################################################################
# ..
# Headings for future consideration:
#
# Implementing a custom path effect
# ---------------------------------
#
# What is going on under the hood
# --------------------------------
| 43.55 | 96 | 0.638921 |
77fba48dd8a6f1f6749e13aa819b52a95f3dc073 | 9,714 | py | Python | utils/generic/create_report.py | WadhwaniAI/covid-modelling | db9f89bfbec392ad4de6b4583cfab7c3d823c1c9 | [
"MIT"
] | 3 | 2021-06-23T10:27:11.000Z | 2022-02-09T07:50:42.000Z | utils/generic/create_report.py | WadhwaniAI/covid-modelling | db9f89bfbec392ad4de6b4583cfab7c3d823c1c9 | [
"MIT"
] | 3 | 2021-06-23T09:36:29.000Z | 2022-01-13T03:38:16.000Z | utils/generic/create_report.py | WadhwaniAI/covid-modelling | db9f89bfbec392ad4de6b4583cfab7c3d823c1c9 | [
"MIT"
] | null | null | null | import copy
import json
import os
import pickle
from pprint import pformat
import numpy as np
import pandas as pd
import pypandoc
import yaml
from mdutils.mdutils import MdUtils
from utils.fitting.util import CustomEncoder
from utils.generic.config import make_date_str
def create_output(predictions_dict, output_folder, tag):
    """Serialise everything in ``predictions_dict`` to ``output_folder/tag``.

    Writes JSON files (params/config/loss), CSVs (dataframes and forecasts),
    pickles (trials, plots, ...) and numpy arrays (trial params/losses, beta).

    Args:
        predictions_dict (dict): fit outputs for a single region
        output_folder (str): base directory for the outputs
        tag (str): sub-directory name to write into
    """
    directory = f'{output_folder}/{tag}'
    # exist_ok avoids the exists()/makedirs() race of the original code
    os.makedirs(directory, exist_ok=True)
    d = {}
    for inner in ['variable_param_ranges', 'best_params', 'beta_loss']:
        if inner in predictions_dict:
            with open(f'{directory}/{inner}.json', 'w') as f:
                json.dump(predictions_dict[inner], f, indent=4)
    for inner in ['df_prediction', 'df_district', 'df_train', 'df_val', 'df_loss', 'df_district_unsmoothed']:
        if inner in predictions_dict and predictions_dict[inner] is not None:
            predictions_dict[inner].to_csv(f'{directory}/{inner}.csv')
    for inner in ['trials', 'run_params', 'plots', 'smoothing_description', 'default_params']:
        with open(f'{directory}/{inner}.pkl', 'wb') as f:
            pickle.dump(predictions_dict[inner], f)
    if 'ensemble_mean' in predictions_dict['forecasts']:
        predictions_dict['forecasts']['ensemble_mean'].to_csv(
            f'{directory}/ensemble_mean_forecast.csv')
    predictions_dict['trials']['predictions'][0].to_csv(
        f'{directory}/trials_predictions.csv')
    np.save(f'{directory}/trials_params.npy',
            predictions_dict['trials']['params'])
    np.save(f'{directory}/trials_losses.npy',
            predictions_dict['trials']['losses'])
    # FIX: was `d[f'data_last_date']` - an f-string with no placeholder
    d['data_last_date'] = predictions_dict['data_last_date']
    d['fitting_date'] = predictions_dict['fitting_date']
    np.save(f'{directory}/beta.npy', predictions_dict['beta'])
    with open(f'{directory}/other.json', 'w') as f:
        json.dump(d, f, indent=4)
    with open(f'{directory}/config.json', 'w') as f:
        json.dump(make_date_str(
            predictions_dict['config']), f, indent=4, cls=CustomEncoder)
    with open(f'{directory}/config.yaml', 'w') as f:
        yaml.dump(make_date_str(predictions_dict['config']), f)
def _dump_predictions_dict(predictions_dict, ROOT_DIR):
    """Pickle ``predictions_dict`` to ``ROOT_DIR/predictions_dict.pkl``.

    The (unpicklable) model class reference is stripped from the run params
    first - note this mutates ``predictions_dict`` in place.
    """
    try:
        del predictions_dict['run_params']['model_class']
    except (KeyError, TypeError):
        # Best effort: either key may be absent (or run_params may not be a
        # mapping) - narrowed from the original blanket `except Exception`.
        pass
    filepath = os.path.join(ROOT_DIR, 'predictions_dict.pkl')
    with open(filepath, 'wb+') as dump:
        pickle.dump(predictions_dict, dump)
def _dump_params(predictions_dict, ROOT_DIR):
    """Write the fitting run params (minus unserialisable fields) to params.json."""
    target = os.path.join(ROOT_DIR, 'params.json')
    with open(target, 'w+') as dump:
        # Shallow copy is enough: we only drop top-level keys
        slimmed = copy.copy(predictions_dict['run_params'])
        del slimmed['model_class']
        del slimmed['variable_param_ranges']
        json.dump(slimmed, dump, indent=4)
def _save_trials(predictions_dict, ROOT_DIR):
    """Write the dataframe of all optimisation trials to ``ROOT_DIR/trials.csv``."""
    predictions_dict['all_trials'].to_csv(os.path.join(ROOT_DIR, 'trials.csv'))
def _create_md_file(predictions_dict, config, ROOT_DIR):
    """Create the report's MdUtils file.

    Returns:
        tuple: (mdFile, filename) where filename has no extension yet.
    """
    fitting_date = predictions_dict['fitting_date']
    data_last_date = predictions_dict['data_last_date']
    # Location description (e.g. district name) becomes the file name prefix
    ld = config['fitting']['data']['dataloading_params']['location_description']
    filename = os.path.join(ROOT_DIR, f'{ld}_report_{fitting_date}')
    mdFile = MdUtils(file_name=filename, title=f'{ld} Fit [Based on data until {data_last_date}]')
    return mdFile, filename
def _log_hyperparams(mdFile, predictions_dict):
    """Append fit metadata (data cut-off date and fitting date) to the report."""
    paragraphs = (
        "---",
        f"Data available till: {predictions_dict['data_last_date']}",
        f"Fitting Date: {predictions_dict['fitting_date']}",
    )
    for text in paragraphs:
        mdFile.new_paragraph(text)
def _log_plots_util(mdFile, ROOT_DIR, plot_filename, figure, fig_text):
    """Save ``figure`` under ``ROOT_DIR/plots`` and embed it in the report."""
    plots_dir = os.path.join(os.path.abspath(ROOT_DIR), 'plots')
    os.makedirs(plots_dir, exist_ok=True)
    plot_filepath = os.path.join(plots_dir, plot_filename)
    figure.savefig(plot_filepath)
    image_markup = mdFile.new_inline_image(text=fig_text, path=plot_filepath)
    mdFile.new_line(image_markup)
    mdFile.new_paragraph("")
def _log_smoothing(mdFile, ROOT_DIR, fit_dict):
    """Log the data-smoothing section: the plot plus its textual description."""
    mdFile.new_header(level=1, title='SMOOTHING')
    _log_plots_util(mdFile, ROOT_DIR, 'smoothing.png',
                    fit_dict['plots']['smoothing'], 'Smoothing Plot')
    description = fit_dict['smoothing_description']
    for line in description.split('\n'):
        mdFile.new_paragraph(line)
    mdFile.new_paragraph("")
def _log_fits(mdFile, ROOT_DIR, fit_dict):
    """Log the fit section: optimal parameters, loss table and the fit plot."""
    mdFile.new_header(level=1, title='FIT')

    mdFile.new_header(level=2, title='Optimal Parameters')
    # Best trial is the first entry in the trials params list
    mdFile.insert_code(pformat(fit_dict['trials']['params'][0]))

    mdFile.new_header(level=2, title='MAPE Loss Values')
    mdFile.new_paragraph(fit_dict['df_loss'].to_markdown())

    mdFile.new_header(level=2, title='Fit Curves')
    _log_plots_util(mdFile, ROOT_DIR, 'fit.png',
                    fit_dict['plots']['fit'], 'Fit Curve')
def _log_uncertainty_fit(mdFile, fit_dict):
    """Log the uncertainty fit: the chosen beta value and its loss breakdown."""
    mdFile.new_paragraph(f"beta - {fit_dict['beta']}")
    mdFile.new_paragraph(f"beta loss")
    mdFile.insert_code(pformat(fit_dict['beta_loss']))
def _log_forecasts(mdFile, ROOT_DIR, fit_dict):
    """Embed all forecast plots (single, top-k, percentile, scenario) in the report."""
    plots = fit_dict['plots']

    # Single forecast figures live directly under keys containing 'forecast_'
    single_keys = [key for key, val in plots.items()
                   if 'forecast_' in key and type(val) != dict]
    for key in single_keys:
        _log_plots_util(mdFile, ROOT_DIR, f'{key}.png', plots[key], key)

    for column, figure in plots['forecasts_topk'].items():
        _log_plots_util(mdFile, ROOT_DIR, f'forecast-topk-{column}.png', figure,
                        f'Forecast of top k trials for column {column}')

    for column, figure in plots['forecasts_ptiles'].items():
        _log_plots_util(mdFile, ROOT_DIR, f'forecast-ptiles-{column}.png', figure,
                        f'Forecast of all ptiles for column {column}')

    # Scenario plots are optional
    if 'scenarios' in plots:
        mdFile.new_header(level=1, title="What if Scenarios")
        for column, figure in plots['scenarios'].items():
            _log_plots_util(mdFile, ROOT_DIR,
                            f'forecast-scenarios-{column}.png', figure, '')

    mdFile.new_paragraph("---")
def _log_tables(mdFile, fit_dict):
    """Write the Tables section: top trials, decile params and decile losses.

    All numeric values are rounded (2 decimals) for readability; the input
    fit_dict is deep-copied first so the caller's data is never mutated.
    """
    trials_processed = copy.deepcopy(fit_dict['trials'])
    trials_processed['losses'] = np.around(trials_processed['losses'], 2)
    trials_processed['params'] = [{key: np.around(value, 2) for key, value in params_dict.items()}
                                  for params_dict in trials_processed['params']]
    mdFile.new_header(level=2, title="Top 10 Trials")
    # Guard against runs with fewer than 10 trials: the previous hard-coded
    # range(10) raised IndexError in that case.
    num_trials = min(10, len(trials_processed['params']))
    df = pd.DataFrame.from_dict({(i+1, trials_processed['losses'][i]): trials_processed['params'][i]
                                 for i in range(num_trials)})
    mdFile.new_paragraph(df.to_markdown())
    deciles = copy.deepcopy(fit_dict['deciles'])
    # Round decile params and losses in a single pass (previously two loops).
    for key in deciles:
        deciles[key]['params'] = {param: np.around(value, 2)
                                  for param, value in deciles[key]['params'].items()}
        deciles[key]['df_loss'] = deciles[key]['df_loss'].astype(float).round(2)
    mdFile.new_header(level=2, title="Decile Params")
    df = pd.DataFrame.from_dict({np.around(key, 1): deciles[key]['params'] for key in deciles})
    mdFile.new_paragraph(df.to_markdown())
    mdFile.new_header(level=2, title="Decile Loss")
    df = pd.DataFrame.from_dict({np.around(key, 1): deciles[key]['df_loss'].to_dict()['train']
                                 for key in deciles})
    mdFile.new_paragraph(df.to_markdown())
def save_dict_and_create_report(predictions_dict, config, ROOT_DIR='../../misc/reports/',
                                config_filename='default.yaml', config_ROOT_DIR='../../configs/seir'):
    """Creates report (BOTH MD and DOCX) for an input of a dict of predictions for a particular district/region
    The DOCX file can directly be uploaded to Google Drive and shared with the people who have to review
    Arguments:
        predictions_dict {dict} -- Dict of predictions for a particual district/region [NOT ALL Districts]
        config {dict} -- config of the run, passed through to the created md file
    Keyword Arguments:
        ROOT_DIR {str} -- the path where the plots and the report would be saved (default: {'../../misc/reports/'})
        config_filename {str} -- name of the config file copied next to the report (default: {'default.yaml'})
        config_ROOT_DIR {str} -- directory the config file is read from (default: {'../../configs/seir'})
    """
    import shutil  # local import: only used here, keeps module-level imports untouched
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(ROOT_DIR, exist_ok=True)
    _dump_predictions_dict(predictions_dict, ROOT_DIR)
    # shutil is portable and avoids shelling out via `os.system('cp ...')`
    # (which was injection-prone and Unix-only).
    shutil.copyfile(os.path.join(config_ROOT_DIR, config_filename),
                    os.path.join(ROOT_DIR, config_filename))
    mdFile, filename = _create_md_file(predictions_dict, config, ROOT_DIR)
    _log_hyperparams(mdFile, predictions_dict)
    # NOTE(review): this reload overwrites the `config` argument and the result
    # is never used afterwards -- kept for compatibility, but looks like dead code.
    with open(f'{config_ROOT_DIR}/{config_filename}') as configfile:
        config = yaml.load(configfile, Loader=yaml.SafeLoader)
    if 'smoothing' in predictions_dict and predictions_dict['plots']['smoothing'] is not None:
        _log_smoothing(mdFile, ROOT_DIR, predictions_dict)
    mdFile.new_header(level=1, title='FIT')
    _log_fits(mdFile, ROOT_DIR, predictions_dict)
    mdFile.new_header(level=2, title='Uncertainty Fitting')
    _log_uncertainty_fit(mdFile, predictions_dict)
    mdFile.new_header(level=1, title='FORECASTS')
    _log_forecasts(mdFile, ROOT_DIR, predictions_dict)
    mdFile.new_header(level=1, title="Tables")
    _log_tables(mdFile, predictions_dict)
    # Create a table of contents
    mdFile.new_table_of_contents(table_title='Contents', depth=2)
    mdFile.create_md_file()
    pypandoc.convert_file(f"{filename}.md", 'docx', outputfile=f"{filename}.docx")
    # TODO: pdf conversion has some issues with order of images, low priority
| 42.234783 | 115 | 0.67902 |
3a05bcedc371f45435dc3a1f57ccbef6a0f98c9b | 7,818 | py | Python | cogs/xandy.py | brainfrozeno00o/xandy-bot | cbde3fc523a74e93d108cb9ce999dbca14e35651 | [
"MIT"
] | 1 | 2021-11-01T11:12:22.000Z | 2021-11-01T11:12:22.000Z | cogs/xandy.py | brainfrozeno00o/xandy-bot | cbde3fc523a74e93d108cb9ce999dbca14e35651 | [
"MIT"
] | 13 | 2021-09-27T00:06:26.000Z | 2021-11-27T06:54:21.000Z | cogs/xandy.py | brainfrozeno00o/xandy-bot | cbde3fc523a74e93d108cb9ce999dbca14e35651 | [
"MIT"
] | null | null | null | from random import randint
from discord import Embed
from discord.ext import commands
from discord.errors import Forbidden
from logging import getLogger
logger = getLogger(__name__)
# helper method for sending the embed on the channel where the commmand is called
async def send_embed(ctx, embed):
"""
Basically this is the helper function that sends the embed that is only for this class/cog
Takes the context and embed to be sent to the channel in this following hierarchy
- tries to send the embed in the channel
- tries to send a normal message when it cannot send the embed
- tries to send embed privately with information about the missing permissions
"""
logger.info("Sending embed...")
try:
await ctx.send(embed=embed)
except Forbidden:
try:
await ctx.send(
"Why can't I send embeds?!?!?!? Please check my permissions. PLEEEASEEEEE."
)
except:
await ctx.author.send(
f"I cannot send the embed in {ctx.channel.name} on {ctx.guild.name}\n"
f"Please inform Anjer Castillo on this. :slight_smile: ",
embed=embed,
)
# helper method for sending a message with an image
async def send_message_with_image(ctx, message, image):
    """
    Basically this is the helper function that sends the message with an image that is only for this class/cog
    Takes the context, message, and image to be sent to the channel in this following hierarchy
    - tries to send the message and image in the channel
    - tries to send a normal message when it cannot send both message and image
    - tries to send message and image privately with information about the missing permissions
    """
    logger.info("Sending message with image...")
    try:
        await ctx.send(message)
        await ctx.send(image)
    except Forbidden:
        try:
            await ctx.send(
                "Why can't I send a message with an image?!?!?!? Please check my permissions. PLEEEASEEEEE."
            )
        # The previous bare `except:` also trapped SystemExit/KeyboardInterrupt;
        # Exception keeps the DM fallback for any send failure without that.
        except Exception:
            await ctx.author.send(
                f"I cannot send this message: {message} with a image in {ctx.channel.name} on {ctx.guild.name}\n"
                f"Please inform Anjer Castillo on this. :slight_smile: ",
            )
            await ctx.author.send(image)
class Xandy(commands.Cog):
    """Cog holding the Xandy novelty commands: random image, random quote and a magic-8-ball style answer."""
    # yes answers
    AFFIRMATIVE = ["LGTM", "Looks good to me!", "Parfait!", "Nice"]
    # no answers
    NEGATIVE = [
        "Hell nah!",
        "Gawa mo ba 'yan? Kasi ang panget!!!",
        "We know what we do not know.",
    ]
    # unsure answers
    UNSURE = [
        "Tanong mo sa mama mo",
        "Hindi ko alam. Hindi ko naman task 'yan eh.",
        "Huwag mo akong tanungin. Malungkot pa ako. :cry:",
    ]
    def __init__(self, bot):
        self.bot = bot
    @commands.command(
        name="pogi",
        aliases=["image", "xandypic"],
        help="%pogi",
        description="I will send a picture of my sexy self."
    )
    async def pogi(self, ctx):
        """Send a random Xander image from the bot's preloaded image pool."""
        logger.debug("Someone wants to request a Xander image...")
        try:
            all_images = self.bot.all_images
            # Pick a random row; index 1 holds the image link.
            random_image = all_images[randint(0, len(all_images) - 1)]
            image_link = random_image[1]
            message = "Here is a handsome picture of me. Hope you enjoy. :kissing_heart:"
            await send_message_with_image(ctx, message, image_link)
        except Exception as e:
            # Lazy %-args: no string formatting when the level is filtered out.
            logger.error("Error occurred when trying to call pogi command: %s", e)
        finally:
            logger.info("Done processing for pogi command...")
    @commands.command(
        name="clown",
        aliases=["quote", "xandysays"],
        help="%clown",
        description="I will give you a random quote at your will. :smile:",
    )
    async def clown(self, ctx):
        """Send an embed containing a random Xander quote."""
        logger.debug("Someone wants to request a Xander quote...")
        try:
            all_quotes = self.bot.all_quotes
            xander_image = self.bot.quote_image
            # Pick a random row; index 1 is the quote text, index 2 its context.
            random_quote = all_quotes[randint(0, len(all_quotes) - 1)]
            quote_taken = random_quote[1]
            context_taken = random_quote[2]
            logger.info("Generating embed for sending...")
            # quotes with the new line most likely have the quotation marks already within the quote
            if "\n" in quote_taken:
                embed_description = f"""
                {quote_taken}
                - {context_taken}
                """
            else:
                embed_description = f'"{quote_taken}" - {context_taken}'
            xander_embed = Embed(
                title="Random Xander Quote",
                description=embed_description,
                color=0xCF37CA,
            )
            xander_embed.set_footer(text="This bot is powered by Xander's money")
            xander_embed.set_image(url=xander_image)
            logger.info("Sending random quote at will...")
            await send_embed(ctx, xander_embed)
        except Exception as e:
            logger.error("Error occurred when trying to call clown command: %s", e)
        finally:
            logger.info("Done processing for clown command...")
    @commands.command(
        name="lgtm",
        aliases=["okba", "pwedeba"],
        help="%lgtm <question|statement>",
        description="I will try my best to say something on what you say :sweat_smile:",
    )
    async def lgtm(self, ctx, *words):
        """Reply with a random yes/no/unsure answer to whatever the caller typed."""
        logger.debug("Someone wants to know what the bot has to say...")
        try:
            # No question/statement supplied: answer with a lone "?" embed.
            if not words:
                logger.info("Call command for what...")
                answer_embed = Embed(title="?", color=0xCF37CA)
            else:
                logger.info("Generating response...")
                # 0 = YES, 1 = NO, 2 = UNSURE -- index straight into the pools
                # instead of the old if/elif chain.
                answer_pools = (self.AFFIRMATIVE, self.NEGATIVE, self.UNSURE)
                answer_list = answer_pools[randint(0, 2)]
                answer = answer_list[randint(0, len(answer_list) - 1)]
                answer_embed = Embed(
                    title=f"{' '.join(words)}", description=f"{answer}", color=0xCF37CA
                )
            # set footer that this bot is powered by Xander's money
            answer_embed.set_footer(text="This bot is powered by Xander's money")
            await send_embed(ctx, answer_embed)
        except Exception as e:
            logger.error("Error occurred when trying to call lgtm command: %s", e)
        finally:
            logger.info("Done processing for lgtm command...")
def setup(bot):
    """Entry point used by discord.py's load_extension to register this cog."""
    cog = Xandy(bot)
    bot.add_cog(cog)
| 35.862385 | 113 | 0.577513 |
29e786f2d2b0a05157d738253e55672bec7f4f3b | 293,245 | py | Python | metadata-ingestion/src/datahub/metadata/schema_classes.py | kuntalkumarbasu/datahub | 550a9deab0f32ee4314675b6497ee370e35b4397 | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub/metadata/schema_classes.py | kuntalkumarbasu/datahub | 550a9deab0f32ee4314675b6497ee370e35b4397 | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub/metadata/schema_classes.py | kuntalkumarbasu/datahub | 550a9deab0f32ee4314675b6497ee370e35b4397 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
# This file is autogenerated by /metadata-ingestion/scripts/avro_codegen.py
# Do not modify manually!
# fmt: off
import json
import os.path
import decimal
import datetime
import six
from avrogen.dict_wrapper import DictWrapper
from avrogen import avrojson
from avro.schema import RecordSchema, SchemaFromJSONData as make_avsc_object
from avro import schema as avro_schema
from typing import List, Dict, Union, Optional
def __read_file(file_name):
    """Return the entire contents of ``file_name`` as a single string."""
    with open(file_name, "r") as handle:
        contents = handle.read()
    return contents
def __get_names_and_schema(json_str):
    """Parse the Avro schema JSON and return (name registry, schema object)."""
    name_registry = avro_schema.Names()
    parsed_schema = make_avsc_object(json.loads(json_str), name_registry)
    return name_registry, parsed_schema
# Load the bundled Avro schema (shipped next to this module) and build the
# name registry / schema objects once at import time.
SCHEMA_JSON_STR = __read_file(os.path.join(os.path.dirname(__file__), "schema.avsc"))
__NAMES, SCHEMA = __get_names_and_schema(SCHEMA_JSON_STR)
# Fully-qualified record name -> RecordSchema; populated below from __NAMES.
__SCHEMAS: Dict[str, RecordSchema] = {}
def get_schema_type(fullname):
    """Return the RecordSchema registered under ``fullname``, or None when absent."""
    schema = __SCHEMAS.get(fullname)
    return schema
__SCHEMAS = dict((n.fullname.lstrip("."), n) for n in six.itervalues(__NAMES.names))
class KafkaAuditHeaderClass(DictWrapper):
    """This header records information about the context of an event as it is emitted into kafka and is intended to be used by the kafka audit application. For more information see go/kafkaauditheader"""
    # NOTE: autogenerated by avro_codegen.py from the bundled Avro schema;
    # regenerate rather than editing by hand. Field values live in the
    # DictWrapper-provided _inner_dict.
    RECORD_SCHEMA = get_schema_type("com.linkedin.events.KafkaAuditHeader")
    def __init__(self,
        time: int,
        server: str,
        appName: str,
        messageId: bytes,
        instance: Union[None, str]=None,
        auditVersion: Union[None, int]=None,
        fabricUrn: Union[None, str]=None,
        clusterConnectionString: Union[None, str]=None,
    ):
        super().__init__()
        self.time = time
        self.server = server
        self.instance = instance
        self.appName = appName
        self.messageId = messageId
        self.auditVersion = auditVersion
        self.fabricUrn = fabricUrn
        self.clusterConnectionString = clusterConnectionString
    # Alternate constructor returning an instance populated with schema defaults.
    @classmethod
    def construct_with_defaults(cls) -> "KafkaAuditHeaderClass":
        self = cls.construct({})
        self._restore_defaults()
        return self
    # Reset every field to its Avro schema default.
    def _restore_defaults(self) -> None:
        self.time = int()
        self.server = str()
        self.instance = self.RECORD_SCHEMA.field_map["instance"].default
        self.appName = str()
        self.messageId = bytes()
        self.auditVersion = self.RECORD_SCHEMA.field_map["auditVersion"].default
        self.fabricUrn = self.RECORD_SCHEMA.field_map["fabricUrn"].default
        self.clusterConnectionString = self.RECORD_SCHEMA.field_map["clusterConnectionString"].default
    @property
    def time(self) -> int:
        """Getter: The time at which the event was emitted into kafka."""
        return self._inner_dict.get('time')  # type: ignore
    @time.setter
    def time(self, value: int) -> None:
        """Setter: The time at which the event was emitted into kafka."""
        self._inner_dict['time'] = value
    @property
    def server(self) -> str:
        """Getter: The fully qualified name of the host from which the event is being emitted."""
        return self._inner_dict.get('server')  # type: ignore
    @server.setter
    def server(self, value: str) -> None:
        """Setter: The fully qualified name of the host from which the event is being emitted."""
        self._inner_dict['server'] = value
    @property
    def instance(self) -> Union[None, str]:
        """Getter: The instance on the server from which the event is being emitted. e.g. i001"""
        return self._inner_dict.get('instance')  # type: ignore
    @instance.setter
    def instance(self, value: Union[None, str]) -> None:
        """Setter: The instance on the server from which the event is being emitted. e.g. i001"""
        self._inner_dict['instance'] = value
    @property
    def appName(self) -> str:
        """Getter: The name of the application from which the event is being emitted. see go/appname"""
        return self._inner_dict.get('appName')  # type: ignore
    @appName.setter
    def appName(self, value: str) -> None:
        """Setter: The name of the application from which the event is being emitted. see go/appname"""
        self._inner_dict['appName'] = value
    @property
    def messageId(self) -> bytes:
        """Getter: A unique identifier for the message"""
        return self._inner_dict.get('messageId')  # type: ignore
    @messageId.setter
    def messageId(self, value: bytes) -> None:
        """Setter: A unique identifier for the message"""
        self._inner_dict['messageId'] = value
    @property
    def auditVersion(self) -> Union[None, int]:
        """Getter: The version that is being used for auditing. In version 0, the audit trail buckets events into 10 minute audit windows based on the EventHeader timestamp. In version 1, the audit trail buckets events as follows: if the schema has an outer KafkaAuditHeader, use the outer audit header timestamp for bucketing; else if the EventHeader has an inner KafkaAuditHeader use that inner audit header's timestamp for bucketing"""
        return self._inner_dict.get('auditVersion')  # type: ignore
    @auditVersion.setter
    def auditVersion(self, value: Union[None, int]) -> None:
        """Setter: The version that is being used for auditing. In version 0, the audit trail buckets events into 10 minute audit windows based on the EventHeader timestamp. In version 1, the audit trail buckets events as follows: if the schema has an outer KafkaAuditHeader, use the outer audit header timestamp for bucketing; else if the EventHeader has an inner KafkaAuditHeader use that inner audit header's timestamp for bucketing"""
        self._inner_dict['auditVersion'] = value
    @property
    def fabricUrn(self) -> Union[None, str]:
        """Getter: The fabricUrn of the host from which the event is being emitted. Fabric Urn in the format of urn:li:fabric:{fabric_name}. See go/fabric."""
        return self._inner_dict.get('fabricUrn')  # type: ignore
    @fabricUrn.setter
    def fabricUrn(self, value: Union[None, str]) -> None:
        """Setter: The fabricUrn of the host from which the event is being emitted. Fabric Urn in the format of urn:li:fabric:{fabric_name}. See go/fabric."""
        self._inner_dict['fabricUrn'] = value
    @property
    def clusterConnectionString(self) -> Union[None, str]:
        """Getter: This is a String that the client uses to establish some kind of connection with the Kafka cluster. The exact format of it depends on specific versions of clients and brokers. This information could potentially identify the fabric and cluster with which the client is producing to or consuming from."""
        return self._inner_dict.get('clusterConnectionString')  # type: ignore
    @clusterConnectionString.setter
    def clusterConnectionString(self, value: Union[None, str]) -> None:
        """Setter: This is a String that the client uses to establish some kind of connection with the Kafka cluster. The exact format of it depends on specific versions of clients and brokers. This information could potentially identify the fabric and cluster with which the client is producing to or consuming from."""
        self._inner_dict['clusterConnectionString'] = value
class ChartInfoClass(DictWrapper):
    """Information about a chart"""
    # NOTE: autogenerated by avro_codegen.py from the bundled Avro schema;
    # regenerate rather than editing by hand. Field values live in the
    # DictWrapper-provided _inner_dict.
    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.chart.ChartInfo")
    def __init__(self,
        title: str,
        description: str,
        lastModified: "ChangeAuditStampsClass",
        customProperties: Optional[Dict[str, str]]=None,
        externalUrl: Union[None, str]=None,
        chartUrl: Union[None, str]=None,
        inputs: Union[None, List[str]]=None,
        type: Union[None, Union[str, "ChartTypeClass"]]=None,
        access: Union[None, Union[str, "AccessLevelClass"]]=None,
        lastRefreshed: Union[None, int]=None,
    ):
        super().__init__()
        # A fresh dict per instance (never a shared default) when omitted.
        if customProperties is None:
            self.customProperties = {}
        else:
            self.customProperties = customProperties
        self.externalUrl = externalUrl
        self.title = title
        self.description = description
        self.lastModified = lastModified
        self.chartUrl = chartUrl
        self.inputs = inputs
        self.type = type
        self.access = access
        self.lastRefreshed = lastRefreshed
    # Alternate constructor returning an instance populated with schema defaults.
    @classmethod
    def construct_with_defaults(cls) -> "ChartInfoClass":
        self = cls.construct({})
        self._restore_defaults()
        return self
    # Reset every field to its Avro schema default.
    def _restore_defaults(self) -> None:
        self.customProperties = dict()
        self.externalUrl = self.RECORD_SCHEMA.field_map["externalUrl"].default
        self.title = str()
        self.description = str()
        self.lastModified = ChangeAuditStampsClass.construct_with_defaults()
        self.chartUrl = self.RECORD_SCHEMA.field_map["chartUrl"].default
        self.inputs = self.RECORD_SCHEMA.field_map["inputs"].default
        self.type = self.RECORD_SCHEMA.field_map["type"].default
        self.access = self.RECORD_SCHEMA.field_map["access"].default
        self.lastRefreshed = self.RECORD_SCHEMA.field_map["lastRefreshed"].default
    @property
    def customProperties(self) -> Dict[str, str]:
        """Getter: Custom property bag."""
        return self._inner_dict.get('customProperties')  # type: ignore
    @customProperties.setter
    def customProperties(self, value: Dict[str, str]) -> None:
        """Setter: Custom property bag."""
        self._inner_dict['customProperties'] = value
    @property
    def externalUrl(self) -> Union[None, str]:
        """Getter: URL where the reference exist"""
        return self._inner_dict.get('externalUrl')  # type: ignore
    @externalUrl.setter
    def externalUrl(self, value: Union[None, str]) -> None:
        """Setter: URL where the reference exist"""
        self._inner_dict['externalUrl'] = value
    @property
    def title(self) -> str:
        """Getter: Title of the chart"""
        return self._inner_dict.get('title')  # type: ignore
    @title.setter
    def title(self, value: str) -> None:
        """Setter: Title of the chart"""
        self._inner_dict['title'] = value
    @property
    def description(self) -> str:
        """Getter: Detailed description about the chart"""
        return self._inner_dict.get('description')  # type: ignore
    @description.setter
    def description(self, value: str) -> None:
        """Setter: Detailed description about the chart"""
        self._inner_dict['description'] = value
    @property
    def lastModified(self) -> "ChangeAuditStampsClass":
        """Getter: Captures information about who created/last modified/deleted this chart and when"""
        return self._inner_dict.get('lastModified')  # type: ignore
    @lastModified.setter
    def lastModified(self, value: "ChangeAuditStampsClass") -> None:
        """Setter: Captures information about who created/last modified/deleted this chart and when"""
        self._inner_dict['lastModified'] = value
    @property
    def chartUrl(self) -> Union[None, str]:
        """Getter: URL for the chart. This could be used as an external link on DataHub to allow users access/view the chart"""
        return self._inner_dict.get('chartUrl')  # type: ignore
    @chartUrl.setter
    def chartUrl(self, value: Union[None, str]) -> None:
        """Setter: URL for the chart. This could be used as an external link on DataHub to allow users access/view the chart"""
        self._inner_dict['chartUrl'] = value
    @property
    def inputs(self) -> Union[None, List[str]]:
        """Getter: Data sources for the chart"""
        return self._inner_dict.get('inputs')  # type: ignore
    @inputs.setter
    def inputs(self, value: Union[None, List[str]]) -> None:
        """Setter: Data sources for the chart"""
        self._inner_dict['inputs'] = value
    @property
    def type(self) -> Union[None, Union[str, "ChartTypeClass"]]:
        """Getter: Type of the chart"""
        return self._inner_dict.get('type')  # type: ignore
    @type.setter
    def type(self, value: Union[None, Union[str, "ChartTypeClass"]]) -> None:
        """Setter: Type of the chart"""
        self._inner_dict['type'] = value
    @property
    def access(self) -> Union[None, Union[str, "AccessLevelClass"]]:
        """Getter: Access level for the chart"""
        return self._inner_dict.get('access')  # type: ignore
    @access.setter
    def access(self, value: Union[None, Union[str, "AccessLevelClass"]]) -> None:
        """Setter: Access level for the chart"""
        self._inner_dict['access'] = value
    @property
    def lastRefreshed(self) -> Union[None, int]:
        """Getter: The time when this chart last refreshed"""
        return self._inner_dict.get('lastRefreshed')  # type: ignore
    @lastRefreshed.setter
    def lastRefreshed(self, value: Union[None, int]) -> None:
        """Setter: The time when this chart last refreshed"""
        self._inner_dict['lastRefreshed'] = value
class ChartQueryClass(DictWrapper):
    """Information for chart query which is used for getting data of the chart"""
    # NOTE: autogenerated by avro_codegen.py from the bundled Avro schema;
    # regenerate rather than editing by hand. Field values live in the
    # DictWrapper-provided _inner_dict.
    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.chart.ChartQuery")
    def __init__(self,
        rawQuery: str,
        type: Union[str, "ChartQueryTypeClass"],
    ):
        super().__init__()
        self.rawQuery = rawQuery
        self.type = type
    # Alternate constructor returning an instance populated with schema defaults.
    @classmethod
    def construct_with_defaults(cls) -> "ChartQueryClass":
        self = cls.construct({})
        self._restore_defaults()
        return self
    # Reset every field to its Avro schema default.
    def _restore_defaults(self) -> None:
        self.rawQuery = str()
        self.type = ChartQueryTypeClass.LOOKML
    @property
    def rawQuery(self) -> str:
        """Getter: Raw query to build a chart from input datasets"""
        return self._inner_dict.get('rawQuery')  # type: ignore
    @rawQuery.setter
    def rawQuery(self, value: str) -> None:
        """Setter: Raw query to build a chart from input datasets"""
        self._inner_dict['rawQuery'] = value
    @property
    def type(self) -> Union[str, "ChartQueryTypeClass"]:
        """Getter: Chart query type"""
        return self._inner_dict.get('type')  # type: ignore
    @type.setter
    def type(self, value: Union[str, "ChartQueryTypeClass"]) -> None:
        """Setter: Chart query type"""
        self._inner_dict['type'] = value
class ChartQueryTypeClass(object):
    """LookML queries"""
    # Enum-like holder of chart-query-type constants (autogenerated pattern:
    # the first per-field doc string above became the class docstring).
    LOOKML = "LOOKML"
    # SQL type queries
    SQL = "SQL"
class ChartTypeClass(object):
    """The various types of charts"""
    BAR = "BAR"                  # Chart showing a Bar chart
    PIE = "PIE"                  # Chart showing a Pie chart
    SCATTER = "SCATTER"          # Chart showing a Scatter plot
    TABLE = "TABLE"              # Chart showing a table
    TEXT = "TEXT"                # Chart showing Markdown formatted text
    LINE = "LINE"
    AREA = "AREA"
    HISTOGRAM = "HISTOGRAM"
    BOX_PLOT = "BOX_PLOT"
class EditableChartPropertiesClass(DictWrapper):
    """Stores editable changes made to properties. This separates changes made from
ingestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines"""
    # NOTE: autogenerated by avro_codegen.py from the bundled Avro schema;
    # regenerate rather than editing by hand. Field values live in the
    # DictWrapper-provided _inner_dict.
    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.chart.EditableChartProperties")
    def __init__(self,
        created: "AuditStampClass",
        lastModified: "AuditStampClass",
        deleted: Union[None, "AuditStampClass"]=None,
        description: Union[None, str]=None,
    ):
        super().__init__()
        self.created = created
        self.lastModified = lastModified
        self.deleted = deleted
        self.description = description
    # Alternate constructor returning an instance populated with schema defaults.
    @classmethod
    def construct_with_defaults(cls) -> "EditableChartPropertiesClass":
        self = cls.construct({})
        self._restore_defaults()
        return self
    # Reset every field to its Avro schema default.
    def _restore_defaults(self) -> None:
        self.created = AuditStampClass.construct_with_defaults()
        self.lastModified = AuditStampClass.construct_with_defaults()
        self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
        self.description = self.RECORD_SCHEMA.field_map["description"].default
    @property
    def created(self) -> "AuditStampClass":
        """Getter: An AuditStamp corresponding to the creation of this resource/association/sub-resource"""
        return self._inner_dict.get('created')  # type: ignore
    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        """Setter: An AuditStamp corresponding to the creation of this resource/association/sub-resource"""
        self._inner_dict['created'] = value
    @property
    def lastModified(self) -> "AuditStampClass":
        """Getter: An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created"""
        return self._inner_dict.get('lastModified')  # type: ignore
    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        """Setter: An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created"""
        self._inner_dict['lastModified'] = value
    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """Getter: An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."""
        return self._inner_dict.get('deleted')  # type: ignore
    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        """Setter: An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."""
        self._inner_dict['deleted'] = value
    @property
    def description(self) -> Union[None, str]:
        """Getter: Edited documentation of the chart """
        return self._inner_dict.get('description')  # type: ignore
    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Setter: Edited documentation of the chart """
        self._inner_dict['description'] = value
class AccessLevelClass(object):
    """The various access levels"""
    PUBLIC = "PUBLIC"      # Publicly available access level
    PRIVATE = "PRIVATE"    # Private availability to certain set of users
class AuditStampClass(DictWrapper):
    """Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."""
    # NOTE: autogenerated by avro_codegen.py from the bundled Avro schema;
    # regenerate rather than editing by hand. Field values live in the
    # DictWrapper-provided _inner_dict.
    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.AuditStamp")
    def __init__(self,
        time: int,
        actor: str,
        impersonator: Union[None, str]=None,
    ):
        super().__init__()
        self.time = time
        self.actor = actor
        self.impersonator = impersonator
    # Alternate constructor returning an instance populated with schema defaults.
    @classmethod
    def construct_with_defaults(cls) -> "AuditStampClass":
        self = cls.construct({})
        self._restore_defaults()
        return self
    # Reset every field to its Avro schema default.
    def _restore_defaults(self) -> None:
        self.time = int()
        self.actor = str()
        self.impersonator = self.RECORD_SCHEMA.field_map["impersonator"].default
    @property
    def time(self) -> int:
        """Getter: When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."""
        return self._inner_dict.get('time')  # type: ignore
    @time.setter
    def time(self, value: int) -> None:
        """Setter: When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."""
        self._inner_dict['time'] = value
    @property
    def actor(self) -> str:
        """Getter: The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."""
        return self._inner_dict.get('actor')  # type: ignore
    @actor.setter
    def actor(self, value: str) -> None:
        """Setter: The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."""
        self._inner_dict['actor'] = value
    @property
    def impersonator(self) -> Union[None, str]:
        """Getter: The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."""
        return self._inner_dict.get('impersonator')  # type: ignore
    @impersonator.setter
    def impersonator(self, value: Union[None, str]) -> None:
        """Setter: The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."""
        self._inner_dict['impersonator'] = value
class BrowsePathsClass(DictWrapper):
    """Shared aspect containing Browse Paths to be indexed for an entity."""
    # NOTE: autogenerated by avro_codegen.py from the bundled Avro schema;
    # regenerate rather than editing by hand. Field values live in the
    # DictWrapper-provided _inner_dict.
    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.BrowsePaths")
    def __init__(self,
        paths: List[str],
    ):
        super().__init__()
        self.paths = paths
    # Alternate constructor returning an instance populated with schema defaults.
    @classmethod
    def construct_with_defaults(cls) -> "BrowsePathsClass":
        self = cls.construct({})
        self._restore_defaults()
        return self
    # Reset every field to its Avro schema default.
    def _restore_defaults(self) -> None:
        self.paths = list()
    @property
    def paths(self) -> List[str]:
        """Getter: A list of valid browse paths for the entity.
Browse paths are expected to be backslash-separated strings. For example: 'prod/snowflake/datasetName'"""
        return self._inner_dict.get('paths')  # type: ignore
    @paths.setter
    def paths(self, value: List[str]) -> None:
        """Setter: A list of valid browse paths for the entity.
Browse paths are expected to be backslash-separated strings. For example: 'prod/snowflake/datasetName'"""
        self._inner_dict['paths'] = value
class ChangeAuditStampsClass(DictWrapper):
"""Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into various lifecycle stages, and who acted to move it into those lifecycle stages. The recommended best practice is to include this record in your record schema, and annotate its fields as @readOnly in your resource. See https://github.com/linkedin/rest.li/wiki/Validation-in-Rest.li#restli-validation-annotations"""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.ChangeAuditStamps")
def __init__(self,
created: "AuditStampClass",
lastModified: "AuditStampClass",
deleted: Union[None, "AuditStampClass"]=None,
):
super().__init__()
self.created = created
self.lastModified = lastModified
self.deleted = deleted
@classmethod
def construct_with_defaults(cls) -> "ChangeAuditStampsClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.created = AuditStampClass.construct_with_defaults()
self.lastModified = AuditStampClass.construct_with_defaults()
self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
@property
def created(self) -> "AuditStampClass":
"""Getter: An AuditStamp corresponding to the creation of this resource/association/sub-resource"""
return self._inner_dict.get('created') # type: ignore
@created.setter
def created(self, value: "AuditStampClass") -> None:
"""Setter: An AuditStamp corresponding to the creation of this resource/association/sub-resource"""
self._inner_dict['created'] = value
@property
def lastModified(self) -> "AuditStampClass":
"""Getter: An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created"""
return self._inner_dict.get('lastModified') # type: ignore
@lastModified.setter
def lastModified(self, value: "AuditStampClass") -> None:
"""Setter: An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created"""
self._inner_dict['lastModified'] = value
    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """Getter: An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."""
        # Optional field: None when the resource has not been deleted.
        return self._inner_dict.get('deleted') # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        """Setter: An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."""
        self._inner_dict['deleted'] = value
class CostClass(DictWrapper):
    """A cost record pairing a cost type with its value."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.Cost")

    def __init__(self, costType: Union[str, "CostTypeClass"], cost: "CostCostClass") -> None:
        super().__init__()
        self.costType = costType
        self.cost = cost

    @classmethod
    def construct_with_defaults(cls) -> "CostClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.costType = CostTypeClass.ORG_COST_TYPE
        self.cost = CostCostClass.construct_with_defaults()

    @property
    def costType(self) -> Union[str, "CostTypeClass"]:
        """The category this cost belongs to."""
        return self._inner_dict.get('costType')  # type: ignore

    @costType.setter
    def costType(self, value: Union[str, "CostTypeClass"]) -> None:
        self._inner_dict['costType'] = value

    @property
    def cost(self) -> "CostCostClass":
        """The cost value record."""
        return self._inner_dict.get('cost')  # type: ignore

    @cost.setter
    def cost(self, value: "CostCostClass") -> None:
        self._inner_dict['cost'] = value
class CostCostClass(DictWrapper):
    """A union-like cost value: fieldDiscriminator names which of
    costId / costCode carries the value."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.CostCost")

    def __init__(self,
                 fieldDiscriminator: Union[str, "CostCostDiscriminatorClass"],
                 costId: Union[None, float]=None,
                 costCode: Union[None, str]=None) -> None:
        super().__init__()
        self.costId = costId
        self.costCode = costCode
        self.fieldDiscriminator = fieldDiscriminator

    @classmethod
    def construct_with_defaults(cls) -> "CostCostClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.costId = self.RECORD_SCHEMA.field_map["costId"].default
        self.costCode = self.RECORD_SCHEMA.field_map["costCode"].default
        self.fieldDiscriminator = CostCostDiscriminatorClass.costId

    @property
    def costId(self) -> Union[None, float]:
        """Numeric cost identifier, if that is the active field."""
        return self._inner_dict.get('costId')  # type: ignore

    @costId.setter
    def costId(self, value: Union[None, float]) -> None:
        self._inner_dict['costId'] = value

    @property
    def costCode(self) -> Union[None, str]:
        """String cost code, if that is the active field."""
        return self._inner_dict.get('costCode')  # type: ignore

    @costCode.setter
    def costCode(self, value: Union[None, str]) -> None:
        self._inner_dict['costCode'] = value

    @property
    def fieldDiscriminator(self) -> Union[str, "CostCostDiscriminatorClass"]:
        """The name of the field that has its value set."""
        return self._inner_dict.get('fieldDiscriminator')  # type: ignore

    @fieldDiscriminator.setter
    def fieldDiscriminator(self, value: Union[str, "CostCostDiscriminatorClass"]) -> None:
        self._inner_dict['fieldDiscriminator'] = value
class CostCostDiscriminatorClass(object):
    """Discriminator values naming which CostCost field is populated."""

    costId = "costId"
    costCode = "costCode"
class CostTypeClass(object):
    """Type of Cost Code."""

    # Org cost type to which the cost of this entity should be attributed.
    ORG_COST_TYPE = "ORG_COST_TYPE"
class DeprecationClass(DictWrapper):
    """Deprecation status of an entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.Deprecation")

    def __init__(self,
                 deprecated: bool,
                 note: str,
                 actor: str,
                 decommissionTime: Union[None, int]=None) -> None:
        super().__init__()
        self.deprecated = deprecated
        self.decommissionTime = decommissionTime
        self.note = note
        self.actor = actor

    @classmethod
    def construct_with_defaults(cls) -> "DeprecationClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.deprecated = bool()
        self.decommissionTime = self.RECORD_SCHEMA.field_map["decommissionTime"].default
        self.note = str()
        self.actor = str()

    @property
    def deprecated(self) -> bool:
        """Whether the entity is deprecated."""
        return self._inner_dict.get('deprecated')  # type: ignore

    @deprecated.setter
    def deprecated(self, value: bool) -> None:
        self._inner_dict['deprecated'] = value

    @property
    def decommissionTime(self) -> Union[None, int]:
        """The time the user plans to decommission this entity."""
        return self._inner_dict.get('decommissionTime')  # type: ignore

    @decommissionTime.setter
    def decommissionTime(self, value: Union[None, int]) -> None:
        self._inner_dict['decommissionTime'] = value

    @property
    def note(self) -> str:
        """Additional information about the deprecation plan, e.g. wiki, doc, RB."""
        return self._inner_dict.get('note')  # type: ignore

    @note.setter
    def note(self, value: str) -> None:
        self._inner_dict['note'] = value

    @property
    def actor(self) -> str:
        """The corpuser URN credited for modifying this deprecation content."""
        return self._inner_dict.get('actor')  # type: ignore

    @actor.setter
    def actor(self, value: str) -> None:
        self._inner_dict['actor'] = value
class FabricTypeClass(object):
    """Fabric group type."""

    # Development fabrics.
    DEV = "DEV"
    # Early-integration (staging) fabrics.
    EI = "EI"
    # Production fabrics.
    PROD = "PROD"
    # Corporation fabrics.
    CORP = "CORP"
class GlobalTagsClass(DictWrapper):
    """Tag aspect used for applying tags to an entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.GlobalTags")

    def __init__(self, tags: List["TagAssociationClass"]) -> None:
        super().__init__()
        self.tags = tags

    @classmethod
    def construct_with_defaults(cls) -> "GlobalTagsClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # The default tag set is empty.
        self.tags = list()

    @property
    def tags(self) -> List["TagAssociationClass"]:
        """Tags associated with a given entity."""
        return self._inner_dict.get('tags')  # type: ignore

    @tags.setter
    def tags(self, value: List["TagAssociationClass"]) -> None:
        self._inner_dict['tags'] = value
class GlossaryTermAssociationClass(DictWrapper):
    """Properties of an applied glossary term."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.GlossaryTermAssociation")

    def __init__(self, urn: str) -> None:
        super().__init__()
        self.urn = urn

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryTermAssociationClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # The default urn is the empty string.
        self.urn = str()

    @property
    def urn(self) -> str:
        """Urn of the applied glossary term."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        self._inner_dict['urn'] = value
class GlossaryTermsClass(DictWrapper):
    """Related business terms information."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.GlossaryTerms")

    def __init__(self,
                 terms: List["GlossaryTermAssociationClass"],
                 auditStamp: "AuditStampClass") -> None:
        super().__init__()
        self.terms = terms
        self.auditStamp = auditStamp

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryTermsClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.terms = list()
        self.auditStamp = AuditStampClass.construct_with_defaults()

    @property
    def terms(self) -> List["GlossaryTermAssociationClass"]:
        """The related business terms."""
        return self._inner_dict.get('terms')  # type: ignore

    @terms.setter
    def terms(self, value: List["GlossaryTermAssociationClass"]) -> None:
        self._inner_dict['terms'] = value

    @property
    def auditStamp(self) -> "AuditStampClass":
        """Audit stamp recording who reported the related business term."""
        return self._inner_dict.get('auditStamp')  # type: ignore

    @auditStamp.setter
    def auditStamp(self, value: "AuditStampClass") -> None:
        self._inner_dict['auditStamp'] = value
class InstitutionalMemoryClass(DictWrapper):
    """Institutional memory of an entity: links to relevant documentation
    plus descriptions of that documentation. Institutional or tribal
    knowledge is very important for users to leverage the entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.InstitutionalMemory")

    def __init__(self, elements: List["InstitutionalMemoryMetadataClass"]) -> None:
        super().__init__()
        self.elements = elements

    @classmethod
    def construct_with_defaults(cls) -> "InstitutionalMemoryClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # The default element list is empty.
        self.elements = list()

    @property
    def elements(self) -> List["InstitutionalMemoryMetadataClass"]:
        """Records of institutional memory: each holds a link, description,
        creator and timestamps."""
        return self._inner_dict.get('elements')  # type: ignore

    @elements.setter
    def elements(self, value: List["InstitutionalMemoryMetadataClass"]) -> None:
        self._inner_dict['elements'] = value
class InstitutionalMemoryMetadataClass(DictWrapper):
    """Metadata corresponding to a record of institutional memory."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.InstitutionalMemoryMetadata")

    def __init__(self, url: str, description: str, createStamp: "AuditStampClass") -> None:
        super().__init__()
        self.url = url
        self.description = description
        self.createStamp = createStamp

    @classmethod
    def construct_with_defaults(cls) -> "InstitutionalMemoryMetadataClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.url = str()
        self.description = str()
        self.createStamp = AuditStampClass.construct_with_defaults()

    @property
    def url(self) -> str:
        """Link to an engineering design document or a wiki page."""
        return self._inner_dict.get('url')  # type: ignore

    @url.setter
    def url(self, value: str) -> None:
        self._inner_dict['url'] = value

    @property
    def description(self) -> str:
        """Description of the link."""
        return self._inner_dict.get('description')  # type: ignore

    @description.setter
    def description(self, value: str) -> None:
        self._inner_dict['description'] = value

    @property
    def createStamp(self) -> "AuditStampClass":
        """Audit stamp associated with the creation of this record."""
        return self._inner_dict.get('createStamp')  # type: ignore

    @createStamp.setter
    def createStamp(self, value: "AuditStampClass") -> None:
        self._inner_dict['createStamp'] = value
class MLFeatureDataTypeClass(object):
    """MLFeature data type."""

    # Unique, discrete data with no potential relationship to the outcome
    # variable; typically high-cardinality (e.g. randomly generated account numbers).
    USELESS = "USELESS"
    # Discrete values with no numerical relationship between categories —
    # mean and median are meaningless. Example: animal species.
    NOMINAL = "NOMINAL"
    # Discrete integers that can be ranked or sorted; the gap between first
    # and second need not equal the gap between second and third.
    ORDINAL = "ORDINAL"
    # Discrete data limited to exactly two categories: yes/no, 1/0, on/off.
    BINARY = "BINARY"
    # Discrete whole-number data — never negative; often many small values
    # such as zero and one.
    COUNT = "COUNT"
    # Cyclical, repeating continuous data; the period may be daily, weekly,
    # monthly, annual, etc.
    TIME = "TIME"
    # Equally spaced numbers without a temporal pattern, e.g. percentages,
    # temperatures, income.
    INTERVAL = "INTERVAL"
    # Image data.
    IMAGE = "IMAGE"
    # Video data.
    VIDEO = "VIDEO"
    # Audio data.
    AUDIO = "AUDIO"
    # Text data.
    TEXT = "TEXT"
    # Mapping data type, e.g. dict, map.
    MAP = "MAP"
    # Sequence data type, e.g. list, tuple, range.
    SEQUENCE = "SEQUENCE"
    # Set data type, e.g. set, frozenset.
    SET = "SET"
    # Uncountable values, often measurements such as height, weight, age.
    CONTINUOUS = "CONTINUOUS"
    # Binary-encoded values that can represent complex objects.
    BYTE = "BYTE"
    # Data whose type is not known.
    UNKNOWN = "UNKNOWN"
class OwnerClass(DictWrapper):
    """Ownership information."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.Owner")

    def __init__(self,
                 owner: str,
                 type: Union[str, "OwnershipTypeClass"],
                 source: Union[None, "OwnershipSourceClass"]=None) -> None:
        super().__init__()
        self.owner = owner
        self.type = type
        self.source = source

    @classmethod
    def construct_with_defaults(cls) -> "OwnerClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.owner = str()
        self.type = OwnershipTypeClass.DEVELOPER
        self.source = self.RECORD_SCHEMA.field_map["source"].default

    @property
    def owner(self) -> str:
        """Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name,
        or urn:li:multiProduct:mp_name.
        (Caveat: only corpuser is currently supported in the frontend.)"""
        return self._inner_dict.get('owner')  # type: ignore

    @owner.setter
    def owner(self, value: str) -> None:
        self._inner_dict['owner'] = value

    @property
    def type(self) -> Union[str, "OwnershipTypeClass"]:
        """The type of the ownership."""
        return self._inner_dict.get('type')  # type: ignore

    @type.setter
    def type(self, value: Union[str, "OwnershipTypeClass"]) -> None:
        self._inner_dict['type'] = value

    @property
    def source(self) -> Union[None, "OwnershipSourceClass"]:
        """Source information for the ownership."""
        return self._inner_dict.get('source')  # type: ignore

    @source.setter
    def source(self, value: Union[None, "OwnershipSourceClass"]) -> None:
        self._inner_dict['source'] = value
class OwnershipClass(DictWrapper):
    """Ownership information of an entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.Ownership")

    def __init__(self,
                 owners: List["OwnerClass"],
                 lastModified: "AuditStampClass") -> None:
        super().__init__()
        self.owners = owners
        self.lastModified = lastModified

    @classmethod
    def construct_with_defaults(cls) -> "OwnershipClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.owners = list()
        self.lastModified = AuditStampClass.construct_with_defaults()

    @property
    def owners(self) -> List["OwnerClass"]:
        """List of owners of the entity."""
        return self._inner_dict.get('owners')  # type: ignore

    @owners.setter
    def owners(self, value: List["OwnerClass"]) -> None:
        self._inner_dict['owners'] = value

    @property
    def lastModified(self) -> "AuditStampClass":
        """Audit stamp recording who last modified the record and when."""
        return self._inner_dict.get('lastModified')  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        self._inner_dict['lastModified'] = value
class OwnershipSourceClass(DictWrapper):
    """Source/provider of the ownership information."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.OwnershipSource")

    def __init__(self,
                 type: Union[str, "OwnershipSourceTypeClass"],
                 url: Union[None, str]=None) -> None:
        super().__init__()
        self.type = type
        self.url = url

    @classmethod
    def construct_with_defaults(cls) -> "OwnershipSourceClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.type = OwnershipSourceTypeClass.AUDIT
        self.url = self.RECORD_SCHEMA.field_map["url"].default

    @property
    def type(self) -> Union[str, "OwnershipSourceTypeClass"]:
        """The type of the source."""
        return self._inner_dict.get('type')  # type: ignore

    @type.setter
    def type(self, value: Union[str, "OwnershipSourceTypeClass"]) -> None:
        self._inner_dict['type'] = value

    @property
    def url(self) -> Union[None, str]:
        """A reference URL for the source."""
        return self._inner_dict.get('url')  # type: ignore

    @url.setter
    def url(self, value: Union[None, str]) -> None:
        self._inner_dict['url'] = value
class OwnershipSourceTypeClass(object):
    """Types of systems an ownership record can originate from."""

    # Auditing system or audit logs.
    AUDIT = "AUDIT"
    # Database, e.g. GRANTS table.
    DATABASE = "DATABASE"
    # File system, e.g. file/directory owner.
    FILE_SYSTEM = "FILE_SYSTEM"
    # Issue tracking system, e.g. Jira.
    ISSUE_TRACKING_SYSTEM = "ISSUE_TRACKING_SYSTEM"
    # Manually provided by a user.
    MANUAL = "MANUAL"
    # Other ownership-like service, e.g. Nuage, ACL service etc.
    SERVICE = "SERVICE"
    # SCM system, e.g. GIT, SVN.
    SOURCE_CONTROL = "SOURCE_CONTROL"
    # Other sources.
    OTHER = "OTHER"
class OwnershipTypeClass(object):
    """Owner category or owner role."""

    # A person or group in charge of developing the code.
    DEVELOPER = "DEVELOPER"
    # A person or group that owns the data.
    DATAOWNER = "DATAOWNER"
    # A person or group that oversees the operation, e.g. a DBA or SRE.
    DELEGATE = "DELEGATE"
    # A person, group, or service that produces/generates the data.
    PRODUCER = "PRODUCER"
    # A person, group, or service that consumes the data.
    CONSUMER = "CONSUMER"
    # A person or group with direct business interest.
    STAKEHOLDER = "STAKEHOLDER"
class StatusClass(DictWrapper):
    """The status metadata of an entity, e.g. dataset, metric, feature, etc."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.Status")

    def __init__(self, removed: Optional[bool]=None) -> None:
        super().__init__()
        # An omitted value defaults to "not removed".
        self.removed = False if removed is None else removed

    @classmethod
    def construct_with_defaults(cls) -> "StatusClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Take the default straight from the Avro record schema.
        self.removed = self.RECORD_SCHEMA.field_map["removed"].default

    @property
    def removed(self) -> bool:
        """Whether the entity is removed or not."""
        return self._inner_dict.get('removed')  # type: ignore

    @removed.setter
    def removed(self, value: bool) -> None:
        self._inner_dict['removed'] = value
class TagAssociationClass(DictWrapper):
    """Properties of an applied tag. For now just an Urn; in the future this
    can be extended with other properties, e.g. propagation parameters."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.TagAssociation")

    def __init__(self, tag: str) -> None:
        super().__init__()
        self.tag = tag

    @classmethod
    def construct_with_defaults(cls) -> "TagAssociationClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # The default tag urn is the empty string.
        self.tag = str()

    @property
    def tag(self) -> str:
        """Urn of the applied tag."""
        return self._inner_dict.get('tag')  # type: ignore

    @tag.setter
    def tag(self, value: str) -> None:
        self._inner_dict['tag'] = value
class VersionTagClass(DictWrapper):
    """A resource-defined string representing the resource state, used for
    concurrency control."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.VersionTag")

    def __init__(self, versionTag: Union[None, str]=None) -> None:
        super().__init__()
        self.versionTag = versionTag

    @classmethod
    def construct_with_defaults(cls) -> "VersionTagClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Take the default straight from the Avro record schema.
        self.versionTag = self.RECORD_SCHEMA.field_map["versionTag"].default

    @property
    def versionTag(self) -> Union[None, str]:
        """The opaque version tag string, if any."""
        return self._inner_dict.get('versionTag')  # type: ignore

    @versionTag.setter
    def versionTag(self, value: Union[None, str]) -> None:
        self._inner_dict['versionTag'] = value
class TransformationTypeClass(object):
    """Type of the transformation involved in generating destination
    fields from source fields."""

    # Transformation expressed as an unknown black-box function.
    BLACKBOX = "BLACKBOX"
    # Transformation expressed as the identity function.
    IDENTITY = "IDENTITY"
class UDFTransformerClass(DictWrapper):
    """Field transformation expressed in a UDF."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.common.fieldtransformer.UDFTransformer")

    def __init__(self, udf: str) -> None:
        super().__init__()
        self.udf = udf

    @classmethod
    def construct_with_defaults(cls) -> "UDFTransformerClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # The default udf name is the empty string.
        self.udf = str()

    @property
    def udf(self) -> str:
        """A UDF describing how the source fields were transformed into the
        destination field. This is the FQCN (Fully Qualified Class Name) of
        the udf."""
        return self._inner_dict.get('udf')  # type: ignore

    @udf.setter
    def udf(self, value: str) -> None:
        self._inner_dict['udf'] = value
class DashboardInfoClass(DictWrapper):
    """Information about a dashboard."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dashboard.DashboardInfo")

    def __init__(self,
                 title: str,
                 description: str,
                 lastModified: "ChangeAuditStampsClass",
                 customProperties: Optional[Dict[str, str]]=None,
                 externalUrl: Union[None, str]=None,
                 charts: Optional[List[str]]=None,
                 dashboardUrl: Union[None, str]=None,
                 access: Union[None, Union[str, "AccessLevelClass"]]=None,
                 lastRefreshed: Union[None, int]=None) -> None:
        super().__init__()
        # None collections are normalized to fresh empty containers.
        # NOTE: assignment order fixes the insertion order of the wrapped dict.
        self.customProperties = {} if customProperties is None else customProperties
        self.externalUrl = externalUrl
        self.title = title
        self.description = description
        self.charts = [] if charts is None else charts
        self.lastModified = lastModified
        self.dashboardUrl = dashboardUrl
        self.access = access
        self.lastRefreshed = lastRefreshed

    @classmethod
    def construct_with_defaults(cls) -> "DashboardInfoClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.customProperties = dict()
        self.externalUrl = self.RECORD_SCHEMA.field_map["externalUrl"].default
        self.title = str()
        self.description = str()
        self.charts = list()
        self.lastModified = ChangeAuditStampsClass.construct_with_defaults()
        self.dashboardUrl = self.RECORD_SCHEMA.field_map["dashboardUrl"].default
        self.access = self.RECORD_SCHEMA.field_map["access"].default
        self.lastRefreshed = self.RECORD_SCHEMA.field_map["lastRefreshed"].default

    @property
    def customProperties(self) -> Dict[str, str]:
        """Custom property bag."""
        return self._inner_dict.get('customProperties')  # type: ignore

    @customProperties.setter
    def customProperties(self, value: Dict[str, str]) -> None:
        self._inner_dict['customProperties'] = value

    @property
    def externalUrl(self) -> Union[None, str]:
        """URL where the reference exists."""
        return self._inner_dict.get('externalUrl')  # type: ignore

    @externalUrl.setter
    def externalUrl(self, value: Union[None, str]) -> None:
        self._inner_dict['externalUrl'] = value

    @property
    def title(self) -> str:
        """Title of the dashboard."""
        return self._inner_dict.get('title')  # type: ignore

    @title.setter
    def title(self, value: str) -> None:
        self._inner_dict['title'] = value

    @property
    def description(self) -> str:
        """Detailed description of the dashboard."""
        return self._inner_dict.get('description')  # type: ignore

    @description.setter
    def description(self, value: str) -> None:
        self._inner_dict['description'] = value

    @property
    def charts(self) -> List[str]:
        """Charts in the dashboard."""
        return self._inner_dict.get('charts')  # type: ignore

    @charts.setter
    def charts(self, value: List[str]) -> None:
        self._inner_dict['charts'] = value

    @property
    def lastModified(self) -> "ChangeAuditStampsClass":
        """Who created/last modified/deleted this dashboard, and when."""
        return self._inner_dict.get('lastModified')  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "ChangeAuditStampsClass") -> None:
        self._inner_dict['lastModified'] = value

    @property
    def dashboardUrl(self) -> Union[None, str]:
        """URL for the dashboard; can be used as an external link on DataHub
        so users can access/view the dashboard."""
        return self._inner_dict.get('dashboardUrl')  # type: ignore

    @dashboardUrl.setter
    def dashboardUrl(self, value: Union[None, str]) -> None:
        self._inner_dict['dashboardUrl'] = value

    @property
    def access(self) -> Union[None, Union[str, "AccessLevelClass"]]:
        """Access level for the dashboard."""
        return self._inner_dict.get('access')  # type: ignore

    @access.setter
    def access(self, value: Union[None, Union[str, "AccessLevelClass"]]) -> None:
        self._inner_dict['access'] = value

    @property
    def lastRefreshed(self) -> Union[None, int]:
        """The time this dashboard was last refreshed."""
        return self._inner_dict.get('lastRefreshed')  # type: ignore

    @lastRefreshed.setter
    def lastRefreshed(self, value: Union[None, int]) -> None:
        self._inner_dict['lastRefreshed'] = value
class EditableDashboardPropertiesClass(DictWrapper):
    """Stores editable changes made to properties. This separates changes
    made from ingestion pipelines and edits in the UI, to avoid accidental
    overwrites of user-provided data by ingestion pipelines."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dashboard.EditableDashboardProperties")

    def __init__(self,
                 created: "AuditStampClass",
                 lastModified: "AuditStampClass",
                 deleted: Union[None, "AuditStampClass"]=None,
                 description: Union[None, str]=None) -> None:
        super().__init__()
        self.created = created
        self.lastModified = lastModified
        self.deleted = deleted
        self.description = description

    @classmethod
    def construct_with_defaults(cls) -> "EditableDashboardPropertiesClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.created = AuditStampClass.construct_with_defaults()
        self.lastModified = AuditStampClass.construct_with_defaults()
        self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
        self.description = self.RECORD_SCHEMA.field_map["description"].default

    @property
    def created(self) -> "AuditStampClass":
        """An AuditStamp corresponding to the creation of this
        resource/association/sub-resource."""
        return self._inner_dict.get('created')  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        self._inner_dict['created'] = value

    @property
    def lastModified(self) -> "AuditStampClass":
        """An AuditStamp corresponding to the last modification of this
        resource/association/sub-resource. If nothing has changed since
        creation, this should equal `created`."""
        return self._inner_dict.get('lastModified')  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        self._inner_dict['lastModified'] = value

    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """An AuditStamp corresponding to the deletion of this
        resource/association/sub-resource. Logically, deleted MUST carry a
        later timestamp than creation; it may or may not equal lastModified
        depending on the resource semantics."""
        return self._inner_dict.get('deleted')  # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        self._inner_dict['deleted'] = value

    @property
    def description(self) -> Union[None, str]:
        """Edited documentation of the dashboard."""
        return self._inner_dict.get('description')  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        self._inner_dict['description'] = value
class DataFlowInfoClass(DictWrapper):
    """Information about a Data processing flow."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.datajob.DataFlowInfo")

    def __init__(self,
                 name: str,
                 customProperties: Optional[Dict[str, str]]=None,
                 externalUrl: Union[None, str]=None,
                 description: Union[None, str]=None,
                 project: Union[None, str]=None) -> None:
        super().__init__()
        # A None property bag is normalized to a fresh empty dict.
        self.customProperties = {} if customProperties is None else customProperties
        self.externalUrl = externalUrl
        self.name = name
        self.description = description
        self.project = project

    @classmethod
    def construct_with_defaults(cls) -> "DataFlowInfoClass":
        """Build an instance whose fields hold their schema defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its schema-defined default, in field order.
        self.customProperties = dict()
        self.externalUrl = self.RECORD_SCHEMA.field_map["externalUrl"].default
        self.name = str()
        self.description = self.RECORD_SCHEMA.field_map["description"].default
        self.project = self.RECORD_SCHEMA.field_map["project"].default

    @property
    def customProperties(self) -> Dict[str, str]:
        """Custom property bag."""
        return self._inner_dict.get('customProperties')  # type: ignore

    @customProperties.setter
    def customProperties(self, value: Dict[str, str]) -> None:
        self._inner_dict['customProperties'] = value

    @property
    def externalUrl(self) -> Union[None, str]:
        """URL where the reference exists."""
        return self._inner_dict.get('externalUrl')  # type: ignore

    @externalUrl.setter
    def externalUrl(self, value: Union[None, str]) -> None:
        self._inner_dict['externalUrl'] = value

    @property
    def name(self) -> str:
        """Flow name."""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        self._inner_dict['name'] = value

    @property
    def description(self) -> Union[None, str]:
        """Flow description."""
        return self._inner_dict.get('description')  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        self._inner_dict['description'] = value

    @property
    def project(self) -> Union[None, str]:
        """Optional project/namespace associated with the flow."""
        return self._inner_dict.get('project')  # type: ignore

    @project.setter
    def project(self, value: Union[None, str]) -> None:
        self._inner_dict['project'] = value
class DataJobInfoClass(DictWrapper):
    """Descriptive information about a data processing job."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.datajob.DataJobInfo")

    def __init__(
        self,
        name: str,
        type: Union[str, "AzkabanJobTypeClass"],
        customProperties: Optional[Dict[str, str]] = None,
        externalUrl: Union[None, str] = None,
        description: Union[None, str] = None,
        flowUrn: Union[None, str] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.customProperties = {} if customProperties is None else customProperties
        self.externalUrl = externalUrl
        self.name = name
        self.description = description
        self.type = type
        self.flowUrn = flowUrn

    @classmethod
    def construct_with_defaults(cls) -> "DataJobInfoClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.customProperties = {}
        self.externalUrl = self.RECORD_SCHEMA.field_map["externalUrl"].default
        self.name = ""
        self.description = self.RECORD_SCHEMA.field_map["description"].default
        self.type = AzkabanJobTypeClass.COMMAND
        self.flowUrn = self.RECORD_SCHEMA.field_map["flowUrn"].default

    @property
    def customProperties(self) -> Dict[str, str]:
        """Custom property bag."""
        return self._inner_dict.get("customProperties")  # type: ignore

    @customProperties.setter
    def customProperties(self, value: Dict[str, str]) -> None:
        """Set the custom property bag."""
        self._inner_dict["customProperties"] = value

    @property
    def externalUrl(self) -> Union[None, str]:
        """URL where the reference exists."""
        return self._inner_dict.get("externalUrl")  # type: ignore

    @externalUrl.setter
    def externalUrl(self, value: Union[None, str]) -> None:
        """Set the URL where the reference exists."""
        self._inner_dict["externalUrl"] = value

    @property
    def name(self) -> str:
        """Job name."""
        return self._inner_dict.get("name")  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Set the job name."""
        self._inner_dict["name"] = value

    @property
    def description(self) -> Union[None, str]:
        """Job description."""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Set the job description."""
        self._inner_dict["description"] = value

    @property
    def type(self) -> Union[str, "AzkabanJobTypeClass"]:
        """Datajob type."""
        return self._inner_dict.get("type")  # type: ignore

    @type.setter
    def type(self, value: Union[str, "AzkabanJobTypeClass"]) -> None:
        """Set the datajob type."""
        self._inner_dict["type"] = value

    @property
    def flowUrn(self) -> Union[None, str]:
        """DataFlow urn that this job is part of."""
        return self._inner_dict.get("flowUrn")  # type: ignore

    @flowUrn.setter
    def flowUrn(self, value: Union[None, str]) -> None:
        """Set the DataFlow urn that this job is part of."""
        self._inner_dict["flowUrn"] = value
class DataJobInputOutputClass(DictWrapper):
    """The inputs and outputs of a data processing job."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.datajob.DataJobInputOutput")

    def __init__(
        self,
        inputDatasets: List[str],
        outputDatasets: List[str],
        inputDatajobs: Union[None, List[str]] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.inputDatasets = inputDatasets
        self.outputDatasets = outputDatasets
        self.inputDatajobs = inputDatajobs

    @classmethod
    def construct_with_defaults(cls) -> "DataJobInputOutputClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.inputDatasets = []
        self.outputDatasets = []
        self.inputDatajobs = self.RECORD_SCHEMA.field_map["inputDatajobs"].default

    @property
    def inputDatasets(self) -> List[str]:
        """Input datasets consumed by the data job during processing."""
        return self._inner_dict.get("inputDatasets")  # type: ignore

    @inputDatasets.setter
    def inputDatasets(self, value: List[str]) -> None:
        """Set the input datasets consumed by the data job during processing."""
        self._inner_dict["inputDatasets"] = value

    @property
    def outputDatasets(self) -> List[str]:
        """Output datasets produced by the data job during processing."""
        return self._inner_dict.get("outputDatasets")  # type: ignore

    @outputDatasets.setter
    def outputDatasets(self, value: List[str]) -> None:
        """Set the output datasets produced by the data job during processing."""
        self._inner_dict["outputDatasets"] = value

    @property
    def inputDatajobs(self) -> Union[None, List[str]]:
        """Input datajobs that this data job depends on."""
        return self._inner_dict.get("inputDatajobs")  # type: ignore

    @inputDatajobs.setter
    def inputDatajobs(self, value: Union[None, List[str]]) -> None:
        """Set the input datajobs that this data job depends on."""
        self._inner_dict["inputDatajobs"] = value
class EditableDataFlowPropertiesClass(DictWrapper):
    """Editable changes made to data flow properties.

    Kept separate from ingestion-pipeline-provided properties so that UI edits
    are not accidentally overwritten by ingestion pipelines.
    """

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.datajob.EditableDataFlowProperties")

    def __init__(
        self,
        created: "AuditStampClass",
        lastModified: "AuditStampClass",
        deleted: Union[None, "AuditStampClass"] = None,
        description: Union[None, str] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.created = created
        self.lastModified = lastModified
        self.deleted = deleted
        self.description = description

    @classmethod
    def construct_with_defaults(cls) -> "EditableDataFlowPropertiesClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.created = AuditStampClass.construct_with_defaults()
        self.lastModified = AuditStampClass.construct_with_defaults()
        self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
        self.description = self.RECORD_SCHEMA.field_map["description"].default

    @property
    def created(self) -> "AuditStampClass":
        """Audit stamp for the creation of this resource/association/sub-resource."""
        return self._inner_dict.get("created")  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        """Set the audit stamp for the creation of this resource/association/sub-resource."""
        self._inner_dict["created"] = value

    @property
    def lastModified(self) -> "AuditStampClass":
        """Audit stamp for the last modification; equals `created` if never modified."""
        return self._inner_dict.get("lastModified")  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        """Set the audit stamp for the last modification; equals `created` if never modified."""
        self._inner_dict["lastModified"] = value

    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """Audit stamp for the deletion; logically later than `created` if present."""
        return self._inner_dict.get("deleted")  # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        """Set the audit stamp for the deletion; logically later than `created` if present."""
        self._inner_dict["deleted"] = value

    @property
    def description(self) -> Union[None, str]:
        """Edited documentation of the data flow."""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Set the edited documentation of the data flow."""
        self._inner_dict["description"] = value
class EditableDataJobPropertiesClass(DictWrapper):
    """Editable changes made to data job properties.

    Kept separate from ingestion-pipeline-provided properties so that UI edits
    are not accidentally overwritten by ingestion pipelines.
    """

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.datajob.EditableDataJobProperties")

    def __init__(
        self,
        created: "AuditStampClass",
        lastModified: "AuditStampClass",
        deleted: Union[None, "AuditStampClass"] = None,
        description: Union[None, str] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.created = created
        self.lastModified = lastModified
        self.deleted = deleted
        self.description = description

    @classmethod
    def construct_with_defaults(cls) -> "EditableDataJobPropertiesClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.created = AuditStampClass.construct_with_defaults()
        self.lastModified = AuditStampClass.construct_with_defaults()
        self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
        self.description = self.RECORD_SCHEMA.field_map["description"].default

    @property
    def created(self) -> "AuditStampClass":
        """Audit stamp for the creation of this resource/association/sub-resource."""
        return self._inner_dict.get("created")  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        """Set the audit stamp for the creation of this resource/association/sub-resource."""
        self._inner_dict["created"] = value

    @property
    def lastModified(self) -> "AuditStampClass":
        """Audit stamp for the last modification; equals `created` if never modified."""
        return self._inner_dict.get("lastModified")  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        """Set the audit stamp for the last modification; equals `created` if never modified."""
        self._inner_dict["lastModified"] = value

    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """Audit stamp for the deletion; logically later than `created` if present."""
        return self._inner_dict.get("deleted")  # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        """Set the audit stamp for the deletion; logically later than `created` if present."""
        self._inner_dict["deleted"] = value

    @property
    def description(self) -> Union[None, str]:
        """Edited documentation of the data job."""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Set the edited documentation of the data job."""
        self._inner_dict["description"] = value
class AzkabanJobTypeClass(object):
    """The supported Azkaban job types."""

    # Basic built-in type: runs multiple UNIX commands via java ProcessBuilder.
    # Upon execution, Azkaban spawns off a process to run the command.
    COMMAND = "COMMAND"

    # Runs a java program with the ability to access a Hadoop cluster.
    # https://azkaban.readthedocs.io/en/latest/jobTypes.html#java-job-type
    HADOOP_JAVA = "HADOOP_JAVA"

    # Largely the same as COMMAND, but can talk to a Hadoop cluster
    # securely via Hadoop tokens.
    HADOOP_SHELL = "HADOOP_SHELL"

    # Runs Hive jobs.
    HIVE = "HIVE"

    # Runs Pig jobs.
    PIG = "PIG"

    # Runs SQL queries (Presto, MySQL, etc.).
    SQL = "SQL"
class DataPlatformInfoClass(DictWrapper):
    """Descriptive information about a data platform."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataplatform.DataPlatformInfo")

    def __init__(
        self,
        name: str,
        type: Union[str, "PlatformTypeClass"],
        datasetNameDelimiter: str,
        displayName: Union[None, str] = None,
        logoUrl: Union[None, str] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.name = name
        self.displayName = displayName
        self.type = type
        self.datasetNameDelimiter = datasetNameDelimiter
        self.logoUrl = logoUrl

    @classmethod
    def construct_with_defaults(cls) -> "DataPlatformInfoClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.name = ""
        self.displayName = self.RECORD_SCHEMA.field_map["displayName"].default
        self.type = PlatformTypeClass.FILE_SYSTEM
        self.datasetNameDelimiter = ""
        self.logoUrl = self.RECORD_SCHEMA.field_map["logoUrl"].default

    @property
    def name(self) -> str:
        """Name of the data platform."""
        return self._inner_dict.get("name")  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Set the name of the data platform."""
        self._inner_dict["name"] = value

    @property
    def displayName(self) -> Union[None, str]:
        """The name used for displaying a platform type."""
        return self._inner_dict.get("displayName")  # type: ignore

    @displayName.setter
    def displayName(self, value: Union[None, str]) -> None:
        """Set the name used for displaying a platform type."""
        self._inner_dict["displayName"] = value

    @property
    def type(self) -> Union[str, "PlatformTypeClass"]:
        """Platform type this data platform describes."""
        return self._inner_dict.get("type")  # type: ignore

    @type.setter
    def type(self, value: Union[str, "PlatformTypeClass"]) -> None:
        """Set the platform type this data platform describes."""
        self._inner_dict["type"] = value

    @property
    def datasetNameDelimiter(self) -> str:
        """Delimiter in dataset names on the platform, e.g. '/' for HDFS and '.' for Oracle."""
        return self._inner_dict.get("datasetNameDelimiter")  # type: ignore

    @datasetNameDelimiter.setter
    def datasetNameDelimiter(self, value: str) -> None:
        """Set the delimiter in dataset names on the platform, e.g. '/' for HDFS and '.' for Oracle."""
        self._inner_dict["datasetNameDelimiter"] = value

    @property
    def logoUrl(self) -> Union[None, str]:
        """The URL for a logo associated with the platform."""
        return self._inner_dict.get("logoUrl")  # type: ignore

    @logoUrl.setter
    def logoUrl(self, value: Union[None, str]) -> None:
        """Set the URL for a logo associated with the platform."""
        self._inner_dict["logoUrl"] = value
class PlatformTypeClass(object):
    """Platform types available at LinkedIn."""

    # A file system, e.g. hdfs.
    FILE_SYSTEM = "FILE_SYSTEM"

    # A key-value store, e.g. espresso, voldemort.
    KEY_VALUE_STORE = "KEY_VALUE_STORE"

    # A message broker, e.g. kafka.
    MESSAGE_BROKER = "MESSAGE_BROKER"

    # An object store, e.g. ambry.
    OBJECT_STORE = "OBJECT_STORE"

    # An OLAP datastore, e.g. pinot.
    OLAP_DATASTORE = "OLAP_DATASTORE"

    # Other platforms, e.g. salesforce, dovetail.
    OTHERS = "OTHERS"

    # A query engine, e.g. presto.
    QUERY_ENGINE = "QUERY_ENGINE"

    # A relational database, e.g. oracle, mysql.
    RELATIONAL_DB = "RELATIONAL_DB"

    # A search engine, e.g. seas.
    SEARCH_ENGINE = "SEARCH_ENGINE"
class DataProcessInfoClass(DictWrapper):
    """The inputs and outputs of this data process."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataprocess.DataProcessInfo")

    def __init__(
        self,
        inputs: Union[None, List[str]] = None,
        outputs: Union[None, List[str]] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.inputs = inputs
        self.outputs = outputs

    @classmethod
    def construct_with_defaults(cls) -> "DataProcessInfoClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.inputs = self.RECORD_SCHEMA.field_map["inputs"].default
        self.outputs = self.RECORD_SCHEMA.field_map["outputs"].default

    @property
    def inputs(self) -> Union[None, List[str]]:
        """The inputs of the data process."""
        return self._inner_dict.get("inputs")  # type: ignore

    @inputs.setter
    def inputs(self, value: Union[None, List[str]]) -> None:
        """Set the inputs of the data process."""
        self._inner_dict["inputs"] = value

    @property
    def outputs(self) -> Union[None, List[str]]:
        """The outputs of the data process."""
        return self._inner_dict.get("outputs")  # type: ignore

    @outputs.setter
    def outputs(self, value: Union[None, List[str]]) -> None:
        """Set the outputs of the data process."""
        self._inner_dict["outputs"] = value
class DatasetDeprecationClass(DictWrapper):
    """Dataset deprecation status."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataset.DatasetDeprecation")

    def __init__(
        self,
        deprecated: bool,
        note: str,
        decommissionTime: Union[None, int] = None,
        actor: Union[None, str] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.deprecated = deprecated
        self.decommissionTime = decommissionTime
        self.note = note
        self.actor = actor

    @classmethod
    def construct_with_defaults(cls) -> "DatasetDeprecationClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.deprecated = bool()
        self.decommissionTime = self.RECORD_SCHEMA.field_map["decommissionTime"].default
        self.note = ""
        self.actor = self.RECORD_SCHEMA.field_map["actor"].default

    @property
    def deprecated(self) -> bool:
        """Whether the dataset is deprecated by its owner."""
        return self._inner_dict.get("deprecated")  # type: ignore

    @deprecated.setter
    def deprecated(self, value: bool) -> None:
        """Set whether the dataset is deprecated by its owner."""
        self._inner_dict["deprecated"] = value

    @property
    def decommissionTime(self) -> Union[None, int]:
        """The time the user plans to decommission this dataset."""
        return self._inner_dict.get("decommissionTime")  # type: ignore

    @decommissionTime.setter
    def decommissionTime(self, value: Union[None, int]) -> None:
        """Set the time the user plans to decommission this dataset."""
        self._inner_dict["decommissionTime"] = value

    @property
    def note(self) -> str:
        """Additional information about the deprecation plan (wiki, doc, RB, ...)."""
        return self._inner_dict.get("note")  # type: ignore

    @note.setter
    def note(self, value: str) -> None:
        """Set additional information about the deprecation plan (wiki, doc, RB, ...)."""
        self._inner_dict["note"] = value

    @property
    def actor(self) -> Union[None, str]:
        """The corpuser URN credited with modifying this deprecation content."""
        return self._inner_dict.get("actor")  # type: ignore

    @actor.setter
    def actor(self, value: Union[None, str]) -> None:
        """Set the corpuser URN credited with modifying this deprecation content."""
        self._inner_dict["actor"] = value
class DatasetFieldMappingClass(DictWrapper):
    """Mapping between fields in a source dataset and the field in a destination dataset."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataset.DatasetFieldMapping")

    def __init__(
        self,
        created: "AuditStampClass",
        transformation: Union[Union[str, "TransformationTypeClass"], "UDFTransformerClass"],
        sourceFields: List[str],
        destinationField: str,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.created = created
        self.transformation = transformation
        self.sourceFields = sourceFields
        self.destinationField = destinationField

    @classmethod
    def construct_with_defaults(cls) -> "DatasetFieldMappingClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.created = AuditStampClass.construct_with_defaults()
        self.transformation = TransformationTypeClass.BLACKBOX
        self.sourceFields = []
        self.destinationField = ""

    @property
    def created(self) -> "AuditStampClass":
        """Audit stamp recording who reported the field mapping and when."""
        return self._inner_dict.get("created")  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        """Set the audit stamp recording who reported the field mapping and when."""
        self._inner_dict["created"] = value

    @property
    def transformation(self) -> Union[Union[str, "TransformationTypeClass"], "UDFTransformerClass"]:
        """Transformation function between the fields involved."""
        return self._inner_dict.get("transformation")  # type: ignore

    @transformation.setter
    def transformation(self, value: Union[Union[str, "TransformationTypeClass"], "UDFTransformerClass"]) -> None:
        """Set the transformation function between the fields involved."""
        self._inner_dict["transformation"] = value

    @property
    def sourceFields(self) -> List[str]:
        """Source fields from which the fine-grained lineage is derived."""
        return self._inner_dict.get("sourceFields")  # type: ignore

    @sourceFields.setter
    def sourceFields(self, value: List[str]) -> None:
        """Set the source fields from which the fine-grained lineage is derived."""
        self._inner_dict["sourceFields"] = value

    @property
    def destinationField(self) -> str:
        """Destination field which is derived from the source fields."""
        return self._inner_dict.get("destinationField")  # type: ignore

    @destinationField.setter
    def destinationField(self, value: str) -> None:
        """Set the destination field which is derived from the source fields."""
        self._inner_dict["destinationField"] = value
class DatasetLineageTypeClass(object):
    """The supported types of dataset lineage."""

    # Direct copy without modification.
    COPY = "COPY"

    # Transformed data with modification (format or content change).
    TRANSFORMED = "TRANSFORMED"

    # A view defined on the sources, e.g. a Hive view defined on underlying Hive
    # tables, a Hive table pointing to an HDFS dataset, or a DALI view defined
    # on multiple sources.
    VIEW = "VIEW"
class DatasetPropertiesClass(DictWrapper):
    """Properties associated with a Dataset."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataset.DatasetProperties")

    def __init__(
        self,
        customProperties: Optional[Dict[str, str]] = None,
        externalUrl: Union[None, str] = None,
        description: Union[None, str] = None,
        uri: Union[None, str] = None,
        tags: Optional[List[str]] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.customProperties = {} if customProperties is None else customProperties
        self.externalUrl = externalUrl
        self.description = description
        self.uri = uri
        self.tags = [] if tags is None else tags

    @classmethod
    def construct_with_defaults(cls) -> "DatasetPropertiesClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.customProperties = {}
        self.externalUrl = self.RECORD_SCHEMA.field_map["externalUrl"].default
        self.description = self.RECORD_SCHEMA.field_map["description"].default
        self.uri = self.RECORD_SCHEMA.field_map["uri"].default
        self.tags = []

    @property
    def customProperties(self) -> Dict[str, str]:
        """Custom property bag."""
        return self._inner_dict.get("customProperties")  # type: ignore

    @customProperties.setter
    def customProperties(self, value: Dict[str, str]) -> None:
        """Set the custom property bag."""
        self._inner_dict["customProperties"] = value

    @property
    def externalUrl(self) -> Union[None, str]:
        """URL where the reference exists."""
        return self._inner_dict.get("externalUrl")  # type: ignore

    @externalUrl.setter
    def externalUrl(self, value: Union[None, str]) -> None:
        """Set the URL where the reference exists."""
        self._inner_dict["externalUrl"] = value

    @property
    def description(self) -> Union[None, str]:
        """Documentation of the dataset."""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Set the documentation of the dataset."""
        self._inner_dict["description"] = value

    @property
    def uri(self) -> Union[None, str]:
        """Abstracted URI, e.g. hdfs:///data/tracking/PageViewEvent or file:///dir/file_name.

        Should not include environment-specific properties. Optional because some
        datasets (e.g. a Kafka topic) have no standardized URI.
        """
        return self._inner_dict.get("uri")  # type: ignore

    @uri.setter
    def uri(self, value: Union[None, str]) -> None:
        """Set the abstracted URI, e.g. hdfs:///data/tracking/PageViewEvent or file:///dir/file_name.

        Should not include environment-specific properties. Optional because some
        datasets (e.g. a Kafka topic) have no standardized URI.
        """
        self._inner_dict["uri"] = value

    @property
    def tags(self) -> List[str]:
        """[Legacy] Unstructured tags; structured tags go through the `GlobalTags` aspect."""
        return self._inner_dict.get("tags")  # type: ignore

    @tags.setter
    def tags(self, value: List[str]) -> None:
        """Set the [Legacy] unstructured tags; structured tags go through the `GlobalTags` aspect."""
        self._inner_dict["tags"] = value
class DatasetUpstreamLineageClass(DictWrapper):
    """Fine-grained upstream lineage for fields in a dataset."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataset.DatasetUpstreamLineage")

    def __init__(
        self,
        fieldMappings: List["DatasetFieldMappingClass"],
    ) -> None:
        super().__init__()
        self.fieldMappings = fieldMappings

    @classmethod
    def construct_with_defaults(cls) -> "DatasetUpstreamLineageClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.fieldMappings = []

    @property
    def fieldMappings(self) -> List["DatasetFieldMappingClass"]:
        """Upstream-to-downstream field-level lineage mappings."""
        return self._inner_dict.get("fieldMappings")  # type: ignore

    @fieldMappings.setter
    def fieldMappings(self, value: List["DatasetFieldMappingClass"]) -> None:
        """Set the upstream-to-downstream field-level lineage mappings."""
        self._inner_dict["fieldMappings"] = value
class EditableDatasetPropertiesClass(DictWrapper):
    """Editable changes made to dataset properties.

    Kept separate from ingestion-pipeline-provided properties so that UI edits
    are not accidentally overwritten by ingestion pipelines.
    """

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataset.EditableDatasetProperties")

    def __init__(
        self,
        created: "AuditStampClass",
        lastModified: "AuditStampClass",
        deleted: Union[None, "AuditStampClass"] = None,
        description: Union[None, str] = None,
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.created = created
        self.lastModified = lastModified
        self.deleted = deleted
        self.description = description

    @classmethod
    def construct_with_defaults(cls) -> "EditableDatasetPropertiesClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.created = AuditStampClass.construct_with_defaults()
        self.lastModified = AuditStampClass.construct_with_defaults()
        self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
        self.description = self.RECORD_SCHEMA.field_map["description"].default

    @property
    def created(self) -> "AuditStampClass":
        """Audit stamp for the creation of this resource/association/sub-resource."""
        return self._inner_dict.get("created")  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        """Set the audit stamp for the creation of this resource/association/sub-resource."""
        self._inner_dict["created"] = value

    @property
    def lastModified(self) -> "AuditStampClass":
        """Audit stamp for the last modification; equals `created` if never modified."""
        return self._inner_dict.get("lastModified")  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        """Set the audit stamp for the last modification; equals `created` if never modified."""
        self._inner_dict["lastModified"] = value

    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """Audit stamp for the deletion; logically later than `created` if present."""
        return self._inner_dict.get("deleted")  # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        """Set the audit stamp for the deletion; logically later than `created` if present."""
        self._inner_dict["deleted"] = value

    @property
    def description(self) -> Union[None, str]:
        """Documentation of the dataset."""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Set the documentation of the dataset."""
        self._inner_dict["description"] = value
class UpstreamClass(DictWrapper):
    """Upstream lineage information about a dataset, including the source reporting it."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataset.Upstream")

    def __init__(
        self,
        auditStamp: "AuditStampClass",
        dataset: str,
        type: Union[str, "DatasetLineageTypeClass"],
    ) -> None:
        super().__init__()
        # Assignment order is preserved: it fixes the key order of _inner_dict.
        self.auditStamp = auditStamp
        self.dataset = dataset
        self.type = type

    @classmethod
    def construct_with_defaults(cls) -> "UpstreamClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.auditStamp = AuditStampClass.construct_with_defaults()
        self.dataset = ""
        self.type = DatasetLineageTypeClass.COPY

    @property
    def auditStamp(self) -> "AuditStampClass":
        """Audit stamp recording who reported the lineage and when."""
        return self._inner_dict.get("auditStamp")  # type: ignore

    @auditStamp.setter
    def auditStamp(self, value: "AuditStampClass") -> None:
        """Set the audit stamp recording who reported the lineage and when."""
        self._inner_dict["auditStamp"] = value

    @property
    def dataset(self) -> str:
        """The upstream dataset the lineage points to."""
        return self._inner_dict.get("dataset")  # type: ignore

    @dataset.setter
    def dataset(self, value: str) -> None:
        """Set the upstream dataset the lineage points to."""
        self._inner_dict["dataset"] = value

    @property
    def type(self) -> Union[str, "DatasetLineageTypeClass"]:
        """The type of the lineage."""
        return self._inner_dict.get("type")  # type: ignore

    @type.setter
    def type(self, value: Union[str, "DatasetLineageTypeClass"]) -> None:
        """Set the type of the lineage."""
        self._inner_dict["type"] = value
class UpstreamLineageClass(DictWrapper):
    """Upstream lineage of a dataset."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.dataset.UpstreamLineage")

    def __init__(
        self,
        upstreams: List["UpstreamClass"],
    ) -> None:
        super().__init__()
        self.upstreams = upstreams

    @classmethod
    def construct_with_defaults(cls) -> "UpstreamLineageClass":
        """Build an instance with every field set to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset each field to its default as declared in the Avro schema.
        self.upstreams = []

    @property
    def upstreams(self) -> List["UpstreamClass"]:
        """List of upstream dataset lineage information."""
        return self._inner_dict.get("upstreams")  # type: ignore

    @upstreams.setter
    def upstreams(self, value: List["UpstreamClass"]) -> None:
        """Set the list of upstream dataset lineage information."""
        self._inner_dict["upstreams"] = value
class GlossaryNodeInfoClass(DictWrapper):
    """Properties attached to a GlossaryNode."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.glossary.GlossaryNodeInfo")

    def __init__(self,
        definition: str,
        parentNode: Union[None, str]=None,
    ):
        super().__init__()
        self.definition = definition
        self.parentNode = parentNode

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryNodeInfoClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.definition = ""
        self.parentNode = self.RECORD_SCHEMA.field_map["parentNode"].default

    @property
    def definition(self) -> str:
        """Getter: definition of the business node"""
        return self._inner_dict.get('definition')  # type: ignore

    @definition.setter
    def definition(self, value: str) -> None:
        """Setter: definition of the business node"""
        self._inner_dict['definition'] = value

    @property
    def parentNode(self) -> Union[None, str]:
        """Getter: parent node of this glossary entry, if any"""
        return self._inner_dict.get('parentNode')  # type: ignore

    @parentNode.setter
    def parentNode(self, value: Union[None, str]) -> None:
        """Setter: parent node of this glossary entry, if any"""
        self._inner_dict['parentNode'] = value
class GlossaryTermInfoClass(DictWrapper):
    """Properties attached to a GlossaryTerm."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.glossary.GlossaryTermInfo")

    def __init__(self,
        definition: str,
        termSource: str,
        parentNode: Union[None, str]=None,
        sourceRef: Union[None, str]=None,
        sourceUrl: Union[None, str]=None,
        customProperties: Optional[Dict[str, str]]=None,
    ):
        super().__init__()
        self.definition = definition
        self.parentNode = parentNode
        self.termSource = termSource
        self.sourceRef = sourceRef
        self.sourceUrl = sourceUrl
        # A missing map defaults to an empty dict.
        self.customProperties = {} if customProperties is None else customProperties

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryTermInfoClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.definition = ""
        self.parentNode = self.RECORD_SCHEMA.field_map["parentNode"].default
        self.termSource = ""
        self.sourceRef = self.RECORD_SCHEMA.field_map["sourceRef"].default
        self.sourceUrl = self.RECORD_SCHEMA.field_map["sourceUrl"].default
        self.customProperties = {}

    @property
    def definition(self) -> str:
        """Getter: definition of the business term"""
        return self._inner_dict.get('definition')  # type: ignore

    @definition.setter
    def definition(self, value: str) -> None:
        """Setter: definition of the business term"""
        self._inner_dict['definition'] = value

    @property
    def parentNode(self) -> Union[None, str]:
        """Getter: parent node of this glossary term, if any"""
        return self._inner_dict.get('parentNode')  # type: ignore

    @parentNode.setter
    def parentNode(self, value: Union[None, str]) -> None:
        """Setter: parent node of this glossary term, if any"""
        self._inner_dict['parentNode'] = value

    @property
    def termSource(self) -> str:
        """Getter: where the business term originates (INTERNAL or EXTERNAL; default INTERNAL)"""
        return self._inner_dict.get('termSource')  # type: ignore

    @termSource.setter
    def termSource(self, value: str) -> None:
        """Setter: where the business term originates (INTERNAL or EXTERNAL; default INTERNAL)"""
        self._inner_dict['termSource'] = value

    @property
    def sourceRef(self) -> Union[None, str]:
        """Getter: external reference for the business term"""
        return self._inner_dict.get('sourceRef')  # type: ignore

    @sourceRef.setter
    def sourceRef(self, value: Union[None, str]) -> None:
        """Setter: external reference for the business term"""
        self._inner_dict['sourceRef'] = value

    @property
    def sourceUrl(self) -> Union[None, str]:
        """Getter: the abstracted URL such as https://spec.edmcouncil.org/fibo/ontology/FBC/FinancialInstruments/FinancialInstruments/CashInstrument."""
        return self._inner_dict.get('sourceUrl')  # type: ignore

    @sourceUrl.setter
    def sourceUrl(self, value: Union[None, str]) -> None:
        """Setter: the abstracted URL such as https://spec.edmcouncil.org/fibo/ontology/FBC/FinancialInstruments/FinancialInstruments/CashInstrument."""
        self._inner_dict['sourceUrl'] = value

    @property
    def customProperties(self) -> Dict[str, str]:
        """Getter: key-value map holding any non-standardized term properties"""
        return self._inner_dict.get('customProperties')  # type: ignore

    @customProperties.setter
    def customProperties(self, value: Dict[str, str]) -> None:
        """Setter: key-value map holding any non-standardized term properties"""
        self._inner_dict['customProperties'] = value
class CorpGroupInfoClass(DictWrapper):
    """Group of corpUser; may contain nested groups."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.identity.CorpGroupInfo")

    def __init__(self,
        email: str,
        admins: List[str],
        members: List[str],
        groups: List[str],
    ):
        super().__init__()
        self.email = email
        self.admins = admins
        self.members = members
        self.groups = groups

    @classmethod
    def construct_with_defaults(cls) -> "CorpGroupInfoClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.email = ""
        self.admins = []
        self.members = []
        self.groups = []

    @property
    def email(self) -> str:
        """Getter: email address of this group"""
        return self._inner_dict.get('email')  # type: ignore

    @email.setter
    def email(self, value: str) -> None:
        """Setter: email address of this group"""
        self._inner_dict['email'] = value

    @property
    def admins(self) -> List[str]:
        """Getter: owners of this group"""
        return self._inner_dict.get('admins')  # type: ignore

    @admins.setter
    def admins(self, value: List[str]) -> None:
        """Setter: owners of this group"""
        self._inner_dict['admins'] = value

    @property
    def members(self) -> List[str]:
        """Getter: ldap urns of the users in this group"""
        return self._inner_dict.get('members')  # type: ignore

    @members.setter
    def members(self, value: List[str]) -> None:
        """Setter: ldap urns of the users in this group"""
        self._inner_dict['members'] = value

    @property
    def groups(self) -> List[str]:
        """Getter: nested groups contained in this group"""
        return self._inner_dict.get('groups')  # type: ignore

    @groups.setter
    def groups(self, value: List[str]) -> None:
        """Setter: nested groups contained in this group"""
        self._inner_dict['groups'] = value
class CorpUserEditableInfoClass(DictWrapper):
    """Linkedin corp user information that can be edited from the UI."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.identity.CorpUserEditableInfo")

    def __init__(self,
        aboutMe: Union[None, str]=None,
        teams: Optional[List[str]]=None,
        skills: Optional[List[str]]=None,
        pictureLink: Optional[str]=None,
    ):
        super().__init__()
        self.aboutMe = aboutMe
        # Missing collections default to empty lists.
        self.teams = [] if teams is None else teams
        self.skills = [] if skills is None else skills
        # Absent picture link falls back to the stock avatar URL.
        if pictureLink is None:
            self.pictureLink = 'https://raw.githubusercontent.com/linkedin/datahub/master/datahub-web/packages/data-portal/public/assets/images/default_avatar.png'
        else:
            self.pictureLink = pictureLink

    @classmethod
    def construct_with_defaults(cls) -> "CorpUserEditableInfoClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.aboutMe = self.RECORD_SCHEMA.field_map["aboutMe"].default
        self.teams = []
        self.skills = []
        self.pictureLink = self.RECORD_SCHEMA.field_map["pictureLink"].default

    @property
    def aboutMe(self) -> Union[None, str]:
        """Getter: the user's "about me" section"""
        return self._inner_dict.get('aboutMe')  # type: ignore

    @aboutMe.setter
    def aboutMe(self, value: Union[None, str]) -> None:
        """Setter: the user's "about me" section"""
        self._inner_dict['aboutMe'] = value

    @property
    def teams(self) -> List[str]:
        """Getter: teams the user belongs to, e.g. Metadata"""
        return self._inner_dict.get('teams')  # type: ignore

    @teams.setter
    def teams(self, value: List[str]) -> None:
        """Setter: teams the user belongs to, e.g. Metadata"""
        self._inner_dict['teams'] = value

    @property
    def skills(self) -> List[str]:
        """Getter: skills the user possesses, e.g. Machine Learning"""
        return self._inner_dict.get('skills')  # type: ignore

    @skills.setter
    def skills(self, value: List[str]) -> None:
        """Setter: skills the user possesses, e.g. Machine Learning"""
        self._inner_dict['skills'] = value

    @property
    def pictureLink(self) -> str:
        """Getter: URL of the picture the user wants as a profile photo"""
        return self._inner_dict.get('pictureLink')  # type: ignore

    @pictureLink.setter
    def pictureLink(self, value: str) -> None:
        """Setter: URL of the picture the user wants as a profile photo"""
        self._inner_dict['pictureLink'] = value
class CorpUserInfoClass(DictWrapper):
    """Linkedin corp user information."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.identity.CorpUserInfo")

    def __init__(self,
        active: bool,
        email: str,
        displayName: Union[None, str]=None,
        title: Union[None, str]=None,
        managerUrn: Union[None, str]=None,
        departmentId: Union[None, int]=None,
        departmentName: Union[None, str]=None,
        firstName: Union[None, str]=None,
        lastName: Union[None, str]=None,
        fullName: Union[None, str]=None,
        countryCode: Union[None, str]=None,
    ):
        super().__init__()
        self.active = active
        self.displayName = displayName
        self.email = email
        self.title = title
        self.managerUrn = managerUrn
        self.departmentId = departmentId
        self.departmentName = departmentName
        self.firstName = firstName
        self.lastName = lastName
        self.fullName = fullName
        self.countryCode = countryCode

    @classmethod
    def construct_with_defaults(cls) -> "CorpUserInfoClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.active = False
        self.displayName = self.RECORD_SCHEMA.field_map["displayName"].default
        self.email = ""
        self.title = self.RECORD_SCHEMA.field_map["title"].default
        self.managerUrn = self.RECORD_SCHEMA.field_map["managerUrn"].default
        self.departmentId = self.RECORD_SCHEMA.field_map["departmentId"].default
        self.departmentName = self.RECORD_SCHEMA.field_map["departmentName"].default
        self.firstName = self.RECORD_SCHEMA.field_map["firstName"].default
        self.lastName = self.RECORD_SCHEMA.field_map["lastName"].default
        self.fullName = self.RECORD_SCHEMA.field_map["fullName"].default
        self.countryCode = self.RECORD_SCHEMA.field_map["countryCode"].default

    @property
    def active(self) -> bool:
        """Getter: whether the corpUser is active, ref: https://iwww.corp.linkedin.com/wiki/cf/display/GTSD/Accessing+Active+Directory+via+LDAP+tools"""
        return self._inner_dict.get('active')  # type: ignore

    @active.setter
    def active(self, value: bool) -> None:
        """Setter: whether the corpUser is active, ref: https://iwww.corp.linkedin.com/wiki/cf/display/GTSD/Accessing+Active+Directory+via+LDAP+tools"""
        self._inner_dict['active'] = value

    @property
    def displayName(self) -> Union[None, str]:
        """Getter: display name of this user, e.g. Hang Zhang(DataHQ)"""
        return self._inner_dict.get('displayName')  # type: ignore

    @displayName.setter
    def displayName(self, value: Union[None, str]) -> None:
        """Setter: display name of this user, e.g. Hang Zhang(DataHQ)"""
        self._inner_dict['displayName'] = value

    @property
    def email(self) -> str:
        """Getter: email address of this user"""
        return self._inner_dict.get('email')  # type: ignore

    @email.setter
    def email(self, value: str) -> None:
        """Setter: email address of this user"""
        self._inner_dict['email'] = value

    @property
    def title(self) -> Union[None, str]:
        """Getter: job title of this user"""
        return self._inner_dict.get('title')  # type: ignore

    @title.setter
    def title(self, value: Union[None, str]) -> None:
        """Setter: job title of this user"""
        self._inner_dict['title'] = value

    @property
    def managerUrn(self) -> Union[None, str]:
        """Getter: this user's direct manager"""
        return self._inner_dict.get('managerUrn')  # type: ignore

    @managerUrn.setter
    def managerUrn(self, value: Union[None, str]) -> None:
        """Setter: this user's direct manager"""
        self._inner_dict['managerUrn'] = value

    @property
    def departmentId(self) -> Union[None, int]:
        """Getter: id of the department this user belongs to"""
        return self._inner_dict.get('departmentId')  # type: ignore

    @departmentId.setter
    def departmentId(self, value: Union[None, int]) -> None:
        """Setter: id of the department this user belongs to"""
        self._inner_dict['departmentId'] = value

    @property
    def departmentName(self) -> Union[None, str]:
        """Getter: name of the department this user belongs to"""
        return self._inner_dict.get('departmentName')  # type: ignore

    @departmentName.setter
    def departmentName(self, value: Union[None, str]) -> None:
        """Setter: name of the department this user belongs to"""
        self._inner_dict['departmentName'] = value

    @property
    def firstName(self) -> Union[None, str]:
        """Getter: this user's first name"""
        return self._inner_dict.get('firstName')  # type: ignore

    @firstName.setter
    def firstName(self, value: Union[None, str]) -> None:
        """Setter: this user's first name"""
        self._inner_dict['firstName'] = value

    @property
    def lastName(self) -> Union[None, str]:
        """Getter: this user's last name"""
        return self._inner_dict.get('lastName')  # type: ignore

    @lastName.setter
    def lastName(self, value: Union[None, str]) -> None:
        """Setter: this user's last name"""
        self._inner_dict['lastName'] = value

    @property
    def fullName(self) -> Union[None, str]:
        """Getter: common name of this user; format is firstName + lastName (split by a whitespace)"""
        return self._inner_dict.get('fullName')  # type: ignore

    @fullName.setter
    def fullName(self, value: Union[None, str]) -> None:
        """Setter: common name of this user; format is firstName + lastName (split by a whitespace)"""
        self._inner_dict['fullName'] = value

    @property
    def countryCode(self) -> Union[None, str]:
        """Getter: two-uppercase-letter country code, e.g. US"""
        return self._inner_dict.get('countryCode')  # type: ignore

    @countryCode.setter
    def countryCode(self, value: Union[None, str]) -> None:
        """Setter: two-uppercase-letter country code, e.g. US"""
        self._inner_dict['countryCode'] = value
class ChartKeyClass(DictWrapper):
    """Key for a Chart."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.ChartKey")

    def __init__(self,
        dashboardTool: str,
        chartId: str,
    ):
        super().__init__()
        self.dashboardTool = dashboardTool
        self.chartId = chartId

    @classmethod
    def construct_with_defaults(cls) -> "ChartKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.dashboardTool = ""
        self.chartId = ""

    @property
    def dashboardTool(self) -> str:
        """Getter: name of the dashboarding tool, e.g. looker, redash"""
        return self._inner_dict.get('dashboardTool')  # type: ignore

    @dashboardTool.setter
    def dashboardTool(self, value: str) -> None:
        """Setter: name of the dashboarding tool, e.g. looker, redash"""
        self._inner_dict['dashboardTool'] = value

    @property
    def chartId(self) -> str:
        """Getter: unique id of the chart; globally unique per dashboarding tool even across deployments. For Looker, the chart URL such as 'looker.linkedin.com/looks/1234' can serve as the id."""
        return self._inner_dict.get('chartId')  # type: ignore

    @chartId.setter
    def chartId(self, value: str) -> None:
        """Setter: unique id of the chart; globally unique per dashboarding tool even across deployments. For Looker, the chart URL such as 'looker.linkedin.com/looks/1234' can serve as the id."""
        self._inner_dict['chartId'] = value
class CorpGroupKeyClass(DictWrapper):
    """Key for a CorpGroup."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.CorpGroupKey")

    def __init__(self,
        name: str,
    ):
        super().__init__()
        self.name = name

    @classmethod
    def construct_with_defaults(cls) -> "CorpGroupKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.name = ""

    @property
    def name(self) -> str:
        """Getter: name of the AD/LDAP group"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: name of the AD/LDAP group"""
        self._inner_dict['name'] = value
class CorpUserKeyClass(DictWrapper):
    """Key for a CorpUser."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.CorpUserKey")

    def __init__(self,
        username: str,
    ):
        super().__init__()
        self.username = username

    @classmethod
    def construct_with_defaults(cls) -> "CorpUserKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.username = ""

    @property
    def username(self) -> str:
        """Getter: name of the AD/LDAP user"""
        return self._inner_dict.get('username')  # type: ignore

    @username.setter
    def username(self, value: str) -> None:
        """Setter: name of the AD/LDAP user"""
        self._inner_dict['username'] = value
class DashboardKeyClass(DictWrapper):
    """Key for a Dashboard."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.DashboardKey")

    def __init__(self,
        dashboardTool: str,
        dashboardId: str,
    ):
        super().__init__()
        self.dashboardTool = dashboardTool
        self.dashboardId = dashboardId

    @classmethod
    def construct_with_defaults(cls) -> "DashboardKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.dashboardTool = ""
        self.dashboardId = ""

    @property
    def dashboardTool(self) -> str:
        """Getter: name of the dashboarding tool, e.g. looker, redash"""
        return self._inner_dict.get('dashboardTool')  # type: ignore

    @dashboardTool.setter
    def dashboardTool(self, value: str) -> None:
        """Setter: name of the dashboarding tool, e.g. looker, redash"""
        self._inner_dict['dashboardTool'] = value

    @property
    def dashboardId(self) -> str:
        """Getter: unique id of the dashboard; globally unique per dashboarding tool even across deployments. For Looker, the dashboard URL such as 'looker.linkedin.com/dashboards/1234' can serve as the id."""
        return self._inner_dict.get('dashboardId')  # type: ignore

    @dashboardId.setter
    def dashboardId(self, value: str) -> None:
        """Setter: unique id of the dashboard; globally unique per dashboarding tool even across deployments. For Looker, the dashboard URL such as 'looker.linkedin.com/dashboards/1234' can serve as the id."""
        self._inner_dict['dashboardId'] = value
class DataFlowKeyClass(DictWrapper):
    """Key for a Data Flow."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.DataFlowKey")

    def __init__(self,
        orchestrator: str,
        flowId: str,
        cluster: str,
    ):
        super().__init__()
        self.orchestrator = orchestrator
        self.flowId = flowId
        self.cluster = cluster

    @classmethod
    def construct_with_defaults(cls) -> "DataFlowKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.orchestrator = ""
        self.flowId = ""
        self.cluster = ""

    @property
    def orchestrator(self) -> str:
        """Getter: workflow manager (e.g. azkaban, airflow) that orchestrates the flow"""
        return self._inner_dict.get('orchestrator')  # type: ignore

    @orchestrator.setter
    def orchestrator(self, value: str) -> None:
        """Setter: workflow manager (e.g. azkaban, airflow) that orchestrates the flow"""
        self._inner_dict['orchestrator'] = value

    @property
    def flowId(self) -> str:
        """Getter: unique identifier of the data flow"""
        return self._inner_dict.get('flowId')  # type: ignore

    @flowId.setter
    def flowId(self, value: str) -> None:
        """Setter: unique identifier of the data flow"""
        self._inner_dict['flowId'] = value

    @property
    def cluster(self) -> str:
        """Getter: cluster on which the flow executes"""
        return self._inner_dict.get('cluster')  # type: ignore

    @cluster.setter
    def cluster(self, value: str) -> None:
        """Setter: cluster on which the flow executes"""
        self._inner_dict['cluster'] = value
class DataJobKeyClass(DictWrapper):
    """Key for a Data Job."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.DataJobKey")

    def __init__(self,
        flow: str,
        jobId: str,
    ):
        super().__init__()
        self.flow = flow
        self.jobId = jobId

    @classmethod
    def construct_with_defaults(cls) -> "DataJobKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.flow = ""
        self.jobId = ""

    @property
    def flow(self) -> str:
        """Getter: standardized data processing flow urn representing the flow that owns this job"""
        return self._inner_dict.get('flow')  # type: ignore

    @flow.setter
    def flow(self, value: str) -> None:
        """Setter: standardized data processing flow urn representing the flow that owns this job"""
        self._inner_dict['flow'] = value

    @property
    def jobId(self) -> str:
        """Getter: unique identifier of the data job"""
        return self._inner_dict.get('jobId')  # type: ignore

    @jobId.setter
    def jobId(self, value: str) -> None:
        """Setter: unique identifier of the data job"""
        self._inner_dict['jobId'] = value
class DataPlatformKeyClass(DictWrapper):
    """Key for a Data Platform."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.DataPlatformKey")

    def __init__(self,
        platformName: str,
    ):
        super().__init__()
        self.platformName = platformName

    @classmethod
    def construct_with_defaults(cls) -> "DataPlatformKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.platformName = ""

    @property
    def platformName(self) -> str:
        """Getter: data platform name, i.e. hdfs, oracle, espresso"""
        return self._inner_dict.get('platformName')  # type: ignore

    @platformName.setter
    def platformName(self, value: str) -> None:
        """Setter: data platform name, i.e. hdfs, oracle, espresso"""
        self._inner_dict['platformName'] = value
class DataProcessKeyClass(DictWrapper):
    """Key for a Data Process."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.DataProcessKey")

    def __init__(self,
        name: str,
        orchestrator: str,
        origin: Union[str, "FabricTypeClass"],
    ):
        super().__init__()
        self.name = name
        self.orchestrator = orchestrator
        self.origin = origin

    @classmethod
    def construct_with_defaults(cls) -> "DataProcessKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.name = ""
        self.orchestrator = ""
        self.origin = FabricTypeClass.DEV

    @property
    def name(self) -> str:
        """Getter: process name, i.e. an ETL job name"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: process name, i.e. an ETL job name"""
        self._inner_dict['name'] = value

    @property
    def orchestrator(self) -> str:
        """Getter: standardized orchestrator in which the data process is defined.
        TODO: Migrate towards something that can be validated like DataPlatform urn"""
        return self._inner_dict.get('orchestrator')  # type: ignore

    @orchestrator.setter
    def orchestrator(self, value: str) -> None:
        """Setter: standardized orchestrator in which the data process is defined.
        TODO: Migrate towards something that can be validated like DataPlatform urn"""
        self._inner_dict['orchestrator'] = value

    @property
    def origin(self) -> Union[str, "FabricTypeClass"]:
        """Getter: fabric type the dataset belongs to or was generated in"""
        return self._inner_dict.get('origin')  # type: ignore

    @origin.setter
    def origin(self, value: Union[str, "FabricTypeClass"]) -> None:
        """Setter: fabric type the dataset belongs to or was generated in"""
        self._inner_dict['origin'] = value
class DatasetKeyClass(DictWrapper):
    """Key for a Dataset."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.DatasetKey")

    def __init__(self,
        platform: str,
        name: str,
        origin: Union[str, "FabricTypeClass"],
    ):
        super().__init__()
        self.platform = platform
        self.name = name
        self.origin = origin

    @classmethod
    def construct_with_defaults(cls) -> "DatasetKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.platform = ""
        self.name = ""
        self.origin = FabricTypeClass.DEV

    @property
    def platform(self) -> str:
        """Getter: data platform urn associated with the dataset"""
        return self._inner_dict.get('platform')  # type: ignore

    @platform.setter
    def platform(self, value: str) -> None:
        """Setter: data platform urn associated with the dataset"""
        self._inner_dict['platform'] = value

    @property
    def name(self) -> str:
        """Getter: dataset native name, e.g. <db>.<table>, /dir/subdir/<name>, or <name>"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: dataset native name, e.g. <db>.<table>, /dir/subdir/<name>, or <name>"""
        self._inner_dict['name'] = value

    @property
    def origin(self) -> Union[str, "FabricTypeClass"]:
        """Getter: fabric type the dataset belongs to or was generated in"""
        return self._inner_dict.get('origin')  # type: ignore

    @origin.setter
    def origin(self, value: Union[str, "FabricTypeClass"]) -> None:
        """Setter: fabric type the dataset belongs to or was generated in"""
        self._inner_dict['origin'] = value
class GlossaryNodeKeyClass(DictWrapper):
    """Key for a GlossaryNode."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.GlossaryNodeKey")

    def __init__(self,
        name: str,
    ):
        super().__init__()
        self.name = name

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryNodeKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.name = ""

    @property
    def name(self) -> str:
        # No docs available.
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        # No docs available.
        self._inner_dict['name'] = value
class GlossaryTermKeyClass(DictWrapper):
    """Key for a GlossaryTerm."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.GlossaryTermKey")

    def __init__(self,
        name: str,
    ):
        super().__init__()
        self.name = name

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryTermKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.name = ""

    @property
    def name(self) -> str:
        # No docs available.
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        # No docs available.
        self._inner_dict['name'] = value
class MLFeatureKeyClass(DictWrapper):
    """Key for an MLFeature."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.MLFeatureKey")

    def __init__(self,
        featureNamespace: str,
        name: str,
    ):
        super().__init__()
        self.featureNamespace = featureNamespace
        self.name = name

    @classmethod
    def construct_with_defaults(cls) -> "MLFeatureKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.featureNamespace = ""
        self.name = ""

    @property
    def featureNamespace(self) -> str:
        """Getter: namespace the feature lives in"""
        return self._inner_dict.get('featureNamespace')  # type: ignore

    @featureNamespace.setter
    def featureNamespace(self, value: str) -> None:
        """Setter: namespace the feature lives in"""
        self._inner_dict['featureNamespace'] = value

    @property
    def name(self) -> str:
        """Getter: name of the feature"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: name of the feature"""
        self._inner_dict['name'] = value
class MLFeatureTableKeyClass(DictWrapper):
    """Key for an MLFeatureTable."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.MLFeatureTableKey")

    def __init__(self,
        platform: str,
        name: str,
    ):
        super().__init__()
        self.platform = platform
        self.name = name

    @classmethod
    def construct_with_defaults(cls) -> "MLFeatureTableKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.platform = ""
        self.name = ""

    @property
    def platform(self) -> str:
        """Getter: data platform urn associated with the feature table"""
        return self._inner_dict.get('platform')  # type: ignore

    @platform.setter
    def platform(self, value: str) -> None:
        """Setter: data platform urn associated with the feature table"""
        self._inner_dict['platform'] = value

    @property
    def name(self) -> str:
        """Getter: name of the feature table"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: name of the feature table"""
        self._inner_dict['name'] = value
class MLModelKeyClass(DictWrapper):
    """Key for an ML model."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.MLModelKey")

    def __init__(self,
        platform: str,
        name: str,
        origin: Union[str, "FabricTypeClass"],
    ):
        super().__init__()
        self.platform = platform
        self.name = name
        self.origin = origin

    @classmethod
    def construct_with_defaults(cls) -> "MLModelKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.platform = ""
        self.name = ""
        self.origin = FabricTypeClass.DEV

    @property
    def platform(self) -> str:
        """Getter: standardized platform urn for the model"""
        return self._inner_dict.get('platform')  # type: ignore

    @platform.setter
    def platform(self, value: str) -> None:
        """Setter: standardized platform urn for the model"""
        self._inner_dict['platform'] = value

    @property
    def name(self) -> str:
        """Getter: name of the MLModel"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: name of the MLModel"""
        self._inner_dict['name'] = value

    @property
    def origin(self) -> Union[str, "FabricTypeClass"]:
        """Getter: fabric type the model belongs to or was generated in"""
        return self._inner_dict.get('origin')  # type: ignore

    @origin.setter
    def origin(self, value: Union[str, "FabricTypeClass"]) -> None:
        """Setter: fabric type the model belongs to or was generated in"""
        self._inner_dict['origin'] = value
class MLPrimaryKeyKeyClass(DictWrapper):
    """Key for an MLPrimaryKey."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.MLPrimaryKeyKey")

    def __init__(self,
        featureNamespace: str,
        name: str,
    ):
        super().__init__()
        self.featureNamespace = featureNamespace
        self.name = name

    @classmethod
    def construct_with_defaults(cls) -> "MLPrimaryKeyKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.featureNamespace = ""
        self.name = ""

    @property
    def featureNamespace(self) -> str:
        """Getter: namespace the primary key lives in"""
        return self._inner_dict.get('featureNamespace')  # type: ignore

    @featureNamespace.setter
    def featureNamespace(self, value: str) -> None:
        """Setter: namespace the primary key lives in"""
        self._inner_dict['featureNamespace'] = value

    @property
    def name(self) -> str:
        """Getter: name of the primary key"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: name of the primary key"""
        self._inner_dict['name'] = value
class TagKeyClass(DictWrapper):
    """Key for a Tag."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.key.TagKey")

    def __init__(self,
        name: str,
    ):
        super().__init__()
        self.name = name

    @classmethod
    def construct_with_defaults(cls) -> "TagKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to its schema-level default.
        self.name = ""

    @property
    def name(self) -> str:
        """Getter: the unique tag name"""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        """Setter: the unique tag name"""
        self._inner_dict['name'] = value
class ChartSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific Chart entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.ChartSnapshot")

    def __init__(self, urn: str, aspects: List[Union["ChartKeyClass", "ChartInfoClass", "ChartQueryClass", "EditableChartPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "ChartSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["ChartKeyClass", "ChartInfoClass", "ChartQueryClass", "EditableChartPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]:
        """Metadata aspects associated with the chart; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["ChartKeyClass", "ChartInfoClass", "ChartQueryClass", "EditableChartPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        """Replace the metadata aspects associated with the chart."""
        self._inner_dict['aspects'] = value
class CorpGroupSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific CorpGroup entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.CorpGroupSnapshot")

    def __init__(self, urn: str, aspects: List[Union["CorpGroupKeyClass", "CorpGroupInfoClass", "GlobalTagsClass", "StatusClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "CorpGroupSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["CorpGroupKeyClass", "CorpGroupInfoClass", "GlobalTagsClass", "StatusClass"]]:
        """Metadata aspects associated with the LdapUser; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["CorpGroupKeyClass", "CorpGroupInfoClass", "GlobalTagsClass", "StatusClass"]]) -> None:
        """Replace the metadata aspects associated with the LdapUser."""
        self._inner_dict['aspects'] = value
class CorpUserSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific CorpUser entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.CorpUserSnapshot")

    def __init__(self, urn: str, aspects: List[Union["CorpUserKeyClass", "CorpUserInfoClass", "CorpUserEditableInfoClass", "GlobalTagsClass", "StatusClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "CorpUserSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["CorpUserKeyClass", "CorpUserInfoClass", "CorpUserEditableInfoClass", "GlobalTagsClass", "StatusClass"]]:
        """Metadata aspects associated with the CorpUser; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["CorpUserKeyClass", "CorpUserInfoClass", "CorpUserEditableInfoClass", "GlobalTagsClass", "StatusClass"]]) -> None:
        """Replace the metadata aspects associated with the CorpUser."""
        self._inner_dict['aspects'] = value
class DashboardSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific Dashboard entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.DashboardSnapshot")

    def __init__(self, urn: str, aspects: List[Union["DashboardKeyClass", "DashboardInfoClass", "EditableDashboardPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "DashboardSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["DashboardKeyClass", "DashboardInfoClass", "EditableDashboardPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]:
        """Metadata aspects associated with the dashboard; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["DashboardKeyClass", "DashboardInfoClass", "EditableDashboardPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        """Replace the metadata aspects associated with the dashboard."""
        self._inner_dict['aspects'] = value
class DataFlowSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific DataFlow entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.DataFlowSnapshot")

    def __init__(self, urn: str, aspects: List[Union["DataFlowKeyClass", "DataFlowInfoClass", "EditableDataFlowPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "DataFlowSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["DataFlowKeyClass", "DataFlowInfoClass", "EditableDataFlowPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]:
        """Metadata aspects associated with the data flow; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["DataFlowKeyClass", "DataFlowInfoClass", "EditableDataFlowPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        """Replace the metadata aspects associated with the data flow."""
        self._inner_dict['aspects'] = value
class DataJobSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific DataJob entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.DataJobSnapshot")

    def __init__(self, urn: str, aspects: List[Union["DataJobKeyClass", "DataJobInfoClass", "DataJobInputOutputClass", "EditableDataJobPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "DataJobSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["DataJobKeyClass", "DataJobInfoClass", "DataJobInputOutputClass", "EditableDataJobPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]:
        """Metadata aspects associated with the data job; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["DataJobKeyClass", "DataJobInfoClass", "DataJobInputOutputClass", "EditableDataJobPropertiesClass", "OwnershipClass", "StatusClass", "GlobalTagsClass", "BrowsePathsClass"]]) -> None:
        """Replace the metadata aspects associated with the data job."""
        self._inner_dict['aspects'] = value
class DataPlatformSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific dataplatform entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.DataPlatformSnapshot")

    def __init__(self, urn: str, aspects: List[Union["DataPlatformKeyClass", "DataPlatformInfoClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "DataPlatformSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["DataPlatformKeyClass", "DataPlatformInfoClass"]]:
        """Metadata aspects associated with the data platform; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["DataPlatformKeyClass", "DataPlatformInfoClass"]]) -> None:
        """Replace the metadata aspects associated with the data platform."""
        self._inner_dict['aspects'] = value
class DataProcessSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific Data process entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.DataProcessSnapshot")

    def __init__(self, urn: str, aspects: List[Union["DataProcessKeyClass", "OwnershipClass", "DataProcessInfoClass", "StatusClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "DataProcessSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["DataProcessKeyClass", "OwnershipClass", "DataProcessInfoClass", "StatusClass"]]:
        """Metadata aspects associated with the data process; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["DataProcessKeyClass", "OwnershipClass", "DataProcessInfoClass", "StatusClass"]]) -> None:
        """Replace the metadata aspects associated with the data process."""
        self._inner_dict['aspects'] = value
class DatasetSnapshotClass(DictWrapper):
    """A metadata snapshot for a Dataset entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot")

    def __init__(self, urn: str, aspects: List[Union["DatasetKeyClass", "DatasetPropertiesClass", "EditableDatasetPropertiesClass", "DatasetDeprecationClass", "DatasetUpstreamLineageClass", "UpstreamLineageClass", "InstitutionalMemoryClass", "OwnershipClass", "StatusClass", "SchemaMetadataClass", "EditableSchemaMetadataClass", "GlobalTagsClass", "GlossaryTermsClass", "BrowsePathsClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "DatasetSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["DatasetKeyClass", "DatasetPropertiesClass", "EditableDatasetPropertiesClass", "DatasetDeprecationClass", "DatasetUpstreamLineageClass", "UpstreamLineageClass", "InstitutionalMemoryClass", "OwnershipClass", "StatusClass", "SchemaMetadataClass", "EditableSchemaMetadataClass", "GlobalTagsClass", "GlossaryTermsClass", "BrowsePathsClass"]]:
        """Metadata aspects associated with the dataset; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["DatasetKeyClass", "DatasetPropertiesClass", "EditableDatasetPropertiesClass", "DatasetDeprecationClass", "DatasetUpstreamLineageClass", "UpstreamLineageClass", "InstitutionalMemoryClass", "OwnershipClass", "StatusClass", "SchemaMetadataClass", "EditableSchemaMetadataClass", "GlobalTagsClass", "GlossaryTermsClass", "BrowsePathsClass"]]) -> None:
        """Replace the metadata aspects associated with the dataset."""
        self._inner_dict['aspects'] = value
class GlossaryNodeSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific GlossaryNode entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.GlossaryNodeSnapshot")

    def __init__(self, urn: str, aspects: List[Union["GlossaryNodeKeyClass", "GlossaryNodeInfoClass", "OwnershipClass", "StatusClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryNodeSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["GlossaryNodeKeyClass", "GlossaryNodeInfoClass", "OwnershipClass", "StatusClass"]]:
        """Metadata aspects associated with the GlossaryNode; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["GlossaryNodeKeyClass", "GlossaryNodeInfoClass", "OwnershipClass", "StatusClass"]]) -> None:
        """Replace the metadata aspects associated with the GlossaryNode."""
        self._inner_dict['aspects'] = value
class GlossaryTermSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific GlossaryTerm entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.GlossaryTermSnapshot")

    def __init__(self, urn: str, aspects: List[Union["GlossaryTermKeyClass", "GlossaryTermInfoClass", "OwnershipClass", "StatusClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "GlossaryTermSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["GlossaryTermKeyClass", "GlossaryTermInfoClass", "OwnershipClass", "StatusClass"]]:
        """Metadata aspects associated with the GlossaryTerm; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["GlossaryTermKeyClass", "GlossaryTermInfoClass", "OwnershipClass", "StatusClass"]]) -> None:
        """Replace the metadata aspects associated with the GlossaryTerm."""
        self._inner_dict['aspects'] = value
class MLFeatureSnapshotClass(DictWrapper):
    """A metadata snapshot for an MLFeature entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.MLFeatureSnapshot")

    def __init__(self, urn: str, aspects: List[Union["MLFeatureKeyClass", "MLFeaturePropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass", "BrowsePathsClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "MLFeatureSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["MLFeatureKeyClass", "MLFeaturePropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass", "BrowsePathsClass"]]:
        """Metadata aspects associated with the MLFeature; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["MLFeatureKeyClass", "MLFeaturePropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass", "BrowsePathsClass"]]) -> None:
        """Replace the metadata aspects associated with the MLFeature."""
        self._inner_dict['aspects'] = value
class MLFeatureTableSnapshotClass(DictWrapper):
    """A metadata snapshot for an MLFeatureTable entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.MLFeatureTableSnapshot")

    def __init__(self, urn: str, aspects: List[Union["MLFeatureTableKeyClass", "MLFeatureTablePropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "MLFeatureTableSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["MLFeatureTableKeyClass", "MLFeatureTablePropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass"]]:
        """Metadata aspects associated with the MLFeatureTable; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["MLFeatureTableKeyClass", "MLFeatureTablePropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass"]]) -> None:
        """Replace the metadata aspects associated with the MLFeatureTable."""
        self._inner_dict['aspects'] = value
class MLModelSnapshotClass(DictWrapper):
    """MLModel Snapshot entity details."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.MLModelSnapshot")

    def __init__(self, urn: str, aspects: List[Union["MLModelKeyClass", "OwnershipClass", "MLModelPropertiesClass", "IntendedUseClass", "MLModelFactorPromptsClass", "MetricsClass", "EvaluationDataClass", "TrainingDataClass", "QuantitativeAnalysesClass", "EthicalConsiderationsClass", "CaveatsAndRecommendationsClass", "InstitutionalMemoryClass", "SourceCodeClass", "StatusClass", "CostClass", "DeprecationClass", "BrowsePathsClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "MLModelSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["MLModelKeyClass", "OwnershipClass", "MLModelPropertiesClass", "IntendedUseClass", "MLModelFactorPromptsClass", "MetricsClass", "EvaluationDataClass", "TrainingDataClass", "QuantitativeAnalysesClass", "EthicalConsiderationsClass", "CaveatsAndRecommendationsClass", "InstitutionalMemoryClass", "SourceCodeClass", "StatusClass", "CostClass", "DeprecationClass", "BrowsePathsClass"]]:
        """Metadata aspects associated with the MLModel; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["MLModelKeyClass", "OwnershipClass", "MLModelPropertiesClass", "IntendedUseClass", "MLModelFactorPromptsClass", "MetricsClass", "EvaluationDataClass", "TrainingDataClass", "QuantitativeAnalysesClass", "EthicalConsiderationsClass", "CaveatsAndRecommendationsClass", "InstitutionalMemoryClass", "SourceCodeClass", "StatusClass", "CostClass", "DeprecationClass", "BrowsePathsClass"]]) -> None:
        """Replace the metadata aspects associated with the MLModel."""
        self._inner_dict['aspects'] = value
class MLPrimaryKeySnapshotClass(DictWrapper):
    """A metadata snapshot for an MLPrimaryKey entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.MLPrimaryKeySnapshot")

    def __init__(self, urn: str, aspects: List[Union["MLPrimaryKeyKeyClass", "MLPrimaryKeyPropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "MLPrimaryKeySnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["MLPrimaryKeyKeyClass", "MLPrimaryKeyPropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass"]]:
        """Metadata aspects associated with the MLPrimaryKey; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["MLPrimaryKeyKeyClass", "MLPrimaryKeyPropertiesClass", "OwnershipClass", "InstitutionalMemoryClass", "StatusClass", "DeprecationClass"]]) -> None:
        """Replace the metadata aspects associated with the MLPrimaryKey."""
        self._inner_dict['aspects'] = value
class TagSnapshotClass(DictWrapper):
    """A metadata snapshot for a specific dataset entity."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot")

    def __init__(self, urn: str, aspects: List[Union["TagKeyClass", "OwnershipClass", "TagPropertiesClass", "StatusClass"]]) -> None:
        super().__init__()
        self.urn = urn
        self.aspects = aspects

    @classmethod
    def construct_with_defaults(cls) -> "TagSnapshotClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.urn = ""
        self.aspects = []

    @property
    def urn(self) -> str:
        """URN for the entity the metadata snapshot is associated with."""
        return self._inner_dict.get('urn')  # type: ignore

    @urn.setter
    def urn(self, value: str) -> None:
        """Set the URN for the entity the metadata snapshot is associated with."""
        self._inner_dict['urn'] = value

    @property
    def aspects(self) -> List[Union["TagKeyClass", "OwnershipClass", "TagPropertiesClass", "StatusClass"]]:
        """Metadata aspects associated with the dataset; may be all, or any selection, of the supported aspects."""
        return self._inner_dict.get('aspects')  # type: ignore

    @aspects.setter
    def aspects(self, value: List[Union["TagKeyClass", "OwnershipClass", "TagPropertiesClass", "StatusClass"]]) -> None:
        """Replace the metadata aspects associated with the dataset."""
        self._inner_dict['aspects'] = value
class BaseDataClass(DictWrapper):
    """BaseData record."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.BaseData")

    def __init__(self, dataset: str, motivation: Union[None, str]=None, preProcessing: Union[None, List[str]]=None) -> None:
        super().__init__()
        self.dataset = dataset
        self.motivation = motivation
        self.preProcessing = preProcessing

    @classmethod
    def construct_with_defaults(cls) -> "BaseDataClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.dataset = ""
        # Optional fields take the defaults declared in the Avro schema.
        self.motivation = self.RECORD_SCHEMA.field_map["motivation"].default
        self.preProcessing = self.RECORD_SCHEMA.field_map["preProcessing"].default

    @property
    def dataset(self) -> str:
        """What dataset were used in the MLModel?"""
        return self._inner_dict.get('dataset')  # type: ignore

    @dataset.setter
    def dataset(self, value: str) -> None:
        """Set which dataset were used in the MLModel."""
        self._inner_dict['dataset'] = value

    @property
    def motivation(self) -> Union[None, str]:
        """Why was this dataset chosen?"""
        return self._inner_dict.get('motivation')  # type: ignore

    @motivation.setter
    def motivation(self, value: Union[None, str]) -> None:
        """Set why this dataset was chosen."""
        self._inner_dict['motivation'] = value

    @property
    def preProcessing(self) -> Union[None, List[str]]:
        """How was the data preprocessed (e.g., tokenization of sentences, cropping of images, any filtering such as dropping images without faces)?"""
        return self._inner_dict.get('preProcessing')  # type: ignore

    @preProcessing.setter
    def preProcessing(self, value: Union[None, List[str]]) -> None:
        """Set how the data was preprocessed."""
        self._inner_dict['preProcessing'] = value
class CaveatDetailsClass(DictWrapper):
    """This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset? Are there additional recommendations for model use?"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.CaveatDetails")

    def __init__(self, needsFurtherTesting: Union[None, bool]=None, caveatDescription: Union[None, str]=None, groupsNotRepresented: Union[None, List[str]]=None) -> None:
        super().__init__()
        self.needsFurtherTesting = needsFurtherTesting
        self.caveatDescription = caveatDescription
        self.groupsNotRepresented = groupsNotRepresented

    @classmethod
    def construct_with_defaults(cls) -> "CaveatDetailsClass":
        # Build an empty instance, then fill every field with its default value.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # All fields are optional and take the defaults declared in the Avro schema.
        self.needsFurtherTesting = self.RECORD_SCHEMA.field_map["needsFurtherTesting"].default
        self.caveatDescription = self.RECORD_SCHEMA.field_map["caveatDescription"].default
        self.groupsNotRepresented = self.RECORD_SCHEMA.field_map["groupsNotRepresented"].default

    @property
    def needsFurtherTesting(self) -> Union[None, bool]:
        """Did the results suggest any further testing?"""
        return self._inner_dict.get('needsFurtherTesting')  # type: ignore

    @needsFurtherTesting.setter
    def needsFurtherTesting(self, value: Union[None, bool]) -> None:
        """Set whether the results suggested any further testing."""
        self._inner_dict['needsFurtherTesting'] = value

    @property
    def caveatDescription(self) -> Union[None, str]:
        """Caveat Description.

        For ex: Given gender classes are binary (male/not male), which we include as male/female. Further work needed to evaluate across a spectrum of genders."""
        return self._inner_dict.get('caveatDescription')  # type: ignore

    @caveatDescription.setter
    def caveatDescription(self, value: Union[None, str]) -> None:
        """Set the caveat description."""
        self._inner_dict['caveatDescription'] = value

    @property
    def groupsNotRepresented(self) -> Union[None, List[str]]:
        """Relevant groups that were not represented in the evaluation dataset?"""
        return self._inner_dict.get('groupsNotRepresented')  # type: ignore

    @groupsNotRepresented.setter
    def groupsNotRepresented(self, value: Union[None, List[str]]) -> None:
        """Set the relevant groups that were not represented in the evaluation dataset."""
        self._inner_dict['groupsNotRepresented'] = value
class CaveatsAndRecommendationsClass(DictWrapper):
    """This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset? Are there additional recommendations for model use?"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.CaveatsAndRecommendations")

    def __init__(self,
        caveats: Union[None, "CaveatDetailsClass"]=None,
        recommendations: Union[None, str]=None,
        idealDatasetCharacteristics: Union[None, List[str]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.caveats = caveats
        self.recommendations = recommendations
        self.idealDatasetCharacteristics = idealDatasetCharacteristics

    @classmethod
    def construct_with_defaults(cls) -> "CaveatsAndRecommendationsClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.caveats = defaults["caveats"].default
        self.recommendations = defaults["recommendations"].default
        self.idealDatasetCharacteristics = defaults["idealDatasetCharacteristics"].default

    @property
    def caveats(self) -> Union[None, "CaveatDetailsClass"]:
        """Getter: This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset?"""
        return self._inner_dict.get("caveats")  # type: ignore

    @caveats.setter
    def caveats(self, value: Union[None, "CaveatDetailsClass"]) -> None:
        """Setter: This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset?"""
        self._inner_dict["caveats"] = value

    @property
    def recommendations(self) -> Union[None, str]:
        """Getter: Recommendations on where this MLModel should be used."""
        return self._inner_dict.get("recommendations")  # type: ignore

    @recommendations.setter
    def recommendations(self, value: Union[None, str]) -> None:
        """Setter: Recommendations on where this MLModel should be used."""
        self._inner_dict["recommendations"] = value

    @property
    def idealDatasetCharacteristics(self) -> Union[None, List[str]]:
        """Getter: Ideal characteristics of an evaluation dataset for this MLModel"""
        return self._inner_dict.get("idealDatasetCharacteristics")  # type: ignore

    @idealDatasetCharacteristics.setter
    def idealDatasetCharacteristics(self, value: Union[None, List[str]]) -> None:
        """Setter: Ideal characteristics of an evaluation dataset for this MLModel"""
        self._inner_dict["idealDatasetCharacteristics"] = value
class EthicalConsiderationsClass(DictWrapper):
    """This section is intended to demonstrate the ethical considerations that went into MLModel development, surfacing ethical challenges and solutions to stakeholders."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.EthicalConsiderations")

    def __init__(self,
        data: Union[None, List[str]]=None,
        humanLife: Union[None, List[str]]=None,
        mitigations: Union[None, List[str]]=None,
        risksAndHarms: Union[None, List[str]]=None,
        useCases: Union[None, List[str]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.data = data
        self.humanLife = humanLife
        self.mitigations = mitigations
        self.risksAndHarms = risksAndHarms
        self.useCases = useCases

    @classmethod
    def construct_with_defaults(cls) -> "EthicalConsiderationsClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.data = defaults["data"].default
        self.humanLife = defaults["humanLife"].default
        self.mitigations = defaults["mitigations"].default
        self.risksAndHarms = defaults["risksAndHarms"].default
        self.useCases = defaults["useCases"].default

    @property
    def data(self) -> Union[None, List[str]]:
        """Getter: Does the MLModel use any sensitive data (e.g., protected classes)?"""
        return self._inner_dict.get("data")  # type: ignore

    @data.setter
    def data(self, value: Union[None, List[str]]) -> None:
        """Setter: Does the MLModel use any sensitive data (e.g., protected classes)?"""
        self._inner_dict["data"] = value

    @property
    def humanLife(self) -> Union[None, List[str]]:
        """Getter: Is the MLModel intended to inform decisions about matters central to human life or flourishing – e.g., health or safety? Or could it be used in such a way?"""
        return self._inner_dict.get("humanLife")  # type: ignore

    @humanLife.setter
    def humanLife(self, value: Union[None, List[str]]) -> None:
        """Setter: Is the MLModel intended to inform decisions about matters central to human life or flourishing – e.g., health or safety? Or could it be used in such a way?"""
        self._inner_dict["humanLife"] = value

    @property
    def mitigations(self) -> Union[None, List[str]]:
        """Getter: What risk mitigation strategies were used during MLModel development?"""
        return self._inner_dict.get("mitigations")  # type: ignore

    @mitigations.setter
    def mitigations(self, value: Union[None, List[str]]) -> None:
        """Setter: What risk mitigation strategies were used during MLModel development?"""
        self._inner_dict["mitigations"] = value

    @property
    def risksAndHarms(self) -> Union[None, List[str]]:
        """Getter: What risks may be present in MLModel usage? Try to identify the potential recipients, likelihood, and magnitude of harms. If these cannot be determined, note that they were considered but remain unknown."""
        return self._inner_dict.get("risksAndHarms")  # type: ignore

    @risksAndHarms.setter
    def risksAndHarms(self, value: Union[None, List[str]]) -> None:
        """Setter: What risks may be present in MLModel usage? Try to identify the potential recipients, likelihood, and magnitude of harms. If these cannot be determined, note that they were considered but remain unknown."""
        self._inner_dict["risksAndHarms"] = value

    @property
    def useCases(self) -> Union[None, List[str]]:
        """Getter: Are there any known MLModel use cases that are especially fraught? This may connect directly to the intended use section"""
        return self._inner_dict.get("useCases")  # type: ignore

    @useCases.setter
    def useCases(self, value: Union[None, List[str]]) -> None:
        """Setter: Are there any known MLModel use cases that are especially fraught? This may connect directly to the intended use section"""
        self._inner_dict["useCases"] = value
class EvaluationDataClass(DictWrapper):
    """All referenced datasets would ideally point to any set of documents that provide visibility into the source and composition of the dataset."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.EvaluationData")

    def __init__(self,
        evaluationData: List["BaseDataClass"],
    ):
        super().__init__()
        # Stored via the property setter, which writes into _inner_dict.
        self.evaluationData = evaluationData

    @classmethod
    def construct_with_defaults(cls) -> "EvaluationDataClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Required list field defaults to empty.
        self.evaluationData = []

    @property
    def evaluationData(self) -> List["BaseDataClass"]:
        """Getter: Details on the dataset(s) used for the quantitative analyses in the MLModel"""
        return self._inner_dict.get("evaluationData")  # type: ignore

    @evaluationData.setter
    def evaluationData(self, value: List["BaseDataClass"]) -> None:
        """Setter: Details on the dataset(s) used for the quantitative analyses in the MLModel"""
        self._inner_dict["evaluationData"] = value
class IntendedUseClass(DictWrapper):
    """Intended Use for the ML Model"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.IntendedUse")

    def __init__(self,
        primaryUses: Union[None, List[str]]=None,
        primaryUsers: Union[None, List[Union[str, "IntendedUserTypeClass"]]]=None,
        outOfScopeUses: Union[None, List[str]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.primaryUses = primaryUses
        self.primaryUsers = primaryUsers
        self.outOfScopeUses = outOfScopeUses

    @classmethod
    def construct_with_defaults(cls) -> "IntendedUseClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.primaryUses = defaults["primaryUses"].default
        self.primaryUsers = defaults["primaryUsers"].default
        self.outOfScopeUses = defaults["outOfScopeUses"].default

    @property
    def primaryUses(self) -> Union[None, List[str]]:
        """Getter: Primary Use cases for the MLModel."""
        return self._inner_dict.get("primaryUses")  # type: ignore

    @primaryUses.setter
    def primaryUses(self, value: Union[None, List[str]]) -> None:
        """Setter: Primary Use cases for the MLModel."""
        self._inner_dict["primaryUses"] = value

    @property
    def primaryUsers(self) -> Union[None, List[Union[str, "IntendedUserTypeClass"]]]:
        """Getter: Primary Intended Users - For example, was the MLModel developed for entertainment purposes, for hobbyists, or enterprise solutions?"""
        return self._inner_dict.get("primaryUsers")  # type: ignore

    @primaryUsers.setter
    def primaryUsers(self, value: Union[None, List[Union[str, "IntendedUserTypeClass"]]]) -> None:
        """Setter: Primary Intended Users - For example, was the MLModel developed for entertainment purposes, for hobbyists, or enterprise solutions?"""
        self._inner_dict["primaryUsers"] = value

    @property
    def outOfScopeUses(self) -> Union[None, List[str]]:
        """Getter: Highlight technology that the MLModel might easily be confused with, or related contexts that users could try to apply the MLModel to."""
        return self._inner_dict.get("outOfScopeUses")  # type: ignore

    @outOfScopeUses.setter
    def outOfScopeUses(self, value: Union[None, List[str]]) -> None:
        """Setter: Highlight technology that the MLModel might easily be confused with, or related contexts that users could try to apply the MLModel to."""
        self._inner_dict["outOfScopeUses"] = value
class IntendedUserTypeClass(object):
    # Closed set of intended-user categories for an ML model's primary users
    # (referenced by IntendedUseClass.primaryUsers). Values are the raw avro
    # enum symbols; no further schema docs are available.
    ENTERPRISE = "ENTERPRISE"
    HOBBY = "HOBBY"
    ENTERTAINMENT = "ENTERTAINMENT"
class MLFeaturePropertiesClass(DictWrapper):
    """Properties associated with a MLFeature"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.MLFeatureProperties")

    def __init__(self,
        description: Union[None, str]=None,
        dataType: Union[None, Union[str, "MLFeatureDataTypeClass"]]=None,
        version: Union[None, "VersionTagClass"]=None,
        sources: Union[None, List[str]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.description = description
        self.dataType = dataType
        self.version = version
        self.sources = sources

    @classmethod
    def construct_with_defaults(cls) -> "MLFeaturePropertiesClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.description = defaults["description"].default
        self.dataType = defaults["dataType"].default
        self.version = defaults["version"].default
        self.sources = defaults["sources"].default

    @property
    def description(self) -> Union[None, str]:
        """Getter: Documentation of the MLFeature"""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Setter: Documentation of the MLFeature"""
        self._inner_dict["description"] = value

    @property
    def dataType(self) -> Union[None, Union[str, "MLFeatureDataTypeClass"]]:
        """Getter: Data Type of the MLFeature"""
        return self._inner_dict.get("dataType")  # type: ignore

    @dataType.setter
    def dataType(self, value: Union[None, Union[str, "MLFeatureDataTypeClass"]]) -> None:
        """Setter: Data Type of the MLFeature"""
        self._inner_dict["dataType"] = value

    @property
    def version(self) -> Union[None, "VersionTagClass"]:
        """Getter: Version of the MLFeature"""
        return self._inner_dict.get("version")  # type: ignore

    @version.setter
    def version(self, value: Union[None, "VersionTagClass"]) -> None:
        """Setter: Version of the MLFeature"""
        self._inner_dict["version"] = value

    @property
    def sources(self) -> Union[None, List[str]]:
        """Getter: Source of the MLFeature"""
        return self._inner_dict.get("sources")  # type: ignore

    @sources.setter
    def sources(self, value: Union[None, List[str]]) -> None:
        """Setter: Source of the MLFeature"""
        self._inner_dict["sources"] = value
class MLFeatureTablePropertiesClass(DictWrapper):
    """Properties associated with a MLFeatureTable"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.MLFeatureTableProperties")

    def __init__(self,
        description: Union[None, str]=None,
        mlFeatures: Union[None, List[str]]=None,
        mlPrimaryKeys: Union[None, List[str]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.description = description
        self.mlFeatures = mlFeatures
        self.mlPrimaryKeys = mlPrimaryKeys

    @classmethod
    def construct_with_defaults(cls) -> "MLFeatureTablePropertiesClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.description = defaults["description"].default
        self.mlFeatures = defaults["mlFeatures"].default
        self.mlPrimaryKeys = defaults["mlPrimaryKeys"].default

    @property
    def description(self) -> Union[None, str]:
        """Getter: Documentation of the MLFeatureTable"""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Setter: Documentation of the MLFeatureTable"""
        self._inner_dict["description"] = value

    @property
    def mlFeatures(self) -> Union[None, List[str]]:
        """Getter: List of features contained in the feature table"""
        return self._inner_dict.get("mlFeatures")  # type: ignore

    @mlFeatures.setter
    def mlFeatures(self, value: Union[None, List[str]]) -> None:
        """Setter: List of features contained in the feature table"""
        self._inner_dict["mlFeatures"] = value

    @property
    def mlPrimaryKeys(self) -> Union[None, List[str]]:
        """Getter: List of primary keys in the feature table (if multiple, assumed to act as a composite key)"""
        return self._inner_dict.get("mlPrimaryKeys")  # type: ignore

    @mlPrimaryKeys.setter
    def mlPrimaryKeys(self, value: Union[None, List[str]]) -> None:
        """Setter: List of primary keys in the feature table (if multiple, assumed to act as a composite key)"""
        self._inner_dict["mlPrimaryKeys"] = value
class MLModelFactorPromptsClass(DictWrapper):
    """Prompts which affect the performance of the MLModel"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.MLModelFactorPrompts")

    def __init__(self,
        relevantFactors: Union[None, List["MLModelFactorsClass"]]=None,
        evaluationFactors: Union[None, List["MLModelFactorsClass"]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.relevantFactors = relevantFactors
        self.evaluationFactors = evaluationFactors

    @classmethod
    def construct_with_defaults(cls) -> "MLModelFactorPromptsClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.relevantFactors = defaults["relevantFactors"].default
        self.evaluationFactors = defaults["evaluationFactors"].default

    @property
    def relevantFactors(self) -> Union[None, List["MLModelFactorsClass"]]:
        """Getter: What are foreseeable salient factors for which MLModel performance may vary, and how were these determined?"""
        return self._inner_dict.get("relevantFactors")  # type: ignore

    @relevantFactors.setter
    def relevantFactors(self, value: Union[None, List["MLModelFactorsClass"]]) -> None:
        """Setter: What are foreseeable salient factors for which MLModel performance may vary, and how were these determined?"""
        self._inner_dict["relevantFactors"] = value

    @property
    def evaluationFactors(self) -> Union[None, List["MLModelFactorsClass"]]:
        """Getter: Which factors are being reported, and why were these chosen?"""
        return self._inner_dict.get("evaluationFactors")  # type: ignore

    @evaluationFactors.setter
    def evaluationFactors(self, value: Union[None, List["MLModelFactorsClass"]]) -> None:
        """Setter: Which factors are being reported, and why were these chosen?"""
        self._inner_dict["evaluationFactors"] = value
class MLModelFactorsClass(DictWrapper):
    """Factors affecting the performance of the MLModel."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.MLModelFactors")

    def __init__(self,
        groups: Union[None, List[str]]=None,
        instrumentation: Union[None, List[str]]=None,
        environment: Union[None, List[str]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.groups = groups
        self.instrumentation = instrumentation
        self.environment = environment

    @classmethod
    def construct_with_defaults(cls) -> "MLModelFactorsClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.groups = defaults["groups"].default
        self.instrumentation = defaults["instrumentation"].default
        self.environment = defaults["environment"].default

    @property
    def groups(self) -> Union[None, List[str]]:
        """Getter: Groups refers to distinct categories with similar characteristics that are present in the evaluation data instances.
    For human-centric machine learning MLModels, groups are people who share one or multiple characteristics."""
        return self._inner_dict.get("groups")  # type: ignore

    @groups.setter
    def groups(self, value: Union[None, List[str]]) -> None:
        """Setter: Groups refers to distinct categories with similar characteristics that are present in the evaluation data instances.
    For human-centric machine learning MLModels, groups are people who share one or multiple characteristics."""
        self._inner_dict["groups"] = value

    @property
    def instrumentation(self) -> Union[None, List[str]]:
        """Getter: The performance of a MLModel can vary depending on what instruments were used to capture the input to the MLModel.
    For example, a face detection model may perform differently depending on the camera’s hardware and software,
    including lens, image stabilization, high dynamic range techniques, and background blurring for portrait mode."""
        return self._inner_dict.get("instrumentation")  # type: ignore

    @instrumentation.setter
    def instrumentation(self, value: Union[None, List[str]]) -> None:
        """Setter: The performance of a MLModel can vary depending on what instruments were used to capture the input to the MLModel.
    For example, a face detection model may perform differently depending on the camera’s hardware and software,
    including lens, image stabilization, high dynamic range techniques, and background blurring for portrait mode."""
        self._inner_dict["instrumentation"] = value

    @property
    def environment(self) -> Union[None, List[str]]:
        """Getter: A further factor affecting MLModel performance is the environment in which it is deployed."""
        return self._inner_dict.get("environment")  # type: ignore

    @environment.setter
    def environment(self, value: Union[None, List[str]]) -> None:
        """Setter: A further factor affecting MLModel performance is the environment in which it is deployed."""
        self._inner_dict["environment"] = value
class MLModelPropertiesClass(DictWrapper):
    """Properties associated with a ML Model"""
    # NOTE(review): the generator emitted the hyperParameters value type as
    # Union[str, int, float, float, bool] — avro's float and double both map to
    # Python float, so the duplicate member is dropped here. The runtime
    # behavior is unchanged; only the annotation is deduplicated.
    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.MLModelProperties")
    def __init__(self,
        description: Union[None, str]=None,
        date: Union[None, int]=None,
        version: Union[None, "VersionTagClass"]=None,
        type: Union[None, str]=None,
        hyperParameters: Union[None, Dict[str, Union[str, int, float, bool]]]=None,
        mlFeatures: Union[None, List[str]]=None,
        tags: Optional[List[str]]=None,
    ):
        super().__init__()
        # Each assignment routes through the property setters below into _inner_dict.
        self.description = description
        self.date = date
        self.version = version
        self.type = type
        self.hyperParameters = hyperParameters
        self.mlFeatures = mlFeatures
        # `tags` is a required list field: a missing argument becomes an empty
        # list rather than None so consumers can always iterate it.
        if tags is None:
            self.tags = []
        else:
            self.tags = tags
    @classmethod
    def construct_with_defaults(cls) -> "MLModelPropertiesClass":
        """Build an instance populated with the schema-declared defaults."""
        self = cls.construct({})
        self._restore_defaults()
        return self
    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema;
        # the required `tags` field always defaults to an empty list.
        self.description = self.RECORD_SCHEMA.field_map["description"].default
        self.date = self.RECORD_SCHEMA.field_map["date"].default
        self.version = self.RECORD_SCHEMA.field_map["version"].default
        self.type = self.RECORD_SCHEMA.field_map["type"].default
        self.hyperParameters = self.RECORD_SCHEMA.field_map["hyperParameters"].default
        self.mlFeatures = self.RECORD_SCHEMA.field_map["mlFeatures"].default
        self.tags = list()
    @property
    def description(self) -> Union[None, str]:
        """Getter: Documentation of the MLModel"""
        return self._inner_dict.get('description')  # type: ignore
    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Setter: Documentation of the MLModel"""
        self._inner_dict['description'] = value
    @property
    def date(self) -> Union[None, int]:
        """Getter: Date when the MLModel was developed"""
        return self._inner_dict.get('date')  # type: ignore
    @date.setter
    def date(self, value: Union[None, int]) -> None:
        """Setter: Date when the MLModel was developed"""
        self._inner_dict['date'] = value
    @property
    def version(self) -> Union[None, "VersionTagClass"]:
        """Getter: Version of the MLModel"""
        return self._inner_dict.get('version')  # type: ignore
    @version.setter
    def version(self, value: Union[None, "VersionTagClass"]) -> None:
        """Setter: Version of the MLModel"""
        self._inner_dict['version'] = value
    @property
    def type(self) -> Union[None, str]:
        """Getter: Type of Algorithm or MLModel such as whether it is a Naive Bayes classifier, Convolutional Neural Network, etc"""
        return self._inner_dict.get('type')  # type: ignore
    @type.setter
    def type(self, value: Union[None, str]) -> None:
        """Setter: Type of Algorithm or MLModel such as whether it is a Naive Bayes classifier, Convolutional Neural Network, etc"""
        self._inner_dict['type'] = value
    @property
    def hyperParameters(self) -> Union[None, Dict[str, Union[str, int, float, bool]]]:
        """Getter: Hyper Parameters of the MLModel"""
        return self._inner_dict.get('hyperParameters')  # type: ignore
    @hyperParameters.setter
    def hyperParameters(self, value: Union[None, Dict[str, Union[str, int, float, bool]]]) -> None:
        """Setter: Hyper Parameters of the MLModel"""
        self._inner_dict['hyperParameters'] = value
    @property
    def mlFeatures(self) -> Union[None, List[str]]:
        """Getter: List of features used for MLModel training"""
        return self._inner_dict.get('mlFeatures')  # type: ignore
    @mlFeatures.setter
    def mlFeatures(self, value: Union[None, List[str]]) -> None:
        """Setter: List of features used for MLModel training"""
        self._inner_dict['mlFeatures'] = value
    @property
    def tags(self) -> List[str]:
        """Getter: Tags for the MLModel"""
        return self._inner_dict.get('tags')  # type: ignore
    @tags.setter
    def tags(self, value: List[str]) -> None:
        """Setter: Tags for the MLModel"""
        self._inner_dict['tags'] = value
class MLPrimaryKeyPropertiesClass(DictWrapper):
    """Properties associated with a MLPrimaryKey"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.MLPrimaryKeyProperties")

    def __init__(self,
        sources: List[str],
        description: Union[None, str]=None,
        dataType: Union[None, Union[str, "MLFeatureDataTypeClass"]]=None,
        version: Union[None, "VersionTagClass"]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.description = description
        self.dataType = dataType
        self.version = version
        self.sources = sources

    @classmethod
    def construct_with_defaults(cls) -> "MLPrimaryKeyPropertiesClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Optional fields come from the schema; the required `sources`
        # list defaults to empty.
        defaults = self.RECORD_SCHEMA.field_map
        self.description = defaults["description"].default
        self.dataType = defaults["dataType"].default
        self.version = defaults["version"].default
        self.sources = []

    @property
    def description(self) -> Union[None, str]:
        """Getter: Documentation of the MLPrimaryKey"""
        return self._inner_dict.get("description")  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Setter: Documentation of the MLPrimaryKey"""
        self._inner_dict["description"] = value

    @property
    def dataType(self) -> Union[None, Union[str, "MLFeatureDataTypeClass"]]:
        """Getter: Data Type of the MLPrimaryKey"""
        return self._inner_dict.get("dataType")  # type: ignore

    @dataType.setter
    def dataType(self, value: Union[None, Union[str, "MLFeatureDataTypeClass"]]) -> None:
        """Setter: Data Type of the MLPrimaryKey"""
        self._inner_dict["dataType"] = value

    @property
    def version(self) -> Union[None, "VersionTagClass"]:
        """Getter: Version of the MLPrimaryKey"""
        return self._inner_dict.get("version")  # type: ignore

    @version.setter
    def version(self, value: Union[None, "VersionTagClass"]) -> None:
        """Setter: Version of the MLPrimaryKey"""
        self._inner_dict["version"] = value

    @property
    def sources(self) -> List[str]:
        """Getter: Source of the MLPrimaryKey"""
        return self._inner_dict.get("sources")  # type: ignore

    @sources.setter
    def sources(self, value: List[str]) -> None:
        """Setter: Source of the MLPrimaryKey"""
        self._inner_dict["sources"] = value
class MetricsClass(DictWrapper):
    """Metrics to be featured for the MLModel."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.Metrics")

    def __init__(self,
        performanceMeasures: Union[None, List[str]]=None,
        decisionThreshold: Union[None, List[str]]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.performanceMeasures = performanceMeasures
        self.decisionThreshold = decisionThreshold

    @classmethod
    def construct_with_defaults(cls) -> "MetricsClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.performanceMeasures = defaults["performanceMeasures"].default
        self.decisionThreshold = defaults["decisionThreshold"].default

    @property
    def performanceMeasures(self) -> Union[None, List[str]]:
        """Getter: Measures of MLModel performance"""
        return self._inner_dict.get("performanceMeasures")  # type: ignore

    @performanceMeasures.setter
    def performanceMeasures(self, value: Union[None, List[str]]) -> None:
        """Setter: Measures of MLModel performance"""
        self._inner_dict["performanceMeasures"] = value

    @property
    def decisionThreshold(self) -> Union[None, List[str]]:
        """Getter: Decision Thresholds used (if any)?"""
        return self._inner_dict.get("decisionThreshold")  # type: ignore

    @decisionThreshold.setter
    def decisionThreshold(self, value: Union[None, List[str]]) -> None:
        """Setter: Decision Thresholds used (if any)?"""
        self._inner_dict["decisionThreshold"] = value
class QuantitativeAnalysesClass(DictWrapper):
    """Quantitative analyses should be disaggregated, that is, broken down by the chosen factors. Quantitative analyses should provide the results of evaluating the MLModel according to the chosen metrics, providing confidence interval values when possible."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.QuantitativeAnalyses")

    def __init__(self,
        unitaryResults: Union[None, str]=None,
        intersectionalResults: Union[None, str]=None,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.unitaryResults = unitaryResults
        self.intersectionalResults = intersectionalResults

    @classmethod
    def construct_with_defaults(cls) -> "QuantitativeAnalysesClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Reset every field to the default recorded in the avro schema.
        defaults = self.RECORD_SCHEMA.field_map
        self.unitaryResults = defaults["unitaryResults"].default
        self.intersectionalResults = defaults["intersectionalResults"].default

    @property
    def unitaryResults(self) -> Union[None, str]:
        """Getter: Link to a dashboard with results showing how the MLModel performed with respect to each factor"""
        return self._inner_dict.get("unitaryResults")  # type: ignore

    @unitaryResults.setter
    def unitaryResults(self, value: Union[None, str]) -> None:
        """Setter: Link to a dashboard with results showing how the MLModel performed with respect to each factor"""
        self._inner_dict["unitaryResults"] = value

    @property
    def intersectionalResults(self) -> Union[None, str]:
        """Getter: Link to a dashboard with results showing how the MLModel performed with respect to the intersection of evaluated factors?"""
        return self._inner_dict.get("intersectionalResults")  # type: ignore

    @intersectionalResults.setter
    def intersectionalResults(self, value: Union[None, str]) -> None:
        """Setter: Link to a dashboard with results showing how the MLModel performed with respect to the intersection of evaluated factors?"""
        self._inner_dict["intersectionalResults"] = value
class SourceCodeClass(DictWrapper):
    """Source Code"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.SourceCode")

    def __init__(self,
        sourceCode: List["SourceCodeUrlClass"],
    ):
        super().__init__()
        # Stored via the property setter, which writes into _inner_dict.
        self.sourceCode = sourceCode

    @classmethod
    def construct_with_defaults(cls) -> "SourceCodeClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Required list field defaults to empty.
        self.sourceCode = []

    @property
    def sourceCode(self) -> List["SourceCodeUrlClass"]:
        """Getter: Source Code along with types"""
        return self._inner_dict.get("sourceCode")  # type: ignore

    @sourceCode.setter
    def sourceCode(self, value: List["SourceCodeUrlClass"]) -> None:
        """Setter: Source Code along with types"""
        self._inner_dict["sourceCode"] = value
class SourceCodeUrlClass(DictWrapper):
    """Source Code Url Entity"""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.SourceCodeUrl")

    def __init__(self,
        type: Union[str, "SourceCodeUrlTypeClass"],
        sourceCodeUrl: str,
    ):
        super().__init__()
        # Property setters below persist these values into _inner_dict.
        self.type = type
        self.sourceCodeUrl = sourceCodeUrl

    @classmethod
    def construct_with_defaults(cls) -> "SourceCodeUrlClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Both fields are required: the url type defaults to the model-source
        # enum symbol, the url itself to the empty string.
        self.type = SourceCodeUrlTypeClass.ML_MODEL_SOURCE_CODE
        self.sourceCodeUrl = ""

    @property
    def type(self) -> Union[str, "SourceCodeUrlTypeClass"]:
        """Getter: Source Code Url Types"""
        return self._inner_dict.get("type")  # type: ignore

    @type.setter
    def type(self, value: Union[str, "SourceCodeUrlTypeClass"]) -> None:
        """Setter: Source Code Url Types"""
        self._inner_dict["type"] = value

    @property
    def sourceCodeUrl(self) -> str:
        """Getter: Source Code Url"""
        return self._inner_dict.get("sourceCodeUrl")  # type: ignore

    @sourceCodeUrl.setter
    def sourceCodeUrl(self, value: str) -> None:
        """Setter: Source Code Url"""
        self._inner_dict["sourceCodeUrl"] = value
class SourceCodeUrlTypeClass(object):
    # Closed set of source-code URL categories for an ML model (referenced by
    # SourceCodeUrlClass.type and used as that field's default). Values are the
    # raw avro enum symbols; no further schema docs are available.
    ML_MODEL_SOURCE_CODE = "ML_MODEL_SOURCE_CODE"
    TRAINING_PIPELINE_SOURCE_CODE = "TRAINING_PIPELINE_SOURCE_CODE"
    EVALUATION_PIPELINE_SOURCE_CODE = "EVALUATION_PIPELINE_SOURCE_CODE"
class TrainingDataClass(DictWrapper):
    """Ideally, the MLModel card would contain as much information about the training data as the evaluation data. However, there might be cases where it is not feasible to provide this level of detailed information about the training data. For example, the data may be proprietary, or require a non-disclosure agreement. In these cases, we advocate for basic details about the distributions over groups in the data, as well as any other details that could inform stakeholders on the kinds of biases the model may have encoded."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.ml.metadata.TrainingData")

    def __init__(self,
        trainingData: List["BaseDataClass"],
    ):
        super().__init__()
        # Stored via the property setter, which writes into _inner_dict.
        self.trainingData = trainingData

    @classmethod
    def construct_with_defaults(cls) -> "TrainingDataClass":
        """Build an instance populated with the schema-declared defaults."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # Required list field defaults to empty.
        self.trainingData = []

    @property
    def trainingData(self) -> List["BaseDataClass"]:
        """Getter: Details on the dataset(s) used for training the MLModel"""
        return self._inner_dict.get("trainingData")  # type: ignore

    @trainingData.setter
    def trainingData(self, value: List["BaseDataClass"]) -> None:
        """Setter: Details on the dataset(s) used for training the MLModel"""
        self._inner_dict["trainingData"] = value
class MetadataAuditEventClass(DictWrapper):
    """Kafka event for capturing update made to an entity's metadata."""

    # Private alias for the union of every snapshot type this event may carry;
    # used for both the old and the new snapshot fields.
    _Snapshot = Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.mxe.MetadataAuditEvent")

    def __init__(
        self,
        newSnapshot: _Snapshot,
        auditHeader: Union[None, "KafkaAuditHeaderClass"] = None,
        oldSnapshot: Union[None, _Snapshot] = None,
    ):
        super().__init__()
        self.auditHeader = auditHeader
        self.oldSnapshot = oldSnapshot
        self.newSnapshot = newSnapshot

    @classmethod
    def construct_with_defaults(cls) -> "MetadataAuditEventClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.auditHeader = self.RECORD_SCHEMA.field_map["auditHeader"].default
        self.oldSnapshot = self.RECORD_SCHEMA.field_map["oldSnapshot"].default
        # newSnapshot is required; default to the first member of the union.
        self.newSnapshot = ChartSnapshotClass.construct_with_defaults()

    @property
    def auditHeader(self) -> Union[None, "KafkaAuditHeaderClass"]:
        """Kafka audit header. See go/kafkaauditheader for more info."""
        return self._inner_dict.get('auditHeader')  # type: ignore

    @auditHeader.setter
    def auditHeader(self, value: Union[None, "KafkaAuditHeaderClass"]) -> None:
        """Set the Kafka audit header. See go/kafkaauditheader for more info."""
        self._inner_dict['auditHeader'] = value

    @property
    def oldSnapshot(self) -> Union[None, _Snapshot]:
        """Snapshot of the metadata before the update; None for newly created
        metadata. Only the aspects affected by the update are included."""
        return self._inner_dict.get('oldSnapshot')  # type: ignore

    @oldSnapshot.setter
    def oldSnapshot(self, value: Union[None, _Snapshot]) -> None:
        """Set the pre-update snapshot (None for newly created metadata)."""
        self._inner_dict['oldSnapshot'] = value

    @property
    def newSnapshot(self) -> _Snapshot:
        """Snapshot of the metadata after the update. Only the aspects affected
        by the update are included."""
        return self._inner_dict.get('newSnapshot')  # type: ignore

    @newSnapshot.setter
    def newSnapshot(self, value: _Snapshot) -> None:
        """Set the post-update snapshot."""
        self._inner_dict['newSnapshot'] = value
class MetadataChangeEventClass(DictWrapper):
    """Kafka event for proposing a metadata change for an entity.

    A corresponding MetadataAuditEvent is emitted when the change is accepted and
    committed; otherwise a FailedMetadataChangeEvent is emitted instead."""

    # Private alias for the union of every snapshot type a proposal may carry.
    _Snapshot = Union["ChartSnapshotClass", "CorpGroupSnapshotClass", "CorpUserSnapshotClass", "DashboardSnapshotClass", "DataFlowSnapshotClass", "DataJobSnapshotClass", "DatasetSnapshotClass", "DataProcessSnapshotClass", "DataPlatformSnapshotClass", "MLModelSnapshotClass", "MLPrimaryKeySnapshotClass", "MLFeatureSnapshotClass", "MLFeatureTableSnapshotClass", "TagSnapshotClass", "GlossaryTermSnapshotClass", "GlossaryNodeSnapshotClass"]

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.mxe.MetadataChangeEvent")

    def __init__(
        self,
        proposedSnapshot: _Snapshot,
        auditHeader: Union[None, "KafkaAuditHeaderClass"] = None,
        proposedDelta: None = None,
    ):
        super().__init__()
        self.auditHeader = auditHeader
        self.proposedSnapshot = proposedSnapshot
        self.proposedDelta = proposedDelta

    @classmethod
    def construct_with_defaults(cls) -> "MetadataChangeEventClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.auditHeader = self.RECORD_SCHEMA.field_map["auditHeader"].default
        # proposedSnapshot is required; default to the first member of the union.
        self.proposedSnapshot = ChartSnapshotClass.construct_with_defaults()
        self.proposedDelta = self.RECORD_SCHEMA.field_map["proposedDelta"].default

    @property
    def auditHeader(self) -> Union[None, "KafkaAuditHeaderClass"]:
        """Kafka audit header. See go/kafkaauditheader for more info."""
        return self._inner_dict.get('auditHeader')  # type: ignore

    @auditHeader.setter
    def auditHeader(self, value: Union[None, "KafkaAuditHeaderClass"]) -> None:
        """Set the Kafka audit header. See go/kafkaauditheader for more info."""
        self._inner_dict['auditHeader'] = value

    @property
    def proposedSnapshot(self) -> _Snapshot:
        """Snapshot of the proposed metadata change. Includes only the aspects
        affected by the change."""
        return self._inner_dict.get('proposedSnapshot')  # type: ignore

    @proposedSnapshot.setter
    def proposedSnapshot(self, value: _Snapshot) -> None:
        """Set the snapshot of the proposed metadata change."""
        self._inner_dict['proposedSnapshot'] = value

    @property
    def proposedDelta(self) -> None:
        """Delta of the proposed metadata partial update (currently unused)."""
        return self._inner_dict.get('proposedDelta')  # type: ignore

    @proposedDelta.setter
    def proposedDelta(self, value: None) -> None:
        """Set the delta of the proposed metadata partial update."""
        self._inner_dict['proposedDelta'] = value
class ArrayTypeClass(DictWrapper):
    """Array field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.ArrayType")

    def __init__(self, nestedType: Union[None, List[str]] = None):
        super().__init__()
        self.nestedType = nestedType

    @classmethod
    def construct_with_defaults(cls) -> "ArrayTypeClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.nestedType = self.RECORD_SCHEMA.field_map["nestedType"].default

    @property
    def nestedType(self) -> Union[None, List[str]]:
        """List of types this array holds."""
        return self._inner_dict.get('nestedType')  # type: ignore

    @nestedType.setter
    def nestedType(self, value: Union[None, List[str]]) -> None:
        """Set the list of types this array holds."""
        self._inner_dict['nestedType'] = value
class BinaryJsonSchemaClass(DictWrapper):
    """Schema text of binary JSON schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.BinaryJsonSchema")

    def __init__(self, schema: str):
        super().__init__()
        self.schema = schema

    @classmethod
    def construct_with_defaults(cls) -> "BinaryJsonSchemaClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.schema = ""

    @property
    def schema(self) -> str:
        """The native schema text for binary JSON file format."""
        return self._inner_dict.get('schema')  # type: ignore

    @schema.setter
    def schema(self, value: str) -> None:
        """Set the native schema text for binary JSON file format."""
        self._inner_dict['schema'] = value
class BooleanTypeClass(DictWrapper):
    """Boolean field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.BooleanType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "BooleanTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class BytesTypeClass(DictWrapper):
    """Bytes field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.BytesType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "BytesTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class DatasetFieldForeignKeyClass(DictWrapper):
    """For non-urn based foreign keys."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.DatasetFieldForeignKey")

    def __init__(
        self,
        parentDataset: str,
        currentFieldPaths: List[str],
        parentField: str,
    ):
        super().__init__()
        self.parentDataset = parentDataset
        self.currentFieldPaths = currentFieldPaths
        self.parentField = parentField

    @classmethod
    def construct_with_defaults(cls) -> "DatasetFieldForeignKeyClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.parentDataset = ""
        self.currentFieldPaths = []
        self.parentField = ""

    @property
    def parentDataset(self) -> str:
        """Dataset that stores the resource."""
        return self._inner_dict.get('parentDataset')  # type: ignore

    @parentDataset.setter
    def parentDataset(self, value: str) -> None:
        """Set the dataset that stores the resource."""
        self._inner_dict['parentDataset'] = value

    @property
    def currentFieldPaths(self) -> List[str]:
        """Fields in the hosting (current) SchemaMetadata that form a foreign key.
        May hold one entry, or several when multiple fields of the hosting schema
        form a foreign key into a single parent dataset."""
        return self._inner_dict.get('currentFieldPaths')  # type: ignore

    @currentFieldPaths.setter
    def currentFieldPaths(self, value: List[str]) -> None:
        """Set the hosting-schema field paths that form the foreign key."""
        self._inner_dict['currentFieldPaths'] = value

    @property
    def parentField(self) -> str:
        """SchemaField@fieldPath uniquely identifying the referenced field in the
        parent dataset."""
        return self._inner_dict.get('parentField')  # type: ignore

    @parentField.setter
    def parentField(self, value: str) -> None:
        """Set the referenced field path in the parent dataset."""
        self._inner_dict['parentField'] = value
class DateTypeClass(DictWrapper):
    """Date field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.DateType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "DateTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class EditableSchemaFieldInfoClass(DictWrapper):
    """SchemaField to describe metadata related to dataset schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EditableSchemaFieldInfo")

    def __init__(
        self,
        fieldPath: str,
        description: Union[None, str] = None,
        globalTags: Union[None, "GlobalTagsClass"] = None,
    ):
        super().__init__()
        self.fieldPath = fieldPath
        self.description = description
        self.globalTags = globalTags

    @classmethod
    def construct_with_defaults(cls) -> "EditableSchemaFieldInfoClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.fieldPath = ""
        self.description = self.RECORD_SCHEMA.field_map["description"].default
        self.globalTags = self.RECORD_SCHEMA.field_map["globalTags"].default

    @property
    def fieldPath(self) -> str:
        """FieldPath uniquely identifying the SchemaField this metadata is
        associated with."""
        return self._inner_dict.get('fieldPath')  # type: ignore

    @fieldPath.setter
    def fieldPath(self, value: str) -> None:
        """Set the FieldPath identifying the associated SchemaField."""
        self._inner_dict['fieldPath'] = value

    @property
    def description(self) -> Union[None, str]:
        """Description."""
        return self._inner_dict.get('description')  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        """Set the description."""
        self._inner_dict['description'] = value

    @property
    def globalTags(self) -> Union[None, "GlobalTagsClass"]:
        """Tags associated with the field."""
        return self._inner_dict.get('globalTags')  # type: ignore

    @globalTags.setter
    def globalTags(self, value: Union[None, "GlobalTagsClass"]) -> None:
        """Set the tags associated with the field."""
        self._inner_dict['globalTags'] = value
class EditableSchemaMetadataClass(DictWrapper):
    """Editable changes made to schema metadata.

    Kept separate from ingestion-produced schema metadata so that UI edits are
    not accidentally overwritten by ingestion pipelines."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EditableSchemaMetadata")

    def __init__(
        self,
        created: "AuditStampClass",
        lastModified: "AuditStampClass",
        editableSchemaFieldInfo: List["EditableSchemaFieldInfoClass"],
        deleted: Union[None, "AuditStampClass"] = None,
    ):
        super().__init__()
        self.created = created
        self.lastModified = lastModified
        self.deleted = deleted
        self.editableSchemaFieldInfo = editableSchemaFieldInfo

    @classmethod
    def construct_with_defaults(cls) -> "EditableSchemaMetadataClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.created = AuditStampClass.construct_with_defaults()
        self.lastModified = AuditStampClass.construct_with_defaults()
        self.deleted = self.RECORD_SCHEMA.field_map["deleted"].default
        self.editableSchemaFieldInfo = []

    @property
    def created(self) -> "AuditStampClass":
        """AuditStamp corresponding to the creation of this
        resource/association/sub-resource."""
        return self._inner_dict.get('created')  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        """Set the creation AuditStamp."""
        self._inner_dict['created'] = value

    @property
    def lastModified(self) -> "AuditStampClass":
        """AuditStamp corresponding to the last modification of this
        resource/association/sub-resource. Equal to `created` when no
        modification has happened since creation."""
        return self._inner_dict.get('lastModified')  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        """Set the last-modification AuditStamp."""
        self._inner_dict['lastModified'] = value

    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """AuditStamp corresponding to the deletion of this
        resource/association/sub-resource. Logically MUST be later than creation;
        may or may not equal `lastModified` depending on the resource's
        semantics."""
        return self._inner_dict.get('deleted')  # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        """Set the deletion AuditStamp."""
        self._inner_dict['deleted'] = value

    @property
    def editableSchemaFieldInfo(self) -> List["EditableSchemaFieldInfoClass"]:
        """Client-provided list of fields from the document schema."""
        return self._inner_dict.get('editableSchemaFieldInfo')  # type: ignore

    @editableSchemaFieldInfo.setter
    def editableSchemaFieldInfo(self, value: List["EditableSchemaFieldInfoClass"]) -> None:
        """Set the client-provided list of fields from the document schema."""
        self._inner_dict['editableSchemaFieldInfo'] = value
class EnumTypeClass(DictWrapper):
    """Enum field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EnumType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "EnumTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class EspressoSchemaClass(DictWrapper):
    """Schema text of an espresso table schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.EspressoSchema")

    def __init__(self, documentSchema: str, tableSchema: str):
        super().__init__()
        self.documentSchema = documentSchema
        self.tableSchema = tableSchema

    @classmethod
    def construct_with_defaults(cls) -> "EspressoSchemaClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.documentSchema = ""
        self.tableSchema = ""

    @property
    def documentSchema(self) -> str:
        """The native espresso document schema."""
        return self._inner_dict.get('documentSchema')  # type: ignore

    @documentSchema.setter
    def documentSchema(self, value: str) -> None:
        """Set the native espresso document schema."""
        self._inner_dict['documentSchema'] = value

    @property
    def tableSchema(self) -> str:
        """The espresso table schema definition."""
        return self._inner_dict.get('tableSchema')  # type: ignore

    @tableSchema.setter
    def tableSchema(self, value: str) -> None:
        """Set the espresso table schema definition."""
        self._inner_dict['tableSchema'] = value
class FixedTypeClass(DictWrapper):
    """Fixed field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.FixedType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "FixedTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class ForeignKeySpecClass(DictWrapper):
    """Description of a foreign key in a schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.ForeignKeySpec")

    def __init__(self, foreignKey: Union["DatasetFieldForeignKeyClass", "UrnForeignKeyClass"]):
        super().__init__()
        self.foreignKey = foreignKey

    @classmethod
    def construct_with_defaults(cls) -> "ForeignKeySpecClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # foreignKey is required; default to the first member of the union.
        self.foreignKey = DatasetFieldForeignKeyClass.construct_with_defaults()

    @property
    def foreignKey(self) -> Union["DatasetFieldForeignKeyClass", "UrnForeignKeyClass"]:
        """Foreign key definition in metadata schema."""
        return self._inner_dict.get('foreignKey')  # type: ignore

    @foreignKey.setter
    def foreignKey(self, value: Union["DatasetFieldForeignKeyClass", "UrnForeignKeyClass"]) -> None:
        """Set the foreign key definition in metadata schema."""
        self._inner_dict['foreignKey'] = value
class KafkaSchemaClass(DictWrapper):
    """Schema holder for kafka schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.KafkaSchema")

    def __init__(self, documentSchema: str):
        super().__init__()
        self.documentSchema = documentSchema

    @classmethod
    def construct_with_defaults(cls) -> "KafkaSchemaClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.documentSchema = ""

    @property
    def documentSchema(self) -> str:
        """The native kafka document schema. This is a human readable avro
        document schema."""
        return self._inner_dict.get('documentSchema')  # type: ignore

    @documentSchema.setter
    def documentSchema(self, value: str) -> None:
        """Set the native kafka document schema."""
        self._inner_dict['documentSchema'] = value
class KeyValueSchemaClass(DictWrapper):
    """Schema text of a key-value store schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.KeyValueSchema")

    def __init__(self, keySchema: str, valueSchema: str):
        super().__init__()
        self.keySchema = keySchema
        self.valueSchema = valueSchema

    @classmethod
    def construct_with_defaults(cls) -> "KeyValueSchemaClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.keySchema = ""
        self.valueSchema = ""

    @property
    def keySchema(self) -> str:
        """The raw schema for the key in the key-value store."""
        return self._inner_dict.get('keySchema')  # type: ignore

    @keySchema.setter
    def keySchema(self, value: str) -> None:
        """Set the raw schema for the key in the key-value store."""
        self._inner_dict['keySchema'] = value

    @property
    def valueSchema(self) -> str:
        """The raw schema for the value in the key-value store."""
        return self._inner_dict.get('valueSchema')  # type: ignore

    @valueSchema.setter
    def valueSchema(self, value: str) -> None:
        """Set the raw schema for the value in the key-value store."""
        self._inner_dict['valueSchema'] = value
class MapTypeClass(DictWrapper):
    """Map field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.MapType")

    def __init__(
        self,
        keyType: Union[None, str] = None,
        valueType: Union[None, str] = None,
    ):
        super().__init__()
        self.keyType = keyType
        self.valueType = valueType

    @classmethod
    def construct_with_defaults(cls) -> "MapTypeClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.keyType = self.RECORD_SCHEMA.field_map["keyType"].default
        self.valueType = self.RECORD_SCHEMA.field_map["valueType"].default

    @property
    def keyType(self) -> Union[None, str]:
        """Key type in a map."""
        return self._inner_dict.get('keyType')  # type: ignore

    @keyType.setter
    def keyType(self, value: Union[None, str]) -> None:
        """Set the key type in a map."""
        self._inner_dict['keyType'] = value

    @property
    def valueType(self) -> Union[None, str]:
        """Type of the value in a map."""
        return self._inner_dict.get('valueType')  # type: ignore

    @valueType.setter
    def valueType(self, value: Union[None, str]) -> None:
        """Set the type of the value in a map."""
        self._inner_dict['valueType'] = value
class MySqlDDLClass(DictWrapper):
    """Schema holder for MySql data definition language that describes an MySql table."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.MySqlDDL")

    def __init__(self, tableSchema: str):
        super().__init__()
        self.tableSchema = tableSchema

    @classmethod
    def construct_with_defaults(cls) -> "MySqlDDLClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.tableSchema = ""

    @property
    def tableSchema(self) -> str:
        """The native schema in the dataset's platform. This is a human readable
        (json blob) table schema."""
        return self._inner_dict.get('tableSchema')  # type: ignore

    @tableSchema.setter
    def tableSchema(self, value: str) -> None:
        """Set the native table schema."""
        self._inner_dict['tableSchema'] = value
class NullTypeClass(DictWrapper):
    """Null field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.NullType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "NullTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class NumberTypeClass(DictWrapper):
    """Number data type: long, integer, short, etc.."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.NumberType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "NumberTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class OracleDDLClass(DictWrapper):
    """Schema holder for oracle data definition language that describes an oracle table."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.OracleDDL")

    def __init__(self, tableSchema: str):
        super().__init__()
        self.tableSchema = tableSchema

    @classmethod
    def construct_with_defaults(cls) -> "OracleDDLClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.tableSchema = ""

    @property
    def tableSchema(self) -> str:
        """The native schema in the dataset's platform. This is a human readable
        (json blob) table schema."""
        return self._inner_dict.get('tableSchema')  # type: ignore

    @tableSchema.setter
    def tableSchema(self, value: str) -> None:
        """Set the native table schema."""
        self._inner_dict['tableSchema'] = value
class OrcSchemaClass(DictWrapper):
    """Schema text of an ORC schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.OrcSchema")

    def __init__(self, schema: str):
        super().__init__()
        self.schema = schema

    @classmethod
    def construct_with_defaults(cls) -> "OrcSchemaClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.schema = ""

    @property
    def schema(self) -> str:
        """The native schema for ORC file format."""
        return self._inner_dict.get('schema')  # type: ignore

    @schema.setter
    def schema(self, value: str) -> None:
        """Set the native schema for ORC file format."""
        self._inner_dict['schema'] = value
class OtherSchemaClass(DictWrapper):
    """Schema holder for undefined schema types."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.OtherSchema")

    def __init__(self, rawSchema: str):
        super().__init__()
        self.rawSchema = rawSchema

    @classmethod
    def construct_with_defaults(cls) -> "OtherSchemaClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.rawSchema = ""

    @property
    def rawSchema(self) -> str:
        """The native schema in the dataset's platform."""
        return self._inner_dict.get('rawSchema')  # type: ignore

    @rawSchema.setter
    def rawSchema(self, value: str) -> None:
        """Set the native schema in the dataset's platform."""
        self._inner_dict['rawSchema'] = value
class PrestoDDLClass(DictWrapper):
    """Schema holder for presto data definition language that describes a presto view."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.PrestoDDL")

    def __init__(self, rawSchema: str):
        super().__init__()
        self.rawSchema = rawSchema

    @classmethod
    def construct_with_defaults(cls) -> "PrestoDDLClass":
        # Build an empty record, then populate every field with its default.
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.rawSchema = ""

    @property
    def rawSchema(self) -> str:
        """The raw schema in the dataset's platform. This includes the DDL and
        the columns extracted from DDL."""
        return self._inner_dict.get('rawSchema')  # type: ignore

    @rawSchema.setter
    def rawSchema(self, value: str) -> None:
        """Set the raw schema (DDL plus extracted columns)."""
        self._inner_dict['rawSchema'] = value
class RecordTypeClass(DictWrapper):
    """Record field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.RecordType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "RecordTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # This record declares no fields, so there is nothing to reset.
        pass
class SchemaFieldClass(DictWrapper):
"""SchemaField to describe metadata related to dataset schema. Schema normalization rules: http://go/tms-schema"""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.SchemaField")
def __init__(self,
fieldPath: str,
type: "SchemaFieldDataTypeClass",
nativeDataType: str,
jsonPath: Union[None, str]=None,
nullable: Optional[bool]=None,
description: Union[None, str]=None,
recursive: Optional[bool]=None,
globalTags: Union[None, "GlobalTagsClass"]=None,
glossaryTerms: Union[None, "GlossaryTermsClass"]=None,
):
super().__init__()
self.fieldPath = fieldPath
self.jsonPath = jsonPath
if nullable is None:
self.nullable = False
else:
self.nullable = nullable
self.description = description
self.type = type
self.nativeDataType = nativeDataType
if recursive is None:
self.recursive = False
else:
self.recursive = recursive
self.globalTags = globalTags
self.glossaryTerms = glossaryTerms
@classmethod
def construct_with_defaults(cls) -> "SchemaFieldClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.fieldPath = str()
self.jsonPath = self.RECORD_SCHEMA.field_map["jsonPath"].default
self.nullable = self.RECORD_SCHEMA.field_map["nullable"].default
self.description = self.RECORD_SCHEMA.field_map["description"].default
self.type = SchemaFieldDataTypeClass.construct_with_defaults()
self.nativeDataType = str()
self.recursive = self.RECORD_SCHEMA.field_map["recursive"].default
self.globalTags = self.RECORD_SCHEMA.field_map["globalTags"].default
self.glossaryTerms = self.RECORD_SCHEMA.field_map["glossaryTerms"].default
@property
def fieldPath(self) -> str:
"""Getter: Flattened name of the field. Field is computed from jsonPath field. For data translation rules refer to wiki page above."""
return self._inner_dict.get('fieldPath') # type: ignore
@fieldPath.setter
def fieldPath(self, value: str) -> None:
"""Setter: Flattened name of the field. Field is computed from jsonPath field. For data translation rules refer to wiki page above."""
self._inner_dict['fieldPath'] = value
@property
def jsonPath(self) -> Union[None, str]:
"""Getter: Flattened name of a field in JSON Path notation."""
return self._inner_dict.get('jsonPath') # type: ignore
@jsonPath.setter
def jsonPath(self, value: Union[None, str]) -> None:
"""Setter: Flattened name of a field in JSON Path notation."""
self._inner_dict['jsonPath'] = value
@property
def nullable(self) -> bool:
"""Getter: Indicates if this field is optional or nullable"""
return self._inner_dict.get('nullable') # type: ignore
@nullable.setter
def nullable(self, value: bool) -> None:
"""Setter: Indicates if this field is optional or nullable"""
self._inner_dict['nullable'] = value
@property
def description(self) -> Union[None, str]:
"""Getter: Description"""
return self._inner_dict.get('description') # type: ignore
@description.setter
def description(self, value: Union[None, str]) -> None:
"""Setter: Description"""
self._inner_dict['description'] = value
@property
def type(self) -> "SchemaFieldDataTypeClass":
"""Getter: Platform independent field type of the field."""
return self._inner_dict.get('type') # type: ignore
@type.setter
def type(self, value: "SchemaFieldDataTypeClass") -> None:
"""Setter: Platform independent field type of the field."""
self._inner_dict['type'] = value
@property
def nativeDataType(self) -> str:
"""Getter: The native type of the field in the dataset's platform as declared by platform schema."""
return self._inner_dict.get('nativeDataType') # type: ignore
@nativeDataType.setter
def nativeDataType(self, value: str) -> None:
"""Setter: The native type of the field in the dataset's platform as declared by platform schema."""
self._inner_dict['nativeDataType'] = value
@property
def recursive(self) -> bool:
"""Getter: There are use cases when a field in type B references type A. A field in A references field of type B. In such cases, we will mark the first field as recursive."""
return self._inner_dict.get('recursive') # type: ignore
@recursive.setter
def recursive(self, value: bool) -> None:
"""Setter: There are use cases when a field in type B references type A. A field in A references field of type B. In such cases, we will mark the first field as recursive."""
self._inner_dict['recursive'] = value
@property
def globalTags(self) -> Union[None, "GlobalTagsClass"]:
"""Getter: Tags associated with the field"""
return self._inner_dict.get('globalTags') # type: ignore
@globalTags.setter
def globalTags(self, value: Union[None, "GlobalTagsClass"]) -> None:
"""Setter: Tags associated with the field"""
self._inner_dict['globalTags'] = value
@property
def glossaryTerms(self) -> Union[None, "GlossaryTermsClass"]:
"""Getter: Glossary terms associated with the field"""
return self._inner_dict.get('glossaryTerms') # type: ignore
@glossaryTerms.setter
def glossaryTerms(self, value: Union[None, "GlossaryTermsClass"]) -> None:
"""Setter: Glossary terms associated with the field"""
self._inner_dict['glossaryTerms'] = value
class SchemaFieldDataTypeClass(DictWrapper):
    """Schema field data types."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.SchemaFieldDataType")

    def __init__(self,
        type: Union["BooleanTypeClass", "FixedTypeClass", "StringTypeClass", "BytesTypeClass", "NumberTypeClass", "DateTypeClass", "TimeTypeClass", "EnumTypeClass", "NullTypeClass", "MapTypeClass", "ArrayTypeClass", "UnionTypeClass", "RecordTypeClass"],
    ):
        """Wrap the given platform-independent field type."""
        super().__init__()
        self.type = type

    @classmethod
    def construct_with_defaults(cls) -> "SchemaFieldDataTypeClass":
        """Build an instance reset to its schema defaults (boolean type)."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.type = BooleanTypeClass.construct_with_defaults()

    @property
    def type(self) -> Union["BooleanTypeClass", "FixedTypeClass", "StringTypeClass", "BytesTypeClass", "NumberTypeClass", "DateTypeClass", "TimeTypeClass", "EnumTypeClass", "NullTypeClass", "MapTypeClass", "ArrayTypeClass", "UnionTypeClass", "RecordTypeClass"]:
        """Data platform specific type of the field."""
        return self._inner_dict.get('type')  # type: ignore

    @type.setter
    def type(self, value: Union["BooleanTypeClass", "FixedTypeClass", "StringTypeClass", "BytesTypeClass", "NumberTypeClass", "DateTypeClass", "TimeTypeClass", "EnumTypeClass", "NullTypeClass", "MapTypeClass", "ArrayTypeClass", "UnionTypeClass", "RecordTypeClass"]) -> None:
        self._inner_dict.update(type=value)
class SchemaMetadataClass(DictWrapper):
    """SchemaMetadata to describe metadata related to a store schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.SchemaMetadata")

    def __init__(self,
        schemaName: str,
        platform: str,
        version: int,
        created: "AuditStampClass",
        lastModified: "AuditStampClass",
        hash: str,
        platformSchema: Union["EspressoSchemaClass", "OracleDDLClass", "MySqlDDLClass", "PrestoDDLClass", "KafkaSchemaClass", "BinaryJsonSchemaClass", "OrcSchemaClass", "SchemalessClass", "KeyValueSchemaClass", "OtherSchemaClass"],
        fields: List["SchemaFieldClass"],
        deleted: Union[None, "AuditStampClass"]=None,
        dataset: Union[None, str]=None,
        cluster: Union[None, str]=None,
        primaryKeys: Union[None, List[str]]=None,
        foreignKeysSpecs: Union[None, Dict[str, "ForeignKeySpecClass"]]=None,
    ):
        """Populate the wrapped dict.

        The assignment order is deliberate: each setter inserts its key into
        the underlying dict, so this order fixes the wrapped dict's key order.
        """
        super().__init__()
        self.schemaName = schemaName
        self.platform = platform
        self.version = version
        self.created = created
        self.lastModified = lastModified
        self.deleted = deleted
        self.dataset = dataset
        self.cluster = cluster
        self.hash = hash
        self.platformSchema = platformSchema
        self.fields = fields
        self.primaryKeys = primaryKeys
        self.foreignKeysSpecs = foreignKeysSpecs

    @classmethod
    def construct_with_defaults(cls) -> "SchemaMetadataClass":
        """Build an instance with every field reset to its schema default."""
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        """Reset every field to the default declared in the Avro schema."""
        defaults = self.RECORD_SCHEMA.field_map  # hoist the repeated lookup
        self.schemaName = str()
        self.platform = str()
        self.version = int()
        self.created = AuditStampClass.construct_with_defaults()
        self.lastModified = AuditStampClass.construct_with_defaults()
        self.deleted = defaults["deleted"].default
        self.dataset = defaults["dataset"].default
        self.cluster = defaults["cluster"].default
        self.hash = str()
        self.platformSchema = EspressoSchemaClass.construct_with_defaults()
        self.fields = list()
        self.primaryKeys = defaults["primaryKeys"].default
        self.foreignKeysSpecs = defaults["foreignKeysSpecs"].default

    @property
    def schemaName(self) -> str:
        """Schema name, e.g. PageViewEvent, identity.Profile."""
        return self._inner_dict.get('schemaName')  # type: ignore

    @schemaName.setter
    def schemaName(self, value: str) -> None:
        self._inner_dict.update(schemaName=value)

    @property
    def platform(self) -> str:
        """Standardized data platform urn (urn:li:platform:{platform_name})."""
        return self._inner_dict.get('platform')  # type: ignore

    @platform.setter
    def platform(self, value: str) -> None:
        self._inner_dict.update(platform=value)

    @property
    def version(self) -> int:
        """Server-assigned version, bumped on every SchemaMetadata change;
        distinct from the platform-native schema version."""
        return self._inner_dict.get('version')  # type: ignore

    @version.setter
    def version(self, value: int) -> None:
        self._inner_dict.update(version=value)

    @property
    def created(self) -> "AuditStampClass":
        """AuditStamp for the creation of this resource."""
        return self._inner_dict.get('created')  # type: ignore

    @created.setter
    def created(self, value: "AuditStampClass") -> None:
        self._inner_dict.update(created=value)

    @property
    def lastModified(self) -> "AuditStampClass":
        """AuditStamp for the last modification; equals `created` if the
        resource was never modified."""
        return self._inner_dict.get('lastModified')  # type: ignore

    @lastModified.setter
    def lastModified(self, value: "AuditStampClass") -> None:
        self._inner_dict.update(lastModified=value)

    @property
    def deleted(self) -> Union[None, "AuditStampClass"]:
        """AuditStamp for the deletion of this resource; must be later than
        `created` and may coincide with `lastModified`."""
        return self._inner_dict.get('deleted')  # type: ignore

    @deleted.setter
    def deleted(self, value: Union[None, "AuditStampClass"]) -> None:
        self._inner_dict.update(deleted=value)

    @property
    def dataset(self) -> Union[None, str]:
        """Dataset this schema metadata is associated with."""
        return self._inner_dict.get('dataset')  # type: ignore

    @dataset.setter
    def dataset(self, value: Union[None, str]) -> None:
        self._inner_dict.update(dataset=value)

    @property
    def cluster(self) -> Union[None, str]:
        """Cluster this schema metadata resides in."""
        return self._inner_dict.get('cluster')  # type: ignore

    @cluster.setter
    def cluster(self, value: Union[None, str]) -> None:
        self._inner_dict.update(cluster=value)

    @property
    def hash(self) -> str:
        """SHA1 hash of the schema content."""
        return self._inner_dict.get('hash')  # type: ignore

    @hash.setter
    def hash(self, value: str) -> None:
        self._inner_dict.update(hash=value)

    @property
    def platformSchema(self) -> Union["EspressoSchemaClass", "OracleDDLClass", "MySqlDDLClass", "PrestoDDLClass", "KafkaSchemaClass", "BinaryJsonSchemaClass", "OrcSchemaClass", "SchemalessClass", "KeyValueSchemaClass", "OtherSchemaClass"]:
        """Native schema in the dataset's platform."""
        return self._inner_dict.get('platformSchema')  # type: ignore

    @platformSchema.setter
    def platformSchema(self, value: Union["EspressoSchemaClass", "OracleDDLClass", "MySqlDDLClass", "PrestoDDLClass", "KafkaSchemaClass", "BinaryJsonSchemaClass", "OrcSchemaClass", "SchemalessClass", "KeyValueSchemaClass", "OtherSchemaClass"]) -> None:
        self._inner_dict.update(platformSchema=value)

    @property
    def fields(self) -> List["SchemaFieldClass"]:
        """Client-provided list of fields from the document schema."""
        return self._inner_dict.get('fields')  # type: ignore

    @fields.setter
    def fields(self, value: List["SchemaFieldClass"]) -> None:
        self._inner_dict.update(fields=value)

    @property
    def primaryKeys(self) -> Union[None, List[str]]:
        """Client-provided primary-key field paths (SchemaField fieldPath
        values); order defines hierarchical espresso keys, empty list means
        no primary-key access pattern."""
        return self._inner_dict.get('primaryKeys')  # type: ignore

    @primaryKeys.setter
    def primaryKeys(self, value: Union[None, List[str]]) -> None:
        self._inner_dict.update(primaryKeys=value)

    @property
    def foreignKeysSpecs(self) -> Union[None, Dict[str, "ForeignKeySpecClass"]]:
        """References this schema makes to external datasets, keyed by
        ForeignKeySpecName."""
        return self._inner_dict.get('foreignKeysSpecs')  # type: ignore

    @foreignKeysSpecs.setter
    def foreignKeysSpecs(self, value: Union[None, Dict[str, "ForeignKeySpecClass"]]) -> None:
        self._inner_dict.update(foreignKeysSpecs=value)
class SchemalessClass(DictWrapper):
    """The dataset has no specific schema associated with it."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.Schemaless")

    def __init__(self):
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "SchemalessClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # No fields to reset.
        pass
class StringTypeClass(DictWrapper):
    """String field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.StringType")

    def __init__(self):
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "StringTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # No fields to reset.
        pass
class TimeTypeClass(DictWrapper):
    """Time field type. This should also be used for datetimes."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.TimeType")

    def __init__(self):
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "TimeTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        # No fields to reset.
        pass
class UnionTypeClass(DictWrapper):
    """Union field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.UnionType")

    def __init__(self,
        nestedTypes: Union[None, List[str]]=None,
    ):
        """Wrap an optional list of member type names."""
        super().__init__()
        self.nestedTypes = nestedTypes

    @classmethod
    def construct_with_defaults(cls) -> "UnionTypeClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.nestedTypes = self.RECORD_SCHEMA.field_map["nestedTypes"].default

    @property
    def nestedTypes(self) -> Union[None, List[str]]:
        """List of types in the union type."""
        return self._inner_dict.get('nestedTypes')  # type: ignore

    @nestedTypes.setter
    def nestedTypes(self, value: Union[None, List[str]]) -> None:
        self._inner_dict.update(nestedTypes=value)
class UrnForeignKeyClass(DictWrapper):
    """Marks an external reference made by SchemaMetadata fields when the
    reference is a com.linkedin.pegasus2avro.common.Urn (or subtype)."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.UrnForeignKey")

    def __init__(self,
        currentFieldPath: str,
    ):
        """Wrap the referencing field path."""
        super().__init__()
        self.currentFieldPath = currentFieldPath

    @classmethod
    def construct_with_defaults(cls) -> "UrnForeignKeyClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.currentFieldPath = str()

    @property
    def currentFieldPath(self) -> str:
        """Field in the hosting (current) SchemaMetadata."""
        return self._inner_dict.get('currentFieldPath')  # type: ignore

    @currentFieldPath.setter
    def currentFieldPath(self, value: str) -> None:
        self._inner_dict.update(currentFieldPath=value)
class TagPropertiesClass(DictWrapper):
    """Properties associated with a Tag."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.tag.TagProperties")

    def __init__(self,
        name: str,
        description: Union[None, str]=None,
    ):
        """Wrap the tag's name and optional description."""
        super().__init__()
        self.name = name
        self.description = description

    @classmethod
    def construct_with_defaults(cls) -> "TagPropertiesClass":
        instance = cls.construct({})
        instance._restore_defaults()
        return instance

    def _restore_defaults(self) -> None:
        self.name = str()
        self.description = self.RECORD_SCHEMA.field_map["description"].default

    @property
    def name(self) -> str:
        """Name of the tag."""
        return self._inner_dict.get('name')  # type: ignore

    @name.setter
    def name(self, value: str) -> None:
        self._inner_dict.update(name=value)

    @property
    def description(self) -> Union[None, str]:
        """Documentation of the tag."""
        return self._inner_dict.get('description')  # type: ignore

    @description.setter
    def description(self, value: Union[None, str]) -> None:
        self._inner_dict.update(description=value)
# Registry mapping Avro record names to their generated wrapper classes.
# Each type is reachable under both its fully-qualified name (listed below)
# and its bare name (derived afterwards), matching the original literal that
# spelled out both halves by hand.
__SCHEMA_TYPES = {
    'com.linkedin.events.KafkaAuditHeader': KafkaAuditHeaderClass,
    'com.linkedin.pegasus2avro.chart.ChartInfo': ChartInfoClass,
    'com.linkedin.pegasus2avro.chart.ChartQuery': ChartQueryClass,
    'com.linkedin.pegasus2avro.chart.ChartQueryType': ChartQueryTypeClass,
    'com.linkedin.pegasus2avro.chart.ChartType': ChartTypeClass,
    'com.linkedin.pegasus2avro.chart.EditableChartProperties': EditableChartPropertiesClass,
    'com.linkedin.pegasus2avro.common.AccessLevel': AccessLevelClass,
    'com.linkedin.pegasus2avro.common.AuditStamp': AuditStampClass,
    'com.linkedin.pegasus2avro.common.BrowsePaths': BrowsePathsClass,
    'com.linkedin.pegasus2avro.common.ChangeAuditStamps': ChangeAuditStampsClass,
    'com.linkedin.pegasus2avro.common.Cost': CostClass,
    'com.linkedin.pegasus2avro.common.CostCost': CostCostClass,
    'com.linkedin.pegasus2avro.common.CostCostDiscriminator': CostCostDiscriminatorClass,
    'com.linkedin.pegasus2avro.common.CostType': CostTypeClass,
    'com.linkedin.pegasus2avro.common.Deprecation': DeprecationClass,
    'com.linkedin.pegasus2avro.common.FabricType': FabricTypeClass,
    'com.linkedin.pegasus2avro.common.GlobalTags': GlobalTagsClass,
    'com.linkedin.pegasus2avro.common.GlossaryTermAssociation': GlossaryTermAssociationClass,
    'com.linkedin.pegasus2avro.common.GlossaryTerms': GlossaryTermsClass,
    'com.linkedin.pegasus2avro.common.InstitutionalMemory': InstitutionalMemoryClass,
    'com.linkedin.pegasus2avro.common.InstitutionalMemoryMetadata': InstitutionalMemoryMetadataClass,
    'com.linkedin.pegasus2avro.common.MLFeatureDataType': MLFeatureDataTypeClass,
    'com.linkedin.pegasus2avro.common.Owner': OwnerClass,
    'com.linkedin.pegasus2avro.common.Ownership': OwnershipClass,
    'com.linkedin.pegasus2avro.common.OwnershipSource': OwnershipSourceClass,
    'com.linkedin.pegasus2avro.common.OwnershipSourceType': OwnershipSourceTypeClass,
    'com.linkedin.pegasus2avro.common.OwnershipType': OwnershipTypeClass,
    'com.linkedin.pegasus2avro.common.Status': StatusClass,
    'com.linkedin.pegasus2avro.common.TagAssociation': TagAssociationClass,
    'com.linkedin.pegasus2avro.common.VersionTag': VersionTagClass,
    'com.linkedin.pegasus2avro.common.fieldtransformer.TransformationType': TransformationTypeClass,
    'com.linkedin.pegasus2avro.common.fieldtransformer.UDFTransformer': UDFTransformerClass,
    'com.linkedin.pegasus2avro.dashboard.DashboardInfo': DashboardInfoClass,
    'com.linkedin.pegasus2avro.dashboard.EditableDashboardProperties': EditableDashboardPropertiesClass,
    'com.linkedin.pegasus2avro.datajob.DataFlowInfo': DataFlowInfoClass,
    'com.linkedin.pegasus2avro.datajob.DataJobInfo': DataJobInfoClass,
    'com.linkedin.pegasus2avro.datajob.DataJobInputOutput': DataJobInputOutputClass,
    'com.linkedin.pegasus2avro.datajob.EditableDataFlowProperties': EditableDataFlowPropertiesClass,
    'com.linkedin.pegasus2avro.datajob.EditableDataJobProperties': EditableDataJobPropertiesClass,
    'com.linkedin.pegasus2avro.datajob.azkaban.AzkabanJobType': AzkabanJobTypeClass,
    'com.linkedin.pegasus2avro.dataplatform.DataPlatformInfo': DataPlatformInfoClass,
    'com.linkedin.pegasus2avro.dataplatform.PlatformType': PlatformTypeClass,
    'com.linkedin.pegasus2avro.dataprocess.DataProcessInfo': DataProcessInfoClass,
    'com.linkedin.pegasus2avro.dataset.DatasetDeprecation': DatasetDeprecationClass,
    'com.linkedin.pegasus2avro.dataset.DatasetFieldMapping': DatasetFieldMappingClass,
    'com.linkedin.pegasus2avro.dataset.DatasetLineageType': DatasetLineageTypeClass,
    'com.linkedin.pegasus2avro.dataset.DatasetProperties': DatasetPropertiesClass,
    'com.linkedin.pegasus2avro.dataset.DatasetUpstreamLineage': DatasetUpstreamLineageClass,
    'com.linkedin.pegasus2avro.dataset.EditableDatasetProperties': EditableDatasetPropertiesClass,
    'com.linkedin.pegasus2avro.dataset.Upstream': UpstreamClass,
    'com.linkedin.pegasus2avro.dataset.UpstreamLineage': UpstreamLineageClass,
    'com.linkedin.pegasus2avro.glossary.GlossaryNodeInfo': GlossaryNodeInfoClass,
    'com.linkedin.pegasus2avro.glossary.GlossaryTermInfo': GlossaryTermInfoClass,
    'com.linkedin.pegasus2avro.identity.CorpGroupInfo': CorpGroupInfoClass,
    'com.linkedin.pegasus2avro.identity.CorpUserEditableInfo': CorpUserEditableInfoClass,
    'com.linkedin.pegasus2avro.identity.CorpUserInfo': CorpUserInfoClass,
    'com.linkedin.pegasus2avro.metadata.key.ChartKey': ChartKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.CorpGroupKey': CorpGroupKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.CorpUserKey': CorpUserKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.DashboardKey': DashboardKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.DataFlowKey': DataFlowKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.DataJobKey': DataJobKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.DataPlatformKey': DataPlatformKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.DataProcessKey': DataProcessKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.DatasetKey': DatasetKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.GlossaryNodeKey': GlossaryNodeKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.GlossaryTermKey': GlossaryTermKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.MLFeatureKey': MLFeatureKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.MLFeatureTableKey': MLFeatureTableKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.MLModelKey': MLModelKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.MLPrimaryKeyKey': MLPrimaryKeyKeyClass,
    'com.linkedin.pegasus2avro.metadata.key.TagKey': TagKeyClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.ChartSnapshot': ChartSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.CorpGroupSnapshot': CorpGroupSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.CorpUserSnapshot': CorpUserSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.DashboardSnapshot': DashboardSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.DataFlowSnapshot': DataFlowSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.DataJobSnapshot': DataJobSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.DataPlatformSnapshot': DataPlatformSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.DataProcessSnapshot': DataProcessSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot': DatasetSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.GlossaryNodeSnapshot': GlossaryNodeSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.GlossaryTermSnapshot': GlossaryTermSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.MLFeatureSnapshot': MLFeatureSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.MLFeatureTableSnapshot': MLFeatureTableSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.MLModelSnapshot': MLModelSnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.MLPrimaryKeySnapshot': MLPrimaryKeySnapshotClass,
    'com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot': TagSnapshotClass,
    'com.linkedin.pegasus2avro.ml.metadata.BaseData': BaseDataClass,
    'com.linkedin.pegasus2avro.ml.metadata.CaveatDetails': CaveatDetailsClass,
    'com.linkedin.pegasus2avro.ml.metadata.CaveatsAndRecommendations': CaveatsAndRecommendationsClass,
    'com.linkedin.pegasus2avro.ml.metadata.EthicalConsiderations': EthicalConsiderationsClass,
    'com.linkedin.pegasus2avro.ml.metadata.EvaluationData': EvaluationDataClass,
    'com.linkedin.pegasus2avro.ml.metadata.IntendedUse': IntendedUseClass,
    'com.linkedin.pegasus2avro.ml.metadata.IntendedUserType': IntendedUserTypeClass,
    'com.linkedin.pegasus2avro.ml.metadata.MLFeatureProperties': MLFeaturePropertiesClass,
    'com.linkedin.pegasus2avro.ml.metadata.MLFeatureTableProperties': MLFeatureTablePropertiesClass,
    'com.linkedin.pegasus2avro.ml.metadata.MLModelFactorPrompts': MLModelFactorPromptsClass,
    'com.linkedin.pegasus2avro.ml.metadata.MLModelFactors': MLModelFactorsClass,
    'com.linkedin.pegasus2avro.ml.metadata.MLModelProperties': MLModelPropertiesClass,
    'com.linkedin.pegasus2avro.ml.metadata.MLPrimaryKeyProperties': MLPrimaryKeyPropertiesClass,
    'com.linkedin.pegasus2avro.ml.metadata.Metrics': MetricsClass,
    'com.linkedin.pegasus2avro.ml.metadata.QuantitativeAnalyses': QuantitativeAnalysesClass,
    'com.linkedin.pegasus2avro.ml.metadata.SourceCode': SourceCodeClass,
    'com.linkedin.pegasus2avro.ml.metadata.SourceCodeUrl': SourceCodeUrlClass,
    'com.linkedin.pegasus2avro.ml.metadata.SourceCodeUrlType': SourceCodeUrlTypeClass,
    'com.linkedin.pegasus2avro.ml.metadata.TrainingData': TrainingDataClass,
    'com.linkedin.pegasus2avro.mxe.MetadataAuditEvent': MetadataAuditEventClass,
    'com.linkedin.pegasus2avro.mxe.MetadataChangeEvent': MetadataChangeEventClass,
    'com.linkedin.pegasus2avro.schema.ArrayType': ArrayTypeClass,
    'com.linkedin.pegasus2avro.schema.BinaryJsonSchema': BinaryJsonSchemaClass,
    'com.linkedin.pegasus2avro.schema.BooleanType': BooleanTypeClass,
    'com.linkedin.pegasus2avro.schema.BytesType': BytesTypeClass,
    'com.linkedin.pegasus2avro.schema.DatasetFieldForeignKey': DatasetFieldForeignKeyClass,
    'com.linkedin.pegasus2avro.schema.DateType': DateTypeClass,
    'com.linkedin.pegasus2avro.schema.EditableSchemaFieldInfo': EditableSchemaFieldInfoClass,
    'com.linkedin.pegasus2avro.schema.EditableSchemaMetadata': EditableSchemaMetadataClass,
    'com.linkedin.pegasus2avro.schema.EnumType': EnumTypeClass,
    'com.linkedin.pegasus2avro.schema.EspressoSchema': EspressoSchemaClass,
    'com.linkedin.pegasus2avro.schema.FixedType': FixedTypeClass,
    'com.linkedin.pegasus2avro.schema.ForeignKeySpec': ForeignKeySpecClass,
    'com.linkedin.pegasus2avro.schema.KafkaSchema': KafkaSchemaClass,
    'com.linkedin.pegasus2avro.schema.KeyValueSchema': KeyValueSchemaClass,
    'com.linkedin.pegasus2avro.schema.MapType': MapTypeClass,
    'com.linkedin.pegasus2avro.schema.MySqlDDL': MySqlDDLClass,
    'com.linkedin.pegasus2avro.schema.NullType': NullTypeClass,
    'com.linkedin.pegasus2avro.schema.NumberType': NumberTypeClass,
    'com.linkedin.pegasus2avro.schema.OracleDDL': OracleDDLClass,
    'com.linkedin.pegasus2avro.schema.OrcSchema': OrcSchemaClass,
    'com.linkedin.pegasus2avro.schema.OtherSchema': OtherSchemaClass,
    'com.linkedin.pegasus2avro.schema.PrestoDDL': PrestoDDLClass,
    'com.linkedin.pegasus2avro.schema.RecordType': RecordTypeClass,
    'com.linkedin.pegasus2avro.schema.SchemaField': SchemaFieldClass,
    'com.linkedin.pegasus2avro.schema.SchemaFieldDataType': SchemaFieldDataTypeClass,
    'com.linkedin.pegasus2avro.schema.SchemaMetadata': SchemaMetadataClass,
    'com.linkedin.pegasus2avro.schema.Schemaless': SchemalessClass,
    'com.linkedin.pegasus2avro.schema.StringType': StringTypeClass,
    'com.linkedin.pegasus2avro.schema.TimeType': TimeTypeClass,
    'com.linkedin.pegasus2avro.schema.UnionType': UnionTypeClass,
    'com.linkedin.pegasus2avro.schema.UrnForeignKey': UrnForeignKeyClass,
    'com.linkedin.pegasus2avro.tag.TagProperties': TagPropertiesClass,
}
# Register every class under its bare (unqualified) name as well, i.e. the
# last dot-separated segment of the fully-qualified name. Bare names are
# unique across this schema, so no entry clobbers another; the comprehension
# is fully built before update() runs, and insertion order (qualified names
# first, then bare names in the same sequence) matches the original literal.
__SCHEMA_TYPES.update(
    {qualified_name.rsplit('.', 1)[-1]: schema_class
     for qualified_name, schema_class in __SCHEMA_TYPES.items()}
)
_json_converter = avrojson.AvroJsonConverter(use_logical_types=False, schema_types=__SCHEMA_TYPES)
# fmt: on
| 37.194952 | 530 | 0.654923 |
f1c5d5dc5807ac574886765f9ad2c34d866f0b9c | 26,451 | py | Python | meditation/meditation.py | tmfds/dfk | 91b6f95a4630b57deecf87cf4850b6576646c7d1 | [
"MIT"
] | null | null | null | meditation/meditation.py | tmfds/dfk | 91b6f95a4630b57deecf87cf4850b6576646c7d1 | [
"MIT"
] | null | null | null | meditation/meditation.py | tmfds/dfk | 91b6f95a4630b57deecf87cf4850b6576646c7d1 | [
"MIT"
] | null | null | null | from web3 import Web3
# MeditationCircle contract address on Harmony (lower-case here; run through
# Web3.toChecksumAddress before use, as the helpers below do).
CONTRACT_ADDRESS = '0x0594d86b2923076a2316eaea4e1ca286daa142c1'
ABI = """
[
{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"atunementItemAddress","type":"address"}],"name":"AttunementCrystalAdded","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":true,"internalType":"uint256","name":"heroId","type":"uint256"},{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"components":[{"internalType":"uint256","name":"summonedTime","type":"uint256"},{"internalType":"uint256","name":"nextSummonTime","type":"uint256"},{"internalType":"uint256","name":"summonerId","type":"uint256"},{"internalType":"uint256","name":"assistantId","type":"uint256"},{"internalType":"uint32","name":"summons","type":"uint32"},{"internalType":"uint32","name":"maxSummons","type":"uint32"}],"internalType":"struct IHeroTypes.SummoningInfo","name":"summoningInfo","type":"tuple"},{"components":[{"internalType":"uint256","name":"statGenes","type":"uint256"},{"internalType":"uint256","name":"visualGenes","type":"uint256"},{"internalType":"enum IHeroTypes.Rarity","name":"rarity","type":"uint8"},{"internalType":"bool","name":"shiny","type":"bool"},{"internalType":"uint16","name":"generation","type":"uint16"},{"internalType":"uint32","name":"firstName","type":"uint32"},{"internalType":"uint32","name":"lastName","type":"uint32"},{"internalType":"uint8","name":"shinyStyle","type":"uint8"},{"internalType":"uint8","name":"class","type":"uint8"},{"internalType":"uint8","name":"subClass","type":"uint8"}],"internalType":"struct IHeroTypes.HeroInfo","name":"info","type":"tuple"},{"components":[{"internalType":"uint256","name":"staminaFullAt","type":"uint256"},{"internalType":"uint256","name":"hpFullAt","type":"uint256"},{"internalType":"uint256","name":"mpFullAt","type":"uint256"},{"internalType":"uint16","name":"level","type":"uint16"},{"internalType":"uint64","name":"xp","type":"uint64"},{"internalType":"address","name":"currentQuest","type":"address"},{"internalType":"uint8","name":"sp","type":"uint8"},{"internalType":"enum IHeroTypes.HeroStatus","name":"status","type":"uint8"}],"internalType":"struct 
IHeroTypes.HeroState","name":"state","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hp","type":"uint16"},{"internalType":"uint16","name":"mp","type":"uint16"},{"internalType":"uint16","name":"stamina","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStats","name":"stats","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct 
IHeroTypes.HeroStatGrowth","name":"primaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStatGrowth","name":"secondaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"mining","type":"uint16"},{"internalType":"uint16","name":"gardening","type":"uint16"},{"internalType":"uint16","name":"foraging","type":"uint16"},{"internalType":"uint16","name":"fishing","type":"uint16"}],"internalType":"struct IHeroTypes.HeroProfessions","name":"professions","type":"tuple"}],"indexed":false,"internalType":"struct IHeroTypes.Hero","name":"hero","type":"tuple"},{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"components":[{"internalType":"uint256","name":"summonedTime","type":"uint256"},{"internalType":"uint256","name":"nextSummonTime","type":"uint256"},{"internalType":"uint256","name":"summonerId","type":"uint256"},{"internalType":"uint256","name":"assistantId","type":"uint256"},{"internalType":"uint32","name":"summons","type":"uint32"},{"internalType":"uint32","name":"maxSummons","type":"uint32"}],"internalType":"struct 
IHeroTypes.SummoningInfo","name":"summoningInfo","type":"tuple"},{"components":[{"internalType":"uint256","name":"statGenes","type":"uint256"},{"internalType":"uint256","name":"visualGenes","type":"uint256"},{"internalType":"enum IHeroTypes.Rarity","name":"rarity","type":"uint8"},{"internalType":"bool","name":"shiny","type":"bool"},{"internalType":"uint16","name":"generation","type":"uint16"},{"internalType":"uint32","name":"firstName","type":"uint32"},{"internalType":"uint32","name":"lastName","type":"uint32"},{"internalType":"uint8","name":"shinyStyle","type":"uint8"},{"internalType":"uint8","name":"class","type":"uint8"},{"internalType":"uint8","name":"subClass","type":"uint8"}],"internalType":"struct IHeroTypes.HeroInfo","name":"info","type":"tuple"},{"components":[{"internalType":"uint256","name":"staminaFullAt","type":"uint256"},{"internalType":"uint256","name":"hpFullAt","type":"uint256"},{"internalType":"uint256","name":"mpFullAt","type":"uint256"},{"internalType":"uint16","name":"level","type":"uint16"},{"internalType":"uint64","name":"xp","type":"uint64"},{"internalType":"address","name":"currentQuest","type":"address"},{"internalType":"uint8","name":"sp","type":"uint8"},{"internalType":"enum IHeroTypes.HeroStatus","name":"status","type":"uint8"}],"internalType":"struct 
IHeroTypes.HeroState","name":"state","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hp","type":"uint16"},{"internalType":"uint16","name":"mp","type":"uint16"},{"internalType":"uint16","name":"stamina","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStats","name":"stats","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct 
IHeroTypes.HeroStatGrowth","name":"primaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"strength","type":"uint16"},{"internalType":"uint16","name":"intelligence","type":"uint16"},{"internalType":"uint16","name":"wisdom","type":"uint16"},{"internalType":"uint16","name":"luck","type":"uint16"},{"internalType":"uint16","name":"agility","type":"uint16"},{"internalType":"uint16","name":"vitality","type":"uint16"},{"internalType":"uint16","name":"endurance","type":"uint16"},{"internalType":"uint16","name":"dexterity","type":"uint16"},{"internalType":"uint16","name":"hpSm","type":"uint16"},{"internalType":"uint16","name":"hpRg","type":"uint16"},{"internalType":"uint16","name":"hpLg","type":"uint16"},{"internalType":"uint16","name":"mpSm","type":"uint16"},{"internalType":"uint16","name":"mpRg","type":"uint16"},{"internalType":"uint16","name":"mpLg","type":"uint16"}],"internalType":"struct IHeroTypes.HeroStatGrowth","name":"secondaryStatGrowth","type":"tuple"},{"components":[{"internalType":"uint16","name":"mining","type":"uint16"},{"internalType":"uint16","name":"gardening","type":"uint16"},{"internalType":"uint16","name":"foraging","type":"uint16"},{"internalType":"uint16","name":"fishing","type":"uint16"}],"internalType":"struct IHeroTypes.HeroProfessions","name":"professions","type":"tuple"}],"indexed":false,"internalType":"struct IHeroTypes.Hero","name":"oldHero","type":"tuple"}],"name":"LevelUp","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":true,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"meditationId","type":"uint256"},{"indexed":false,"internalType":"uint8","name":"primaryStat","type":"uint8"},{"indexed":false,"internalType":"uint8","name":"secondaryStat","type":"uint8"},{"indexed":false,"internalType":"uint8","name":"tertiaryStat","type":"uint8"},{"indexed":false,"internalType":"address","name":"attunementCrystal","type":"address"}],"name":"MeditationBegun","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":true,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"meditationId","type":"uint256"}],"name":"MeditationCompleted","type":"event"},
{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Paused","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"previousAdminRole","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"newAdminRole","type":"bytes32"}],"name":"RoleAdminChanged","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleGranted","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleRevoked","type":"event"},
{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"player","type":"address"},{"indexed":true,"internalType":"uint256","name":"heroId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"stat","type":"uint256"},{"indexed":false,"internalType":"uint8","name":"increase","type":"uint8"},{"indexed":false,"internalType":"enum MeditationCircle.UpdateType","name":"updateType","type":"uint8"}],"name":"StatUp","type":"event"},
{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"account","type":"address"}],"name":"Unpaused","type":"event"},{"inputs":[],"name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"MODERATOR_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint16","name":"_level","type":"uint16"}],"name":"_getRequiredRunes","outputs":[{"internalType":"uint16[10]","name":"","type":"uint16[10]"}],"stateMutability":"pure","type":"function"},
{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"activeAttunementCrystals","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"_address","type":"address"}],"name":"addAttunementCrystal","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_heroId","type":"uint256"}],"name":"completeMeditation","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"randomNumber","type":"uint256"},{"internalType":"uint256","name":"digits","type":"uint256"},{"internalType":"uint256","name":"offset","type":"uint256"}],"name":"extractNumber","outputs":[{"internalType":"uint256","name":"result","type":"uint256"}],"stateMutability":"pure","type":"function"},
{"inputs":[{"internalType":"address","name":"_address","type":"address"}],"name":"getActiveMeditations","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"player","type":"address"},{"internalType":"uint256","name":"heroId","type":"uint256"},{"internalType":"uint8","name":"primaryStat","type":"uint8"},{"internalType":"uint8","name":"secondaryStat","type":"uint8"},{"internalType":"uint8","name":"tertiaryStat","type":"uint8"},{"internalType":"address","name":"attunementCrystal","type":"address"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint8","name":"status","type":"uint8"}],"internalType":"struct MeditationCircle.Meditation[]","name":"","type":"tuple[]"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_heroId","type":"uint256"}],"name":"getHeroMeditation","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"player","type":"address"},{"internalType":"uint256","name":"heroId","type":"uint256"},{"internalType":"uint8","name":"primaryStat","type":"uint8"},{"internalType":"uint8","name":"secondaryStat","type":"uint8"},{"internalType":"uint8","name":"tertiaryStat","type":"uint8"},{"internalType":"address","name":"attunementCrystal","type":"address"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint8","name":"status","type":"uint8"}],"internalType":"struct MeditationCircle.Meditation","name":"","type":"tuple"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_id","type":"uint256"}],"name":"getMeditation","outputs":[{"components":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"player","type":"address"},{"internalType":"uint256","name":"heroId","type":"uint256"},{"internalType":"uint8","name":"primaryStat","type":"uint8"},{"internalType":"uint8","name":"secondaryStat","type":"uint8"},{"internalType":"uint8","name":"tertiaryStat","type":"uint8"},{"internalType":"address","name":"attunementCrystal","type":"address"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint8","name":"status","type":"uint8"}],"internalType":"struct MeditationCircle.Meditation","name":"","type":"tuple"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"grantRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"heroToMeditation","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"_heroCoreAddress","type":"address"},{"internalType":"address","name":"_statScienceAddress","type":"address"},{"internalType":"address","name":"_jewelTokenAddress","type":"address"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"jewelToken","outputs":[{"internalType":"contract IJewelToken","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"pause","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"paused","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"profileActiveMeditations","outputs":[{"internalType":"uint256","name":"id","type":"uint256"},{"internalType":"address","name":"player","type":"address"},{"internalType":"uint256","name":"heroId","type":"uint256"},{"internalType":"uint8","name":"primaryStat","type":"uint8"},{"internalType":"uint8","name":"secondaryStat","type":"uint8"},{"internalType":"uint8","name":"tertiaryStat","type":"uint8"},{"internalType":"address","name":"attunementCrystal","type":"address"},{"internalType":"uint256","name":"startBlock","type":"uint256"},{"internalType":"uint8","name":"status","type":"uint8"}],"stateMutability":"view","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"renounceRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"revokeRole","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"address[]","name":"_feeAddresses","type":"address[]"},{"internalType":"uint256[]","name":"_feePercents","type":"uint256[]"}],"name":"setFees","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint8","name":"_index","type":"uint8"},{"internalType":"address","name":"_address","type":"address"}],"name":"setRune","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"_heroId","type":"uint256"},{"internalType":"uint8","name":"_primaryStat","type":"uint8"},{"internalType":"uint8","name":"_secondaryStat","type":"uint8"},{"internalType":"uint8","name":"_tertiaryStat","type":"uint8"},{"internalType":"address","name":"_attunementCrystal","type":"address"}],"name":"startMeditation","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"unpause","outputs":[],"stateMutability":"nonpayable","type":"function"},
{"inputs":[{"internalType":"uint256","name":"blockNumber","type":"uint256"}],"name":"vrf","outputs":[{"internalType":"bytes32","name":"result","type":"bytes32"}],"stateMutability":"view","type":"function"}
]
"""
# The all-zero (null) address. Presumably passed as the attunement-crystal
# argument of start_meditation when no crystal is used -- confirm with callers.
ZERO_ADDRESS = '0x0000000000000000000000000000000000000000'
def block_explorer_link(txid):
    """Return the Harmony block-explorer URL for a transaction hash."""
    return f"https://explorer.harmony.one/tx/{txid}"
def get_required_runes(level, rpc_address):
    """Read the rune amounts required for a level-up at *level* (read-only call)."""
    node = Web3(Web3.HTTPProvider(rpc_address))
    meditation = node.eth.contract(
        Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return meditation.functions._getRequiredRunes(level).call()
def active_attunement_crystals(address, rpc_address):
    """Return whether *address* is registered as an attunement crystal (read-only call)."""
    node = Web3(Web3.HTTPProvider(rpc_address))
    meditation = node.eth.contract(
        Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return meditation.functions.activeAttunementCrystals(address).call()
def add_attunement_crystal(address, rpc_address):
    # NOTE(review): addAttunementCrystal is declared nonpayable (state-changing)
    # in the ABI, but .call() only simulates the function locally -- it never
    # sends a signed transaction, so this cannot change on-chain state. Confirm
    # whether a signed transaction flow (as in start_meditation) was intended.
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
    contract = w3.eth.contract(contract_address, abi=ABI)
    return contract.functions.addAttunementCrystal(address).call()
def start_meditation(hero_id, stat1, stat2, stat3, attunement_crystal_address, private_key, nonce, gas_price_gwei, tx_timeout_seconds, rpc_address, logger):
    """Sign and send a startMeditation transaction, then wait for it to mine.

    stat1/stat2/stat3 may be numeric stat ids or stat names (translated via
    stat2id). attunement_crystal_address is the crystal contract to use
    (presumably ZERO_ADDRESS for none -- confirm with callers).

    Returns the mined transaction receipt; raises on receipt timeout.
    """
    # Accept human-readable stat names as well as raw ids.
    if isinstance(stat1, str):
        stat1 = stat2id(stat1)
    if isinstance(stat2, str):
        stat2 = stat2id(stat2)
    if isinstance(stat3, str):
        stat3 = stat2id(stat3)
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    account = w3.eth.account.privateKeyToAccount(private_key)
    w3.eth.default_account = account.address
    contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
    contract = w3.eth.contract(contract_address, abi=ABI)
    tx = contract.functions.startMeditation(hero_id, stat1, stat2, stat3, attunement_crystal_address).buildTransaction(
        {'gasPrice': w3.toWei(gas_price_gwei, 'gwei'), 'nonce': nonce})
    logger.debug("Signing transaction")
    signed_tx = w3.eth.account.sign_transaction(tx, private_key=private_key)
    logger.debug("Sending transaction " + str(tx))
    # The returned hash equals signed_tx.hash, so there is no need to keep it.
    w3.eth.send_raw_transaction(signed_tx.rawTransaction)
    logger.debug("Transaction successfully sent !")
    logger.info("Waiting for transaction " + block_explorer_link(signed_tx.hash.hex()) + " to be mined")
    tx_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash=signed_tx.hash, timeout=tx_timeout_seconds,
                                                     poll_latency=3)
    logger.info("Transaction mined !")
    return tx_receipt
def complete_meditation(hero_id, private_key, nonce, gas_price_gwei, tx_timeout_seconds, rpc_address, logger):
    """Sign and send a completeMeditation transaction for *hero_id*, then wait
    for it to mine.

    Returns the mined transaction receipt; raises on receipt timeout.
    """
    w3 = Web3(Web3.HTTPProvider(rpc_address))
    account = w3.eth.account.privateKeyToAccount(private_key)
    w3.eth.default_account = account.address
    contract_address = Web3.toChecksumAddress(CONTRACT_ADDRESS)
    contract = w3.eth.contract(contract_address, abi=ABI)
    tx = contract.functions.completeMeditation(hero_id).buildTransaction(
        {'gasPrice': w3.toWei(gas_price_gwei, 'gwei'), 'nonce': nonce})
    logger.debug("Signing transaction")
    signed_tx = w3.eth.account.sign_transaction(tx, private_key=private_key)
    logger.debug("Sending transaction " + str(tx))
    # The returned hash equals signed_tx.hash, so there is no need to keep it.
    w3.eth.send_raw_transaction(signed_tx.rawTransaction)
    logger.debug("Transaction successfully sent !")
    logger.info("Waiting for transaction " + block_explorer_link(signed_tx.hash.hex()) + " to be mined")
    tx_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash=signed_tx.hash, timeout=tx_timeout_seconds,
                                                     poll_latency=3)
    logger.info("Transaction mined !")
    return tx_receipt
def get_active_meditations(address, rpc_address):
    """Return all active meditation structs owned by *address* (read-only call)."""
    node = Web3(Web3.HTTPProvider(rpc_address))
    meditation = node.eth.contract(
        Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return meditation.functions.getActiveMeditations(address).call()
def get_hero_meditation(hero_id, rpc_address):
    """Return the meditation struct for *hero_id*, or None when its id field
    is 0 (no meditation recorded). Read-only call."""
    node = Web3(Web3.HTTPProvider(rpc_address))
    meditation = node.eth.contract(
        Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    data = meditation.functions.getHeroMeditation(hero_id).call()
    return None if data[0] == 0 else data
def get_meditation(meditation_id, rpc_address):
    """Return the meditation struct for *meditation_id*, or None when its id
    field is 0 (unset entry). Read-only call."""
    node = Web3(Web3.HTTPProvider(rpc_address))
    meditation = node.eth.contract(
        Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    data = meditation.functions.getMeditation(meditation_id).call()
    return None if data[0] == 0 else data
def hero_to_meditation_id(hero_id, rpc_address):
    """Look up the meditation id mapped to *hero_id* (read-only call).

    0 appears to mean "no meditation" (see get_hero_meditation) -- confirm.
    """
    node = Web3(Web3.HTTPProvider(rpc_address))
    meditation = node.eth.contract(
        Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return meditation.functions.heroToMeditation(hero_id).call()
def profile_active_meditations(address, id, rpc_address):
    """Read entry *id* of the player's active-meditation array (read-only call)."""
    node = Web3(Web3.HTTPProvider(rpc_address))
    meditation = node.eth.contract(
        Web3.toChecksumAddress(CONTRACT_ADDRESS), abi=ABI)
    return meditation.functions.profileActiveMeditations(address, id).call()
def stat2id(label):
    """Translate a stat name into its numeric contract id.

    Returns None for names that are not recognized.
    """
    order = ('strength', 'agility', 'intelligence', 'wisdom',
             'luck', 'vitality', 'endurance', 'dexterity')
    return dict(zip(order, range(len(order)))).get(label)
| 134.954082 | 9,954 | 0.678235 |
2589afbeeb7d5098c002f2ea9e5a127fd87be61a | 9,541 | py | Python | ppcls/modeling/architectures/se_resnext_vd.py | wangxicoding/PaddleClas | b37f79a6dac6bf134b016cfed3ced877aaefa5f3 | [
"Apache-2.0"
] | null | null | null | ppcls/modeling/architectures/se_resnext_vd.py | wangxicoding/PaddleClas | b37f79a6dac6bf134b016cfed3ced877aaefa5f3 | [
"Apache-2.0"
] | null | null | null | ppcls/modeling/architectures/se_resnext_vd.py | wangxicoding/PaddleClas | b37f79a6dac6bf134b016cfed3ced877aaefa5f3 | [
"Apache-2.0"
] | null | null | null | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddle.nn.initializer import Uniform
import math
__all__ = ["SE_ResNeXt50_vd_32x4d", "SE_ResNeXt50_vd_32x4d", "SENet154_vd"]
class ConvBNLayer(nn.Layer):
    """Conv2D + BatchNorm block, optionally preceded by a 2x2 average pool.

    When ``is_vd_mode`` is True, the input is average-pooled (stride 2)
    before a convolution -- the ResNet-D ("vd") downsampling variant used
    on the shortcut path.

    Args:
        num_channels: input channel count.
        num_filters: output channel count.
        filter_size: square kernel size.
        stride: convolution stride.
        groups: grouped-convolution group count (cardinality).
        is_vd_mode: apply the 2x2 average pool before the convolution.
        act: activation name applied inside BatchNorm (None for linear).
        name: parameter-name prefix; keep stable, pretrained checkpoints
            are keyed on these names.
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 is_vd_mode=False,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.is_vd_mode = is_vd_mode
        # ceil_mode=True so odd spatial sizes are rounded up, not truncated.
        self._pool2d_avg = AvgPool2D(
            kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,  # "same" padding for odd kernels
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)  # bias is redundant before BatchNorm
        bn_name = name + '_bn'
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
    def forward(self, inputs):
        # Optional vd-style downsample, then conv -> BN(+act).
        if self.is_vd_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y
class BottleneckBlock(nn.Layer):
    """SE-ResNeXt bottleneck: 1x1 reduce -> 3x3 grouped conv -> 1x1 expand,
    followed by an SE (squeeze-and-excitation) rescale, plus a residual add.

    Output channels are ``num_filters * 2`` when cardinality == 32, else
    ``num_filters`` (the 64x4d configuration passes pre-expanded widths).
    ``shortcut=True`` means identity shortcut; otherwise a projection
    ConvBNLayer is built (vd average-pool downsample except in the first
    stage, via ``if_first``).
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 cardinality,
                 reduction_ratio,
                 shortcut=True,
                 if_first=False,
                 name=None):
        super(BottleneckBlock, self).__init__()
        # 1x1 channel reduction.
        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name='conv' + name + '_x1')
        # 3x3 grouped convolution; carries the block's stride.
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            groups=cardinality,
            stride=stride,
            act='relu',
            name='conv' + name + '_x2')
        # 1x1 expansion; activation deferred until after the residual add.
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 2 if cardinality == 32 else num_filters,
            filter_size=1,
            act=None,
            name='conv' + name + '_x3')
        # Squeeze-and-excitation channel reweighting on the expanded output.
        self.scale = SELayer(
            num_channels=num_filters * 2 if cardinality == 32 else num_filters,
            num_filters=num_filters * 2 if cardinality == 32 else num_filters,
            reduction_ratio=reduction_ratio,
            name='fc' + name)
        if not shortcut:
            # Projection shortcut; vd mode (avg-pool downsample) everywhere
            # except the first stage.
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 2
                if cardinality == 32 else num_filters,
                filter_size=1,
                stride=1,
                is_vd_mode=False if if_first else True,
                name='conv' + name + '_prj')
        self.shortcut = shortcut
    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)
        scale = self.scale(conv2)
        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)
        # Residual add, then the deferred ReLU.
        y = paddle.add(x=short, y=scale)
        y = F.relu(y)
        return y
class SELayer(nn.Layer):
    """Squeeze-and-excitation: global average pool -> FC reduce (ReLU) ->
    FC expand (sigmoid) -> per-channel rescale of the input.

    ``reduction_ratio`` divides ``num_channels`` to size the bottleneck FC.
    Linear weights use uniform init scaled by 1/sqrt(fan_in).
    """
    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
        super(SELayer, self).__init__()
        self.pool2d_gap = AdaptiveAvgPool2D(1)
        # Stored for reference only; not read anywhere in this class.
        self._num_channels = num_channels
        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = Linear(
            num_channels,
            med_ch,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"),
            bias_attr=ParamAttr(name=name + '_sqz_offset'))
        self.relu = nn.ReLU()
        # Re-derive the init bound from the bottleneck width.
        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = Linear(
            med_ch,
            num_filters,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"),
            bias_attr=ParamAttr(name=name + '_exc_offset'))
        self.sigmoid = nn.Sigmoid()
    def forward(self, input):
        # (N, C, H, W) -> (N, C, 1, 1) -> (N, C)
        pool = self.pool2d_gap(input)
        pool = paddle.squeeze(pool, axis=[2, 3])
        squeeze = self.squeeze(pool)
        squeeze = self.relu(squeeze)
        excitation = self.excitation(squeeze)
        excitation = self.sigmoid(excitation)
        # Back to (N, C, 1, 1) so it broadcasts over H and W.
        excitation = paddle.unsqueeze(excitation, axis=[2, 3])
        out = input * excitation
        return out
class ResNeXt(nn.Layer):
    """SE-ResNeXt-vd backbone.

    Builds the vd-style deep stem (three 3x3 convs), four stages of SE
    bottleneck blocks, global average pooling and a final FC classifier.
    """
    def __init__(self, layers=50, class_dim=1000, cardinality=32):
        super(ResNeXt, self).__init__()
        self.layers = layers
        self.cardinality = cardinality
        # Reduction ratio used by every SE module in the network.
        self.reduction_ratio = 16
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        supported_cardinality = [32, 64]
        assert cardinality in supported_cardinality, \
            "supported cardinality is {} but input cardinality is {}" \
            .format(supported_cardinality, cardinality)
        # Number of bottleneck blocks in each of the four stages.
        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_channels = [128, 256, 512, 1024]
        num_filters = [128, 256, 512,
                       1024] if cardinality == 32 else [256, 512, 1024, 2048]
        # vd-style deep stem: three 3x3 convs instead of one 7x7 conv.
        self.conv1_1 = ConvBNLayer(
            num_channels=3,
            num_filters=64,
            filter_size=3,
            stride=2,
            act='relu',
            name="conv1_1")
        self.conv1_2 = ConvBNLayer(
            num_channels=64,
            num_filters=64,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_2")
        self.conv1_3 = ConvBNLayer(
            num_channels=64,
            num_filters=128,
            filter_size=3,
            stride=1,
            act='relu',
            name="conv1_3")
        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.block_list = []
        # Numbering offset so parameter names match the pretrained
        # weight naming convention for each depth variant.
        n = 1 if layers == 50 or layers == 101 else 3
        for block in range(len(depth)):
            n += 1
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels=num_channels[block] if i == 0 else
                        num_filters[block] * int(64 // self.cardinality),
                        num_filters=num_filters[block],
                        # First block of each stage (except stage 0)
                        # downsamples spatially.
                        stride=2 if i == 0 and block != 0 else 1,
                        cardinality=self.cardinality,
                        reduction_ratio=self.reduction_ratio,
                        shortcut=shortcut,
                        if_first=block == 0,
                        name=str(n) + '_' + str(i + 1)))
                self.block_list.append(bottleneck_block)
                shortcut = True
        self.pool2d_avg = AdaptiveAvgPool2D(1)
        self.pool2d_avg_channels = num_channels[-1] * 2
        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)
        self.out = Linear(
            self.pool2d_avg_channels,
            class_dim,
            weight_attr=ParamAttr(
                initializer=Uniform(-stdv, stdv), name="fc6_weights"),
            bias_attr=ParamAttr(name="fc6_offset"))
    def forward(self, inputs):
        """Run stem, bottleneck stages, global pool and classifier head."""
        y = self.conv1_1(inputs)
        y = self.conv1_2(y)
        y = self.conv1_3(y)
        y = self.pool2d_max(y)
        for block in self.block_list:
            y = block(y)
        y = self.pool2d_avg(y)
        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
        y = self.out(y)
        return y
def SE_ResNeXt50_vd_32x4d(**args):
    """Build a 50-layer SE-ResNeXt-vd model with 32x4d cardinality."""
    return ResNeXt(layers=50, cardinality=32, **args)
def SE_ResNeXt101_vd_32x4d(**args):
    """Build a 101-layer SE-ResNeXt-vd model with 32x4d cardinality."""
    return ResNeXt(layers=101, cardinality=32, **args)
def SENet154_vd(**args):
    """Build the 152-layer SENet-154-vd model (cardinality 64)."""
    return ResNeXt(layers=152, cardinality=64, **args)
| 33.36014 | 79 | 0.564511 |
51402bef5349288f3ab48dba2b52277e539c60e9 | 7,407 | py | Python | rclpy/test/test_task.py | werner-ne/rclpy | 11a07f81bdd23cc9a40fb2bedda6557b30ad5194 | [
"Apache-2.0"
] | null | null | null | rclpy/test/test_task.py | werner-ne/rclpy | 11a07f81bdd23cc9a40fb2bedda6557b30ad5194 | [
"Apache-2.0"
] | null | null | null | rclpy/test/test_task.py | werner-ne/rclpy | 11a07f81bdd23cc9a40fb2bedda6557b30ad5194 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
from rclpy.task import Future
from rclpy.task import Task
class DummyExecutor:
    """Test double for an executor: records scheduled callbacks
    in ``done_callbacks`` instead of actually running them."""

    def __init__(self):
        self.done_callbacks = []

    def create_task(self, cb, *args):
        entry = (cb, args)
        self.done_callbacks.append(entry)
class TestTask(unittest.TestCase):
    """Unit tests for rclpy.task.Task: plain callables, lambdas,
    coroutines, done callbacks, cancellation and exception handling."""
    def test_task_normal_callable(self):
        """A plain function completes on the first invocation."""
        def func():
            return 'Sentinel Result'
        t = Task(func)
        t()
        self.assertTrue(t.done())
        self.assertEqual('Sentinel Result', t.result())
    def test_task_lambda(self):
        """A lambda wrapper behaves like a plain callable."""
        def func():
            return 'Sentinel Result'
        t = Task(lambda: func())
        t()
        self.assertTrue(t.done())
        self.assertEqual('Sentinel Result', t.result())
    def test_coroutine(self):
        """A coroutine task advances one await per call until finished."""
        called1 = False
        called2 = False
        async def coro():
            nonlocal called1
            nonlocal called2
            called1 = True
            await asyncio.sleep(0)
            called2 = True
            return 'Sentinel Result'
        t = Task(coro)
        t()
        # First call runs up to the await only.
        self.assertTrue(called1)
        self.assertFalse(called2)
        called1 = False
        t()
        # Second call resumes after the await and completes.
        self.assertFalse(called1)
        self.assertTrue(called2)
        self.assertTrue(t.done())
        self.assertEqual('Sentinel Result', t.result())
    def test_done_callback_scheduled(self):
        """A done callback added before completion is scheduled once."""
        executor = DummyExecutor()
        t = Task(lambda: None, executor=executor)
        t.add_done_callback('Sentinel Value')
        t()
        self.assertTrue(t.done())
        self.assertEqual(1, len(executor.done_callbacks))
        self.assertEqual('Sentinel Value', executor.done_callbacks[0][0])
        # The task itself is passed as the sole callback argument.
        args = executor.done_callbacks[0][1]
        self.assertEqual(1, len(args))
        self.assertEqual(t, args[0])
    def test_done_task_done_callback_scheduled(self):
        """A callback added after completion is scheduled immediately."""
        executor = DummyExecutor()
        t = Task(lambda: None, executor=executor)
        t()
        self.assertTrue(t.done())
        t.add_done_callback('Sentinel Value')
        self.assertEqual(1, len(executor.done_callbacks))
        self.assertEqual('Sentinel Value', executor.done_callbacks[0][0])
        args = executor.done_callbacks[0][1]
        self.assertEqual(1, len(args))
        self.assertEqual(t, args[0])
    def test_done_task_called(self):
        """Calling a finished task again does not re-run the callable."""
        called = False
        def func():
            nonlocal called
            called = True
        t = Task(func)
        t()
        self.assertTrue(called)
        self.assertTrue(t.done())
        called = False
        t()
        self.assertFalse(called)
        self.assertTrue(t.done())
    def test_cancelled(self):
        """A task cancelled before running reports cancelled."""
        t = Task(lambda: None)
        t.cancel()
        self.assertTrue(t.cancelled())
    def test_done_task_cancelled(self):
        """Cancelling an already-finished task has no effect."""
        t = Task(lambda: None)
        t()
        t.cancel()
        self.assertFalse(t.cancelled())
    def test_exception(self):
        """An exception raised by the callable is stored and re-raised."""
        def func():
            e = Exception()
            e.sentinel_value = 'Sentinel Exception'
            raise e
        t = Task(func)
        t()
        self.assertTrue(t.done())
        self.assertEqual('Sentinel Exception', t.exception().sentinel_value)
        with self.assertRaises(Exception):
            t.result()
    def test_coroutine_exception(self):
        """An exception raised in a coroutine is stored and re-raised."""
        async def coro():
            e = Exception()
            e.sentinel_value = 'Sentinel Exception'
            raise e
        t = Task(coro)
        t()
        self.assertTrue(t.done())
        self.assertEqual('Sentinel Exception', t.exception().sentinel_value)
        with self.assertRaises(Exception):
            t.result()
    def test_task_normal_callable_args(self):
        """Positional args are forwarded to a plain callable."""
        arg_in = 'Sentinel Arg'
        def func(arg):
            return arg
        t = Task(func, args=(arg_in,))
        t()
        self.assertEqual('Sentinel Arg', t.result())
    def test_coroutine_args(self):
        """Positional args are forwarded to a coroutine."""
        arg_in = 'Sentinel Arg'
        async def coro(arg):
            return arg
        t = Task(coro, args=(arg_in,))
        t()
        self.assertEqual('Sentinel Arg', t.result())
    def test_task_normal_callable_kwargs(self):
        """Keyword args are forwarded to a plain callable."""
        arg_in = 'Sentinel Arg'
        def func(kwarg=None):
            return kwarg
        t = Task(func, kwargs={'kwarg': arg_in})
        t()
        self.assertEqual('Sentinel Arg', t.result())
    def test_coroutine_kwargs(self):
        """Keyword args are forwarded to a coroutine."""
        arg_in = 'Sentinel Arg'
        async def coro(kwarg=None):
            return kwarg
        t = Task(coro, kwargs={'kwarg': arg_in})
        t()
        self.assertEqual('Sentinel Arg', t.result())
    def test_executing(self):
        """A task that has never been called is not executing."""
        t = Task(lambda: None)
        self.assertFalse(t.executing())
class TestFuture(unittest.TestCase):
    """Unit tests for rclpy.task.Future: state transitions, results,
    awaiting from coroutines, and done-callback scheduling."""
    def test_cancelled(self):
        """cancel() marks the future as cancelled."""
        f = Future()
        f.cancel()
        self.assertTrue(f.cancelled())
    def test_done(self):
        """A future becomes done once a result is set."""
        f = Future()
        self.assertFalse(f.done())
        f.set_result(None)
        self.assertTrue(f.done())
    def test_set_result(self):
        """set_result() stores the value and completes the future."""
        f = Future()
        f.set_result('Sentinel Result')
        self.assertEqual('Sentinel Result', f.result())
        self.assertTrue(f.done())
    def test_set_exception(self):
        """set_exception() stores the exception and completes the future."""
        f = Future()
        f.set_exception('Sentinel Exception')
        self.assertEqual('Sentinel Exception', f.exception())
        self.assertTrue(f.done())
    def test_await(self):
        """Awaiting a future resumes the coroutine with the result."""
        f = Future()
        async def coro():
            nonlocal f
            return await f
        c = coro()
        # First send suspends the coroutine on the pending future.
        c.send(None)
        f.set_result('Sentinel Result')
        try:
            c.send(None)
        except StopIteration as e:
            # The coroutine's return value rides on StopIteration.
            self.assertEqual('Sentinel Result', e.value)
    def test_await_exception(self):
        """Awaiting a future re-raises a stored exception."""
        f = Future()
        async def coro():
            nonlocal f
            return await f
        c = coro()
        c.send(None)
        f.set_exception(RuntimeError('test exception'))
        with self.assertRaises(RuntimeError):
            c.send(None)
    def test_cancel_schedules_callbacks(self):
        """Cancellation schedules registered done callbacks."""
        executor = DummyExecutor()
        f = Future(executor=executor)
        f.add_done_callback(lambda f: None)
        f.cancel()
        self.assertTrue(executor.done_callbacks)
    def test_set_result_schedules_callbacks(self):
        """Setting a result schedules registered done callbacks."""
        executor = DummyExecutor()
        f = Future(executor=executor)
        f.add_done_callback(lambda f: None)
        f.set_result('Anything')
        self.assertTrue(executor.done_callbacks)
    def test_set_exception_schedules_callbacks(self):
        """Setting an exception schedules registered done callbacks."""
        executor = DummyExecutor()
        f = Future(executor=executor)
        f.add_done_callback(lambda f: None)
        f.set_exception('Anything')
        self.assertTrue(executor.done_callbacks)
if __name__ == '__main__':
unittest.main()
| 26.453571 | 76 | 0.595383 |
e7706ebcf5e1d361a0a464e89b382e47cdcb8068 | 1,180 | py | Python | test/hummingbot/strategy/dev_1_get_order_book/test_dev_1_get_order_book_config_map.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | test/hummingbot/strategy/dev_1_get_order_book/test_dev_1_get_order_book_config_map.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | test/hummingbot/strategy/dev_1_get_order_book/test_dev_1_get_order_book_config_map.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | import unittest
from copy import deepcopy
from hummingbot.client.settings import AllConnectorSettings
from hummingbot.strategy.dev_1_get_order_book.dev_1_get_order_book_config_map import (
dev_1_get_order_book_config_map,
trading_pair_prompt,
)
class Dev1GetOrderBookConfigMapTest(unittest.TestCase):
    """Tests for the dev_1_get_order_book strategy config-map prompts."""
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.exchange = "binance"
    def setUp(self) -> None:
        super().setUp()
        # Snapshot the module-level config map so each test can mutate it.
        self.config_backup = deepcopy(dev_1_get_order_book_config_map)
    def tearDown(self) -> None:
        self.reset_config_map()
        super().tearDown()
    def reset_config_map(self):
        """Restore the module-level config map from the snapshot."""
        for key, value in self.config_backup.items():
            dev_1_get_order_book_config_map[key] = value
    def test_trading_pair_prompt(self):
        """The prompt embeds the selected exchange and an example pair."""
        dev_1_get_order_book_config_map["exchange"].value = self.exchange
        example = AllConnectorSettings.get_example_pairs().get(self.exchange)
        prompt = trading_pair_prompt()
        expected = f"Enter the token trading pair to fetch its order book on {self.exchange} (e.g. {example}) >>> "
        self.assertEqual(expected, prompt)
| 30.25641 | 115 | 0.711017 |
1fa2e6f363a860308819417a59270d0e81808eb9 | 2,538 | py | Python | app/recipe/tests/test_tags_api.py | jimalman/recipe-app-api | dcf64199033d68b80c02816389c1c9966b9d4a32 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | jimalman/recipe-app-api | dcf64199033d68b80c02816389c1c9966b9d4a32 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | jimalman/recipe-app-api | dcf64199033d68b80c02816389c1c9966b9d4a32 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Test the publically available tags API"""
    def setUp(self):
        # Unauthenticated client: no force_authenticate here on purpose.
        self.client = APIClient()
    def test_login_required(self):
        """Test that login is required for retrieving tags"""
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Test the authorized user tags API"""
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'test@udemy.com',
            'password123'
        )
        self.client = APIClient()
        # Bypass real authentication for every request in this class.
        self.client.force_authenticate(self.user)
    def test_retrieve_tags(self):
        """Test retrieving tags"""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        res = self.client.get(TAGS_URL)
        # The API is expected to return tags ordered by name, descending.
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user"""
        user2 = get_user_model().objects.create_user(
            'other@udemy.com',
            'testpass'
        )
        # A tag owned by a different user must not leak into the response.
        Tag.objects.create(user=user2, name='Fruity')
        tag = Tag.objects.create(user=self.user, name='Confort Food')
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], tag.name)
    def test_create_tags_successful(self):
        """Test creating a new tag"""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)
        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_tag_invalid(self):
        """Test creating a new tag with invalid payload"""
        payload = {'name': ''}
        res = self.client.post(TAGS_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| 30.578313 | 71 | 0.654452 |
0299bc06edeaca31554c5006346e97094c9924c5 | 385 | py | Python | plugins/cosigner_pool/__init__.py | johnlito123/electrum-xuez | 4eb35889f95e31f0a08d5488082df9ab94b4c3ca | [
"MIT"
] | null | null | null | plugins/cosigner_pool/__init__.py | johnlito123/electrum-xuez | 4eb35889f95e31f0a08d5488082df9ab94b4c3ca | [
"MIT"
] | null | null | null | plugins/cosigner_pool/__init__.py | johnlito123/electrum-xuez | 4eb35889f95e31f0a08d5488082df9ab94b4c3ca | [
"MIT"
] | 4 | 2018-07-07T16:35:50.000Z | 2018-12-25T16:02:52.000Z | from electrum_xuez.i18n import _
fullname = _('Cosigner Pool')
description = ' '.join([
_("This plugin facilitates the use of multi-signatures wallets."),
_("It sends and receives partially signed transactions from/to your cosigner wallet."),
_("Transactions are encrypted and stored on a remote server.")
])
#requires_wallet_type = ['2of2', '2of3']
available_for = ['qt']
| 38.5 | 91 | 0.722078 |
c7d9b5c1298d974acb6a23c5f8a4c88b24b93ea9 | 140 | py | Python | exercicios/desafio 61.py | ibianco91/curso_em_video | 13829b5d2e2290fcffe47ef0ab902b5e4a24a0ed | [
"MIT"
] | null | null | null | exercicios/desafio 61.py | ibianco91/curso_em_video | 13829b5d2e2290fcffe47ef0ab902b5e4a24a0ed | [
"MIT"
] | null | null | null | exercicios/desafio 61.py | ibianco91/curso_em_video | 13829b5d2e2290fcffe47ef0ab902b5e4a24a0ed | [
"MIT"
] | null | null | null | a = int(input('Digite o primeiro termo: '))
r = int(input('Digite a razão: '))
c = 1
while c <= 10:
a +=r
c +=1
print(a-r)
| 10.769231 | 43 | 0.507143 |
ba1eca4b3fc14c97148de3f4410fd3667d5eae73 | 39,219 | py | Python | pyvcloud/vcd/org.py | pacogomez/pyvcloud | 731aded20b999d269472caf65df774c284dd49b6 | [
"Apache-2.0"
] | null | null | null | pyvcloud/vcd/org.py | pacogomez/pyvcloud | 731aded20b999d269472caf65df774c284dd49b6 | [
"Apache-2.0"
] | 1 | 2017-12-28T13:50:54.000Z | 2017-12-28T17:28:15.000Z | pyvcloud/vcd/org.py | pacogomez/pyvcloud | 731aded20b999d269472caf65df774c284dd49b6 | [
"Apache-2.0"
] | 1 | 2017-12-28T10:22:55.000Z | 2017-12-28T10:22:55.000Z | # VMware vCloud Director Python SDK
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tarfile
import tempfile
import time
import traceback
from lxml import etree
from lxml import objectify
import os
from pyvcloud.vcd.acl import Acl
from pyvcloud.vcd.client import _TaskMonitor
from pyvcloud.vcd.client import E
from pyvcloud.vcd.client import E_OVF
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import find_link
from pyvcloud.vcd.client import get_links
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import QueryResultFormat
from pyvcloud.vcd.client import RelationType
from pyvcloud.vcd.system import System
from pyvcloud.vcd.utils import to_dict
DEFAULT_CHUNK_SIZE = 1024 * 1024
class Org(object):
    def __init__(self, client, href=None, resource=None):
        """Constructor for Org objects.

        :param client: (pyvcloud.vcd.client): The client.
        :param href: (str): URI of the entity.
        :param resource: (lxml.objectify.ObjectifiedElement): XML
            representation of the entity.
        """
        self.client = client
        self.href = href
        self.resource = resource
        if resource is not None:
            # Prefer the href embedded in the XML over the one passed in.
            self.href = resource.get('href')
        # Admin-view URI of the same org, used for privileged operations.
        self.href_admin = self.href.replace('/api/org/', '/api/admin/org/')
    def reload(self):
        """Re-fetch this org's XML representation from vCD."""
        self.resource = self.client.get_resource(self.href)
    def get_name(self):
        """Return the name of the organization, fetching the XML lazily."""
        if self.resource is None:
            self.resource = self.client.get_resource(self.href)
        return self.resource.get('name')
def create_catalog(self, name, description):
if self.resource is None:
self.resource = self.client.get_resource(self.href)
catalog = E.AdminCatalog(E.Description(description), name=name)
return self.client.post_linked_resource(
self.resource, RelationType.ADD, EntityType.ADMIN_CATALOG.value,
catalog)
    def create_role(self, role_name, description, rights):
        """Creates a role in the organization

        :param role_name: (str): name of the role to be created
        :param description: (str): description of the role
        :param rights: (tuple of (str)) names of zero or more rights to be
            associated with the role
        :return: RoleType just created
        """
        org_admin_resource = self.client.get_resource(self.href_admin)
        role = E.Role(
            E.Description(description), E.RightReferences(), name=role_name)
        if rights is None:
            rights = ()
        for right in tuple(rights):
            # Resolve each right name to its record so the reference can
            # carry the right's canonical name and href.
            right_record = self.get_right(right)
            role.RightReferences.append(
                E.RightReference(
                    name=right_record.get('name'),
                    href=right_record.get('href'),
                    type=EntityType.RIGHT.value))
        return self.client.post_linked_resource(
            org_admin_resource, RelationType.ADD, EntityType.ROLE.value, role)
def delete_role(self, name):
"""Deletes specified role from the organization
:param name: (str): name of the role
:return: None
""" # NOQA
if self.resource is None:
self.resource = self.client.get_resource(self.href)
role_record = self.get_role(name)
self.client.delete_resource(role_record.get('href'))
def delete_catalog(self, name):
org = self.client.get_resource(self.href)
links = get_links(
org, rel=RelationType.DOWN, media_type=EntityType.CATALOG.value)
for link in links:
if name == link.name:
admin_href = link.href.replace('/api/catalog/',
'/api/admin/catalog/')
return self.client.delete_resource(admin_href)
raise Exception('Catalog not found.')
def list_catalogs(self):
if self.client.is_sysadmin():
resource_type = 'adminCatalog'
else:
resource_type = 'catalog'
result = []
q = self.client.get_typed_query(
resource_type, query_result_format=QueryResultFormat.ID_RECORDS)
records = list(q.execute())
if len(records) > 0:
for r in records:
result.append(
to_dict(
r,
resource_type=resource_type,
exclude=['owner', 'org']))
return result
    def get_catalog(self, name):
        """Return the (non-admin) XML representation of the named catalog."""
        return self.get_catalog_resource(name, False)
def get_catalog_resource(self, name, is_admin_operation=False):
org = self.client.get_resource(self.href)
links = get_links(
org, rel=RelationType.DOWN, media_type=EntityType.CATALOG.value)
for link in links:
if name == link.name:
href = link.href
if is_admin_operation:
href = href.replace('/api/catalog/', '/api/admin/catalog/')
return self.client.get_resource(href)
raise Exception('Catalog not found (or)'
' Access to resource is forbidden')
def update_catalog(self, old_catalog_name, new_catalog_name, description):
"""Update the name and/or description of a catalog.
:param old_catalog_name: (str): The current name of the catalog.
:param new_catalog_name: (str): The new name of the catalog.
:param description: (str): The new description of the catalog.
:return: A :class:`lxml.objectify.StringElement` object describing
the updated catalog.
"""
if self.resource is None:
self.resource = self.client.get_resource(self.href)
org = self.resource
links = get_links(
org, rel=RelationType.DOWN, media_type=EntityType.CATALOG.value)
for link in links:
if old_catalog_name == link.name:
catalog = self.client.get_resource(link.href)
href = catalog.get('href')
admin_href = href.replace('/api/catalog/',
'/api/admin/catalog/')
admin_view_of_catalog = self.client.get_resource(admin_href)
if new_catalog_name is not None:
admin_view_of_catalog.set('name', new_catalog_name)
if description is not None:
admin_view_of_catalog['Description'] = E.Description(
description)
return self.client.put_resource(
admin_href,
admin_view_of_catalog,
media_type=EntityType.ADMIN_CATALOG.value)
raise Exception('Catalog not found.')
def share_catalog(self, name, share=True):
catalog = self.get_catalog(name)
is_published = 'true' if share else 'false'
params = E.PublishCatalogParams(E.IsPublished(is_published))
href = catalog.get('href') + '/action/publish'
admin_href = href.replace('/api/catalog/', '/api/admin/catalog/')
return self.client.post_resource(
admin_href,
params,
media_type=EntityType.PUBLISH_CATALOG_PARAMS.value)
def list_catalog_items(self, name):
catalog = self.get_catalog(name)
items = []
for i in catalog.CatalogItems.getchildren():
items.append({'name': i.get('name'), 'id': i.get('id')})
return items
def get_catalog_item(self, name, item_name):
catalog = self.get_catalog(name)
for i in catalog.CatalogItems.getchildren():
if i.get('name') == item_name:
return self.client.get_resource(i.get('href'))
raise Exception('Catalog item not found.')
def delete_catalog_item(self, name, item_name):
catalog = self.get_catalog(name)
for i in catalog.CatalogItems.getchildren():
if i.get('name') == item_name:
return self.client.delete_resource(i.get('href'))
raise Exception('Item not found.')
    def upload_media(self,
                     catalog_name,
                     file_name,
                     item_name=None,
                     description='',
                     chunk_size=DEFAULT_CHUNK_SIZE,
                     callback=None):
        """Upload a media image (e.g. an ISO) to a catalog.

        :param catalog_name: (str): name of the target catalog.
        :param file_name: (str): path of the local file to upload.
        :param item_name: (str): name of the new catalog item; defaults to
            the base name of ``file_name``.
        :param description: (str): description of the new catalog item.
        :param chunk_size: (int): size in bytes of each upload fragment.
        :param callback: (function): optional progress callback invoked as
            callback(bytes_transferred, total_bytes).
        :return: (int): number of bytes uploaded.
        """
        stat_info = os.stat(file_name)
        catalog = self.get_catalog(catalog_name)
        if item_name is None:
            item_name = os.path.basename(file_name)
        # vCD derives the media type from the file extension.
        image_type = os.path.splitext(item_name)[1][1:]
        media = E.Media(
            name=item_name, size=str(stat_info.st_size), imageType=image_type)
        media.append(E.Description(description))
        catalog_item = self.client.post_resource(
            catalog.get('href') + '/action/upload', media,
            EntityType.MEDIA.value)
        # The upload link for the file lives on the new media entity.
        entity = self.client.get_resource(catalog_item.Entity.get('href'))
        file_href = entity.Files.File.Link.get('href')
        return self.upload_file(
            file_name, file_href, chunk_size=chunk_size, callback=callback)
def download_catalog_item(self,
catalog_name,
item_name,
file_name,
chunk_size=DEFAULT_CHUNK_SIZE,
callback=None,
task_callback=None):
item = self.get_catalog_item(catalog_name, item_name)
item_type = item.Entity.get('type')
enable_href = item.Entity.get('href') + '/action/enableDownload'
task = self.client.post_resource(enable_href, None, None)
tm = _TaskMonitor(self.client)
tm.wait_for_success(task, 60, 1, callback=task_callback)
item = self.client.get_resource(item.Entity.get('href'))
bytes_written = 0
if item_type == EntityType.MEDIA.value:
size = item.Files.File.get('size')
download_href = item.Files.File.Link.get('href')
bytes_written = self.client.download_from_uri(
download_href,
file_name,
chunk_size=chunk_size,
size=size,
callback=callback)
elif item_type == EntityType.VAPP_TEMPLATE.value:
ovf_descriptor = self.client.get_linked_resource(
item, RelationType.DOWNLOAD_DEFAULT, EntityType.TEXT_XML.value)
transfer_uri = find_link(item, RelationType.DOWNLOAD_DEFAULT,
EntityType.TEXT_XML.value).href
transfer_uri = transfer_uri.replace('/descriptor.ovf', '/')
tempdir = None
cwd = os.getcwd()
try:
tempdir = tempfile.mkdtemp(dir='.')
ovf_file = os.path.join(tempdir, 'descriptor.ovf')
with open(ovf_file, 'wb') as f:
payload = etree.tostring(
ovf_descriptor,
pretty_print=True,
xml_declaration=True,
encoding='utf-8')
f.write(payload)
ns = '{' + NSMAP['ovf'] + '}'
files = []
for f in ovf_descriptor.References.File:
source_file = {
'href': f.get(ns + 'href'),
'name': f.get(ns + 'id'),
'size': f.get(ns + 'size')
}
target_file = os.path.join(tempdir, source_file['href'])
uri = transfer_uri + source_file['href']
num_bytes = self.client.download_from_uri(
uri,
target_file,
chunk_size=chunk_size,
size=source_file['size'],
callback=callback)
if num_bytes != source_file['size']:
raise Exception('download incomplete for file %s' %
source_file['href'])
files.append(source_file)
with tarfile.open(file_name, 'w') as tar:
os.chdir(tempdir)
tar.add('descriptor.ovf')
for f in files:
tar.add(f['href'])
finally:
if tempdir is not None:
os.chdir(cwd)
stat_info = os.stat(file_name)
bytes_written = stat_info.st_size
return bytes_written
def upload_file(self,
file_name,
href,
chunk_size=DEFAULT_CHUNK_SIZE,
callback=None):
transferred = 0
stat_info = os.stat(file_name)
with open(file_name, 'rb') as f:
while transferred < stat_info.st_size:
my_bytes = f.read(chunk_size)
if len(my_bytes) <= chunk_size:
range_str = 'bytes %s-%s/%s' % \
(transferred,
len(my_bytes) - 1,
stat_info.st_size)
self.client.upload_fragment(href, my_bytes, range_str)
transferred += len(my_bytes)
if callback is not None:
callback(transferred, stat_info.st_size)
return transferred
def upload_ovf(self,
catalog_name,
file_name,
item_name=None,
description='',
chunk_size=DEFAULT_CHUNK_SIZE,
callback=None):
catalog = self.get_catalog(catalog_name)
if item_name is None:
item_name = os.path.basename(file_name)
tempdir = tempfile.mkdtemp(dir='.')
total_bytes = 0
try:
ova = tarfile.open(file_name)
ova.extractall(path=tempdir)
ova.close()
ovf_file = None
files = os.listdir(tempdir)
for f in files:
fn, ex = os.path.splitext(f)
if ex == '.ovf':
ovf_file = os.path.join(tempdir, f)
break
if ovf_file is not None:
stat_info = os.stat(ovf_file)
total_bytes += stat_info.st_size
ovf = objectify.parse(ovf_file)
files = []
ns = '{' + NSMAP['ovf'] + '}'
for f in ovf.getroot().References.File:
source_file = {
'href': f.get(ns + 'href'),
'name': f.get(ns + 'id'),
'size': f.get(ns + 'size')
}
files.append(source_file)
if item_name is None:
item_name = os.path.basename(file_name)
params = E.UploadVAppTemplateParams(name=item_name)
params.append(E.Description(description))
catalog_item = self.client.post_resource(
catalog.get('href') + '/action/upload', params,
EntityType.UPLOAD_VAPP_TEMPLATE_PARAMS.value)
entity = self.client.get_resource(
catalog_item.Entity.get('href'))
file_href = entity.Files.File.Link.get('href')
self.client.put_resource(file_href, ovf, 'text/xml')
while True:
time.sleep(5)
entity = self.client.get_resource(
catalog_item.Entity.get('href'))
if len(entity.Files.File) > 1:
break
for source_file in files:
for target_file in entity.Files.File:
if source_file.get('href') == target_file.get('name'):
file_path = os.path.join(tempdir,
source_file.get('href'))
total_bytes += self.upload_file(
file_path,
target_file.Link.get('href'),
chunk_size=chunk_size,
callback=callback)
shutil.rmtree(tempdir)
except Exception as e:
print(traceback.format_exc())
shutil.rmtree(tempdir)
raise e
return total_bytes
def get_vdc(self, name):
if self.resource is None:
self.resource = self.client.get_resource(self.href)
links = get_links(
self.resource,
rel=RelationType.DOWN,
media_type=EntityType.VDC.value)
for link in links:
if name == link.name:
return self.client.get_resource(link.href)
raise Exception("Vdc \'%s\' not found" % name)
def list_vdcs(self):
if self.resource is None:
self.resource = self.client.get_resource(self.href)
result = []
for v in get_links(self.resource, media_type=EntityType.VDC.value):
result.append({'name': v.name, 'href': v.href})
return result
    def capture_vapp(self,
                     catalog_resource,
                     vapp_href,
                     catalog_item_name,
                     description,
                     customize_on_instantiate=False):
        """Capture a vApp into a catalog as a vApp template.

        :param catalog_resource: XML resource of the destination catalog.
        :param vapp_href: (str): href of the source vApp.
        :param catalog_item_name: (str): name for the new catalog item.
        :param description: (str): description of the new catalog item.
        :param customize_on_instantiate: (bool): if True, mark the template
            for guest customization when it is instantiated.
        :return: response of the capture request.
        """
        contents = E.CaptureVAppParams(
            E.Description(description),
            E.Source(href=vapp_href),
            name=catalog_item_name)
        if customize_on_instantiate:
            contents.append(
                E.CustomizationSection(
                    E_OVF.Info('VApp template customization section'),
                    E.CustomizeOnInstantiate('true')))
        return self.client.post_linked_resource(
            catalog_resource,
            rel=RelationType.ADD,
            media_type=EntityType.CAPTURE_VAPP_PARAMS.value,
            contents=contents)
    def create_user(self,
                    user_name,
                    password,
                    role_href,
                    full_name='',
                    description='',
                    email='',
                    telephone='',
                    im='',
                    alert_email='',
                    alert_email_prefix='',
                    stored_vm_quota=0,
                    deployed_vm_quota=0,
                    is_group_role=False,
                    is_default_cached=False,
                    is_external=False,
                    is_alert_enabled=False,
                    is_enabled=False):
        """Create User in the current Org

        :param user_name: The username of the user
        :param password: The password of the user
        :param role_href: The href of the user role
        :param full_name: The full name of the user
        :param description: The description for the User
        :param email: The email of the user
        :param telephone: The telephone of the user
        :param im: The im address of the user
        :param alert_email: The alert email address
        :param alert_email_prefix: The string to prepend to the alert message
            subject line
        :param stored_vm_quota: The quota of vApps that this user can store
        :param deployed_vm_quota: The quota of vApps that this user can deploy
            concurrently
        :param is_group_role: Indicates if the user has a group role
        :param is_default_cached: Indicates if user should be cached
        :param is_external: Indicates if user is imported from an external
            source
        :param is_alert_enabled: The alert email address
        :param is_enabled: Enable user
        :return: (UserType) Created user object
        """
        resource_admin = self.client.get_resource(self.href_admin)
        # NOTE: the child element order below is significant -- it must
        # match the vCloud API's UserType schema sequence.
        user = E.User(
            E.Description(description),
            E.FullName(full_name),
            E.EmailAddress(email),
            E.Telephone(telephone),
            E.IsEnabled(is_enabled),
            E.IM(im),
            E.IsAlertEnabled(is_alert_enabled),
            E.AlertEmailPrefix(alert_email_prefix),
            E.AlertEmail(alert_email),
            E.IsExternal(is_external),
            E.IsDefaultCached(is_default_cached),
            E.IsGroupRole(is_group_role),
            E.StoredVmQuota(stored_vm_quota),
            E.DeployedVmQuota(deployed_vm_quota),
            E.Role(href=role_href),
            E.Password(password),
            name=user_name)
        return self.client.post_linked_resource(
            resource_admin, RelationType.ADD, EntityType.USER.value, user)
    def update_user(self, user_name, is_enabled=None):
        """Update an User

        :param user_name: (str): username of the user
        :param is_enabled: (bool): enable/disable the user
        :return: (UserType) Updated user object
        """
        user = self.get_user(user_name)
        if is_enabled is not None:
            # NOTE(review): if the fetched user XML has no IsEnabled element
            # the update is silently skipped and the unmodified user is
            # returned -- confirm this is the intended behavior.
            if hasattr(user, 'IsEnabled'):
                user['IsEnabled'] = E.IsEnabled(is_enabled)
                return self.client.put_resource(
                    user.get('href'), user, EntityType.USER.value)
        return user
    def get_user(self, user_name):
        """Retrieve user record from current Organization

        :param user_name: user name of the record to be retrieved
        :return: User record
        :raises: Exception if no user, or more than one user, matches.
        """
        if self.resource is None:
            self.resource = self.client.get_resource(self.href)
        resource_type = 'user'
        org_filter = None
        if self.client.is_sysadmin():
            # Sysadmins query across all orgs, so scope to this org.
            resource_type = 'adminUser'
            org_filter = 'org==%s' % self.resource.get('href')
        query = self.client.get_typed_query(
            resource_type,
            query_result_format=QueryResultFormat.REFERENCES,
            equality_filter=('name', user_name),
            qfilter=org_filter)
        records = list(query.execute())
        if len(records) == 0:
            raise Exception('user not found')
        elif len(records) > 1:
            raise Exception('multiple users found')
        return self.client.get_resource(records[0].get('href'))
def delete_user(self, user_name):
"""Delete user record from current organization
:param user_name: (str) name of the user that (org/sys)admins wants to
delete
:return: result of calling DELETE on the user resource
"""
user = self.get_user(user_name)
return self.client.delete_resource(user.get('href'))
def get_role(self, role_name):
"""Retrieve role object with a particular name in the current Org
:param role_name: (str): The name of the role object to be retrieved
:return: (dict): Role record in dict format
"""
role_record = self.list_roles(('name', role_name))
if len(role_record) < 1:
raise Exception('Role \'%s\' does not exist.' % role_name)
return role_record[0]
def list_roles(self, name_filter=None):
"""Retrieve the list of roles in the current Org
:param name_filter: (tuple): (name ,'role name') Filter roles by
'role name'
:return: (list): (RoleRecord) List of roles
"""
if self.resource is None:
self.resource = self.client.get_resource(self.href)
org_filter = None
resource_type = 'role'
if self.client.is_sysadmin():
resource_type = 'adminRole'
org_filter = 'org==%s' % self.resource.get('href')
query = self.client.get_typed_query(
resource_type,
query_result_format=QueryResultFormat.RECORDS,
equality_filter=name_filter,
qfilter=org_filter)
result = []
for r in list(query.execute()):
result.append(
to_dict(
r,
resource_type=resource_type,
exclude=['org', 'orgName']))
return result
def get_right(self, right_name):
"""Retrieves corresponding record of the specified right.
:param right_name: (str): The name of the right record to be retrieved
:return: (dict): Right record in dict format
"""
right_record = self.list_rights(('name', right_name))
if len(right_record) < 1:
raise Exception('Right \'%s\' does not exist.' % right_name)
return right_record[0]
def list_rights(self, name_filter=None):
"""Retrieve the list of rights in the current Org
:param name_filter: (tuple): (name ,'right name') Filter the rights by
'right name'
:return: (list): (RightRecord) List of rights
"""
if self.resource is None:
self.resource = self.client.get_resource(self.href)
resource_type = 'right'
query = self.client.get_typed_query(
resource_type,
query_result_format=QueryResultFormat.RECORDS,
equality_filter=name_filter)
records = list(query.execute())
result = []
if len(records) > 0:
for r in records:
result.append(
to_dict(r, resource_type=resource_type, exclude=[]))
return result
def get_catalog_access_control_settings(self, catalog_name):
"""Get the access control settings of a catalog.
:param catalog_name: (str): The name of the catalog.
:return: A :class:`lxml.objectify.StringElement` object representing
the updated access control setting of the catalog.
""" # NOQA
catalog_resource = self.get_catalog(name=catalog_name)
access_control_settings = self.client.get_linked_resource(
catalog_resource, RelationType.DOWN,
EntityType.CONTROL_ACCESS_PARAMS.value)
return access_control_settings
def add_catalog_access_settings(self, catalog_name,
access_settings_list=None):
"""Add access settings to a particular catalog.
:param catalog_name: (str): name of the catalog for which acl needs
to be added.
:param access_settings_list: (list of dict): list of access_setting
in the dict format. Each dict contains:
type: (str): type of the subject. One of 'org' or 'user'.
name: (str): name of the user or org.
access_level: (str): access_level of the particular subject. One of
'ReadOnly', 'Change', 'FullControl'
:return: A :class:`lxml.objectify.StringElement` object representing
the updated access control setting of the catalog.
"""
catalog_resource = self.get_catalog(name=catalog_name)
acl = Acl(self.client, catalog_resource)
return acl.add_access_settings(access_settings_list)
def remove_catalog_access_settings(self, catalog_name,
access_settings_list=None,
remove_all=False):
"""Remove access settings from a particular catalog.
:param catalog_name: (name): catalog name from which access_settings
should be removed.
:param access_settings_list: (list of dict): list of access_setting
in the dict format. Each dict contains:
type: (str): type of the subject. One of 'org' or 'user'.
name: (str): name of the user or org.
:param remove_all: (bool) : True if all access settings of the catalog
should be removed
:return: A :class:`lxml.objectify.StringElement` object representing
the updated access control setting of the catalog.
"""
catalog_resource = self.get_catalog(name=catalog_name)
acl = Acl(self.client, catalog_resource)
return acl.remove_access_settings(access_settings_list, remove_all)
def share_catalog_access(self, catalog_name,
everyone_access_level='ReadOnly'):
"""Share the catalog to all members of the organization.
:param catalog_name: (str): catalog name whose access should be
shared to everyone.
:param everyone_access_level: (str) : access level when sharing the
catalog with everyone. One of 'ReadOnly', 'Change', 'FullControl'
'ReadOnly' by default.
:return: A :class:`lxml.objectify.StringElement` object representing
the updated access control setting of the catalog.
"""
catalog_resource = self.get_catalog(name=catalog_name)
acl = Acl(self.client, catalog_resource)
return acl.share_access(everyone_access_level)
def unshare_catalog_access(self, catalog_name):
"""Unshare the catalog from all members of current organization.
:param catalog_name: (str): catalog name whose access should be
unshared from everyone.
:return: A :class:`lxml.objectify.StringElement` object representing
the updated access control setting of the resource.
"""
catalog_resource = self.get_catalog(name=catalog_name)
acl = Acl(self.client, catalog_resource)
return acl.unshare_access()
    def change_catalog_owner(self, catalog_name, user_name):
        """Change the ownership of a catalog to the given user.

        :param catalog_name: (str): catalog whose ownership is changed.
        :param user_name: (str): name of the new owner; must resolve via
            :meth:`get_user`.
        :return: result of the PUT call that updates the Owner element.
        """
        if self.resource is None:
            self.resource = self.client.get_resource(self.href)
        # Editing ownership requires the admin view of the catalog.
        catalog_resource = self.get_catalog_resource(
            catalog_name, is_admin_operation=True)
        owner_link = find_link(
            catalog_resource,
            rel=RelationType.DOWN,
            media_type=EntityType.OWNER.value,
            fail_if_absent=True)
        # NOTE(review): despite the name, this is the href of the Owner
        # sub-resource, not of the catalog itself.
        catalog_href = owner_link.href
        user_resource = self.get_user(user_name)
        # Reuse the catalog's existing Owner element and point its User
        # child at the new owner's href.
        new_owner = catalog_resource.Owner
        new_owner.User.set('href', user_resource.get('href'))
        # Strip lxml.objectify type annotations so clean XML is sent.
        objectify.deannotate(new_owner)
        return self.client.put_resource(catalog_href, new_owner,
                                        EntityType.OWNER.value)
def update_org(self, is_enabled=None):
"""Update an organization
:param is_enabled: (bool): enable/disable the organization
:return: (AdminOrgType) updated org object.
"""
org_admin_resource = self.client.get_resource(self.href_admin)
if is_enabled is not None:
if hasattr(org_admin_resource, 'IsEnabled'):
org_admin_resource['IsEnabled'] = E.IsEnabled(is_enabled)
return self.client.put_resource(self.href_admin,
org_admin_resource,
EntityType.ADMIN_ORG.value)
return org_admin_resource
def create_org_vdc(self,
vdc_name,
provider_vdc_name,
description='',
allocation_model='AllocationVApp',
cpu_units='MHz',
cpu_allocated=0,
cpu_limit=0,
mem_units='MB',
mem_allocated=0,
mem_limit=0,
nic_quota=0,
network_quota=0,
vm_quota=0,
storage_profiles=[],
resource_guaranteed_memory=None,
resource_guaranteed_cpu=None,
vcpu_in_mhz=None,
is_thin_provision=None,
network_pool_name=None,
uses_fast_provisioning=None,
over_commit_allowed=None,
vm_discovery_enabled=None,
is_enabled=True):
"""Create Organization VDC in the current Org.
:param vdc_name (str): The name of the new org vdc.
:param provider_vdc_name (str): The name of an existing provider vdc.
:param description (str): The description of the new org vdc.
:param allocation_model (str): The allocation model used by this vDC.
One of AllocationVApp, AllocationPool or ReservationPool.
:param cpu_units (str): The cpu units compute capacity allocated to
this vDC. One of MHz or GHz
:param cpu_allocated (int): Capacity that is committed to be available.
:param cpu_limit (int): Capacity limit relative to the value specified
for Allocation.
:param mem_units (str): The memory units compute capacity allocated to
this vDC. One of MB or GB.
:param mem_allocated (int): Memory capacity that is committed to be
available.
:param mem_limit (int): Memory capacity limit relative to the value
specified for Allocation.
:param nic_quota (int): Maximum number of virtual NICs allowed in this
vDC. Defaults to 0, which specifies an unlimited number.
:param network_quota (int): Maximum number of network objects that can
be deployed in this vDC. Defaults to 0, which means no networks can
be deployed.
:param vm_quota (int): The maximum number of VMs that can be created in
this vDC. Defaults to 0, which specifies an unlimited number.
:param storage_profiles: List of provider vDC storage profiles to add
to this vDC.
Each item is a dictionary that should include the following
elements:
name: (string) name of the PVDC storage profile.
enabled: (bool) True if the storage profile is enabled for this
vDC.
units: (string) Units used to define limit. One of MB or GB.
limit: (int) Max number of units allocated for this storage
profile.
default: (bool) True if this is default storage profile for
this vDC.
:param resource_guaranteed_memory (float): Percentage of allocated CPU
resources guaranteed to vApps deployed in this vDC.
Value defaults to 1.0 if the element is empty.
:param resource_guaranteed_cpu (float): Percentage of allocated memory
resources guaranteed to vApps deployed in this vDC.
Value defaults to 1.0 if the element is empty.
:param vcpu_in_mhz (int): Specifies the clock frequency, in Megahertz,
for any virtual CPU that is allocated to a VM.
:param is_thin_provision (bool): Boolean to request thin provisioning.
:param network_pool_name (str): Reference to a network pool in the
Provider vDC.
:param uses_fast_provisioning (bool): Boolean to request fast
provisioning.
:param over_commit_allowed (bool): Set to false to disallow creation of
the VDC if the AllocationModel is AllocationPool or ReservationPool
and the ComputeCapacity you specified is greater than what the
backing Provider VDC can supply. Defaults to true if empty or
missing.
:param vm_discovery_enabled (bool): True if discovery of vCenter VMs
is enabled for resource pools backing this vDC.
:param is_enabled (bool): True if this vDC is enabled for use by the
organization users.
:return: A :class:`lxml.objectify.StringElement` object describing
the new VDC.
"""
if self.resource is None:
self.resource = self.client.get_resource(self.href)
sys_admin_resource = self.client.get_admin()
system = System(self.client, admin_resource=sys_admin_resource)
pvdc = system.get_provider_vdc(provider_vdc_name)
resource_admin = self.client.get_resource(self.href_admin)
params = E.CreateVdcParams(
E.Description(description),
E.AllocationModel(allocation_model),
E.ComputeCapacity(
E.Cpu(
E.Units(cpu_units), E.Allocated(cpu_allocated),
E.Limit(cpu_limit)),
E.Memory(
E.Units(mem_units), E.Allocated(mem_allocated),
E.Limit(mem_limit))),
E.NicQuota(nic_quota),
E.NetworkQuota(network_quota),
E.VmQuota(vm_quota),
E.IsEnabled(is_enabled),
name=vdc_name)
for sp in storage_profiles:
pvdc_sp = system.get_provider_vdc_storage_profile(sp['name'])
params.append(
E.VdcStorageProfile(
E.Enabled(sp['enabled']),
E.Units(sp['units']),
E.Limit(sp['limit']),
E.Default(sp['default']),
E.ProviderVdcStorageProfile(href=pvdc_sp.get('href'))))
if resource_guaranteed_memory is not None:
params.append(
E.ResourceGuaranteedMemory(resource_guaranteed_memory))
if resource_guaranteed_cpu is not None:
params.append(E.ResourceGuaranteedCpu(resource_guaranteed_cpu))
if vcpu_in_mhz is not None:
params.append(E.VCpuInMhz(vcpu_in_mhz))
if is_thin_provision is not None:
params.append(E.IsThinProvision(is_thin_provision))
if network_pool_name is not None:
npr = system.get_network_pool_reference(network_pool_name)
href = npr.get('href')
params.append(
E.NetworkPoolReference(
href=href,
id=href.split('/')[-1],
type=npr.get('type'),
name=npr.get('name')))
params.append(pvdc)
if uses_fast_provisioning is not None:
params.append(E.UsesFastProvisioning(uses_fast_provisioning))
if over_commit_allowed is not None:
params.append(E.OverCommitAllowed(over_commit_allowed))
if vm_discovery_enabled is not None:
params.append(E.VmDiscoveryEnabled(vm_discovery_enabled))
return self.client.post_linked_resource(
resource_admin, RelationType.ADD, EntityType.VDCS_PARAMS.value,
params)
| 43.240353 | 79 | 0.577118 |
c6306d2d5e96eb50b40f4580fa2008da4fb4d13a | 266 | py | Python | objects/node.py | emmaskychuks/cis479-search-algorithms | f77c709acac6559dd175485e8079e5fe76dd4b83 | [
"Apache-2.0"
] | null | null | null | objects/node.py | emmaskychuks/cis479-search-algorithms | f77c709acac6559dd175485e8079e5fe76dd4b83 | [
"Apache-2.0"
] | null | null | null | objects/node.py | emmaskychuks/cis479-search-algorithms | f77c709acac6559dd175485e8079e5fe76dd4b83 | [
"Apache-2.0"
] | null | null | null | class Node:
def __init__(self, x, y):
self.x = x
self.y = y
self.visited = False
self.value = "[]"
self.cost = 0
self.aStarCost = 0
def __lt__(self, other):
return self.aStarCost < other.aStarCost
| 22.166667 | 47 | 0.515038 |
c07031c7db03b69ec4a9fd0719ceec2ec49853ed | 4,342 | py | Python | contrib/seeds/generate-seeds.py | SinduNagalingam/KunoCoin | 4f4b3b5ede5a0780768b241d6f2d7f410cd14609 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | SinduNagalingam/KunoCoin | 4f4b3b5ede5a0780768b241d6f2d7f410cd14609 | [
"MIT"
] | 1 | 2019-01-04T13:55:16.000Z | 2019-01-31T09:54:47.000Z | contrib/seeds/generate-seeds.py | SinduNagalingam/KunoCoin | 4f4b3b5ede5a0780768b241d6f2d7f410cd14609 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# IPv4-mapped IPv6 prefix (::ffff:0:0/96); dotted-quad addresses are
# embedded behind these 12 bytes.
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# OnionCat prefix (fd87:d87e:eb43::/48) used to embed Tor .onion
# addresses inside an IPv6 address.
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string to a 16-byte IPv6-style bytearray.

    Accepts a ``.onion`` hostname (mapped into the OnionCat prefix), a
    dotted IPv4 address (mapped into ::ffff:0:0/96), a colon-separated
    IPv6 address, or a ``0x``-prefixed little-endian IPv4 word (the
    legacy pnSeeds format).

    :raises ValueError: if the address cannot be parsed.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # Bug fix: this previously referenced an undefined name 's',
            # raising NameError instead of the intended ValueError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0  # 0 while filling the prefix, 1 after the '::' gap
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                # skip empty component at beginning or end
                if i == 0 or i == (len(addr)-1):
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into a (host, port) pair.

    :param s: spec such as '1.2.3.4:8333', '[::1]:8333', '::1' or an
        onion/hex form accepted by :func:`name_to_ipv6`.
    :param defaultport: port used when the spec does not name one.
    :return: tuple of (16-byte bytearray host, int port).
    """
    # Raw string: the pattern previously used '\[' in a plain string,
    # which is an invalid escape sequence (DeprecationWarning on
    # modern Python, a SyntaxError in the future).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # bracketed ipv6, optionally with port
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # bare ipv6, no port possible
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Read seed specs from file object *f* and write them to *g* as a
    C array of SeedSpec6 entries named *structname*.

    Lines may carry '#' comments; blank lines are skipped.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    first = True
    for raw in f:
        # Strip trailing comments and surrounding whitespace.
        hash_pos = raw.find('#')
        line = (raw if hash_pos == -1 else raw[0:hash_pos]).strip()
        if not line:
            continue
        if not first:
            g.write(',\n')
        first = False
        host, port = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write('    {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    """Read nodes_main.txt / nodes_test.txt from the directory given as
    argv[1] and print the chainparamsseeds header to stdout."""
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    header = (
        '#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n'
        '#define BITCOIN_CHAINPARAMSSEEDS_H\n'
        '/**\n'
        ' * List of fixed seed nodes for the kunocoin network\n'
        ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n'
        ' *\n'
        ' * Each line contains a 16-byte IPv6 address and a port.\n'
        ' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n'
        ' */\n'
    )
    g.write(header)
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 9333)
    g.write('\n')
    with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 19335)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')

if __name__ == '__main__':
    main()
| 31.23741 | 98 | 0.579687 |
650f6896cbbdfc3766cd158e3fbf0e43e4fb48ce | 33,517 | py | Python | src/sentry/testutils/factories.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 2 | 2019-03-04T12:45:54.000Z | 2019-03-04T12:45:55.000Z | src/sentry/testutils/factories.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | src/sentry/testutils/factories.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.conf import settings
from django.utils.importlib import import_module
import copy
import io
import os
import petname
import random
import six
import warnings
from django.utils import timezone
from django.utils.text import slugify
from hashlib import sha1
from loremipsum import Generator
from uuid import uuid4
from sentry.event_manager import EventManager
from sentry.constants import SentryAppStatus
from sentry.incidents.models import (
Incident,
IncidentGroup,
IncidentProject,
IncidentSeen,
IncidentActivity,
)
from sentry.mediators import sentry_apps, sentry_app_installations, service_hooks
from sentry.models import (
Activity, Environment, Event, EventError, EventMapping, Group, Organization, OrganizationMember,
OrganizationMemberTeam, Project, ProjectBookmark, Team, User, UserEmail, Release, Commit, ReleaseCommit,
CommitAuthor, Repository, CommitFileChange, ProjectDebugFile, File, UserPermission, EventAttachment,
UserReport, PlatformExternalIssue,
)
from sentry.models.integrationfeature import Feature, IntegrationFeature
from sentry.utils import json
from sentry.utils.canonical import CanonicalKeyDict
loremipsum = Generator()
def get_fixture_path(name):
    """Return the path of *name* under the repository's tests/fixtures
    directory, expressed relative to this module's location."""
    here = os.path.dirname(__file__)  # src/sentry/testutils/
    up_three = [os.pardir] * 3        # climb to the repository root
    return os.path.join(here, *(up_three + ['tests', 'fixtures', name]))
def make_sentence(words=None):
    """Build a pseudo-random lorem-ipsum sentence.

    :param words: number of words; when None a Weibull-distributed count
        (scale 8, shape 3) is drawn.
    """
    count = int(random.weibullvariate(8, 3)) if words is None else words
    picked = [random.choice(loremipsum.words) for _ in range(count)]
    return ' '.join(picked)
def make_word(words=None):
    """Return a single random lorem-ipsum word.

    :param words: ignored; kept for backward compatibility with existing
        callers. (The previous implementation computed a word count from
        it but never used the result — dead code, removed here. Note this
        also drops one unused draw from the global `random` stream.)
    """
    return random.choice(loremipsum.words)
DEFAULT_EVENT_DATA = {
'extra': {
'loadavg': [0.97607421875, 0.88330078125, 0.833984375],
'sys.argv': [
'/Users/dcramer/.virtualenvs/sentry/bin/raven', 'test',
'https://ebc35f33e151401f9deac549978bda11:f3403f81e12e4c24942d505f086b2cad@sentry.io/1'
],
'user':
'dcramer'
},
'modules': {
'raven': '3.1.13'
},
'request': {
'cookies': {},
'data': {},
'env': {},
'headers': {},
'method': 'GET',
'query_string': '',
'url': 'http://example.com',
},
'stacktrace': {
'frames': [
{
'abs_path':
'www/src/sentry/models/foo.py',
'context_line':
' string_max_length=self.string_max_length)',
'filename':
'sentry/models/foo.py',
'function':
'build_msg',
'in_app':
True,
'lineno':
29,
'module':
'raven.base',
'post_context': [
' },', ' })', '',
" if 'stacktrace' in data:",
' if self.include_paths:'
],
'pre_context': [
'', ' data.update({',
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
' list_max_length=self.list_max_length,'
],
'vars': {
'culprit': 'raven.scripts.runner',
'date': 'datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)',
'event_id': '598fb19363e745ec8be665e6ba88b1b2',
'event_type': 'raven.events.Message',
'frames': '<generator object iter_stack_frames at 0x103fef050>',
'handler': '<raven.events.Message object at 0x103feb710>',
'k': 'logentry',
'public_key': None,
'result': {
'logentry':
"{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
'self': '<raven.base.Client object at 0x104397f10>',
'stack': True,
'tags': None,
'time_spent': None,
},
},
{
'abs_path':
'/Users/dcramer/.virtualenvs/sentry/lib/python2.7/site-packages/raven/base.py',
'context_line':
' string_max_length=self.string_max_length)',
'filename':
'raven/base.py',
'function':
'build_msg',
'in_app':
False,
'lineno':
290,
'module':
'raven.base',
'post_context': [
' },', ' })', '',
" if 'stacktrace' in data:",
' if self.include_paths:'
],
'pre_context': [
'', ' data.update({',
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
' list_max_length=self.list_max_length,'
],
'vars': {
'culprit': 'raven.scripts.runner',
'date': 'datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)',
'event_id': '598fb19363e745ec8be665e6ba88b1b2',
'event_type': 'raven.events.Message',
'frames': '<generator object iter_stack_frames at 0x103fef050>',
'handler': '<raven.events.Message object at 0x103feb710>',
'k': 'logentry',
'public_key': None,
'result': {
'logentry':
"{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
'self': '<raven.base.Client object at 0x104397f10>',
'stack': True,
'tags': None,
'time_spent': None,
},
},
],
},
'tags': [],
'platform': 'python',
}
def _patch_artifact_manifest(path, org, release, project=None):
manifest = json.loads(open(path, 'rb').read())
manifest['org'] = org
manifest['release'] = release
if project:
manifest['project'] = project
return json.dumps(manifest)
# TODO(dcramer): consider moving to something more scaleable like factoryboy
class Factories(object):
@staticmethod
def create_organization(name=None, owner=None, **kwargs):
if not name:
name = petname.Generate(2, ' ', letters=10).title()
org = Organization.objects.create(name=name, **kwargs)
if owner:
Factories.create_member(
organization=org,
user=owner,
role='owner',
)
return org
@staticmethod
def create_member(teams=None, **kwargs):
kwargs.setdefault('role', 'member')
om = OrganizationMember.objects.create(**kwargs)
if teams:
for team in teams:
Factories.create_team_membership(
team=team,
member=om,
)
return om
@staticmethod
def create_team_membership(team, member=None, user=None):
if member is None:
member, _ = OrganizationMember.objects.get_or_create(
user=user,
organization=team.organization,
defaults={
'role': 'member',
}
)
return OrganizationMemberTeam.objects.create(
team=team,
organizationmember=member,
is_active=True,
)
@staticmethod
def create_team(organization, **kwargs):
if not kwargs.get('name'):
kwargs['name'] = petname.Generate(2, ' ', letters=10).title()
if not kwargs.get('slug'):
kwargs['slug'] = slugify(six.text_type(kwargs['name']))
members = kwargs.pop('members', None)
team = Team.objects.create(organization=organization, **kwargs)
if members:
for user in members:
Factories.create_team_membership(team=team, user=user)
return team
@staticmethod
def create_environment(project, **kwargs):
name = kwargs.get('name', petname.Generate(3, ' ', letters=10)[:64])
env = Environment.objects.create(
organization_id=project.organization_id,
project_id=project.id,
name=name,
)
env.add_project(project, is_hidden=kwargs.get('is_hidden'))
return env
@staticmethod
def create_project(organization=None, teams=None, **kwargs):
if not kwargs.get('name'):
kwargs['name'] = petname.Generate(2, ' ', letters=10).title()
if not kwargs.get('slug'):
kwargs['slug'] = slugify(six.text_type(kwargs['name']))
if not organization and teams:
organization = teams[0].organization
project = Project.objects.create(organization=organization, **kwargs)
if teams:
for team in teams:
project.add_team(team)
return project
@staticmethod
def create_project_bookmark(project, user):
return ProjectBookmark.objects.create(project_id=project.id, user=user)
@staticmethod
def create_project_key(project):
return project.key_set.get_or_create()[0]
    @staticmethod
    def create_release(project, user=None, version=None, date_added=None):
        """Create a Release for *project* and record the release Activity.

        When *user* is given, a repository, commit author and one commit
        are also created and attached to the release.

        :param project: project the release belongs to.
        :param user: optional author; triggers repo/commit creation.
        :param version: release version; random hex when None.
            NOTE(review): uses ``str.encode('hex')``, which only exists on
            Python 2.
        :param date_added: creation timestamp; defaults to now.
        :return: the created Release.
        """
        if version is None:
            version = os.urandom(20).encode('hex')
        if date_added is None:
            date_added = timezone.now()
        release = Release.objects.create(
            version=version,
            organization_id=project.organization_id,
            date_added=date_added,
        )
        release.add_project(project)
        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=Activity.get_version_ident(version),
            user=user,
            data={'version': version},
        )
        # add commits
        if user:
            author = Factories.create_commit_author(project=project, user=user)
            repo = Factories.create_repo(project, name='organization-{}'.format(project.slug))
            commit = Factories.create_commit(
                project=project,
                repo=repo,
                author=author,
                release=release,
                key='deadbeef',
                message='placeholder commit message',
            )
            # Denormalized release stats that the UI expects.
            release.update(
                authors=[six.text_type(author.id)],
                commit_count=1,
                last_commit_id=commit.id,
            )
        return release
@staticmethod
def create_artifact_bundle(org, release, project=None):
import zipfile
bundle = io.BytesIO()
bundle_dir = get_fixture_path('artifact_bundle')
with zipfile.ZipFile(bundle, 'w', zipfile.ZIP_DEFLATED) as zipfile:
for path, _, files in os.walk(bundle_dir):
for filename in files:
fullpath = os.path.join(path, filename)
relpath = os.path.relpath(fullpath, bundle_dir)
if filename == 'manifest.json':
manifest = _patch_artifact_manifest(fullpath, org, release, project)
zipfile.writestr(relpath, manifest)
else:
zipfile.write(fullpath, relpath)
return bundle.getvalue()
@staticmethod
def create_repo(project, name=None):
repo = Repository.objects.create(
organization_id=project.organization_id,
name=name or '{}-{}'.format(petname.Generate(2, '',
letters=10), random.randint(1000, 9999)),
)
return repo
    @staticmethod
    def create_commit(repo, project=None, author=None, release=None,
                      message=None, key=None, date_added=None):
        """Get or create a Commit in *repo*, with random message, author
        and key defaults, and record three sample file changes.

        When *release* is given (which requires *project*), the commit is
        also linked to the release.

        :return: the Commit.
        """
        commit = Commit.objects.get_or_create(
            organization_id=repo.organization_id,
            repository_id=repo.id,
            key=key or sha1(uuid4().hex).hexdigest(),
            defaults={
                'message': message or make_sentence(),
                'author': author or Factories.create_commit_author(organization_id=repo.organization_id),
                'date_added': date_added or timezone.now(),
            }
        )[0]
        if release:
            # The release link row needs a project id.
            assert project
            ReleaseCommit.objects.create(
                organization_id=repo.organization_id,
                project_id=project.id,
                release=release,
                commit=commit,
                order=1,
            )
        Factories.create_commit_file_change(commit=commit, filename='/models/foo.py')
        Factories.create_commit_file_change(commit=commit, filename='/worsematch/foo.py')
        Factories.create_commit_file_change(commit=commit, filename='/models/other.py')
        return commit
@staticmethod
def create_commit_author(organization_id=None, project=None, user=None):
return CommitAuthor.objects.get_or_create(
organization_id=organization_id or project.organization_id,
email=user.email if user else '{}@example.com'.format(make_word()),
defaults={
'name': user.name if user else make_word(),
}
)[0]
@staticmethod
def create_commit_file_change(commit, filename):
return CommitFileChange.objects.get_or_create(
organization_id=commit.organization_id,
commit=commit,
filename=filename,
type='M',
)
@staticmethod
def create_user(email=None, **kwargs):
if email is None:
email = uuid4().hex + '@example.com'
kwargs.setdefault('username', email)
kwargs.setdefault('is_staff', True)
kwargs.setdefault('is_active', True)
kwargs.setdefault('is_superuser', False)
user = User(email=email, **kwargs)
if not kwargs.get('password'):
user.set_password('admin')
user.save()
# UserEmail is created by a signal
assert UserEmail.objects.filter(
user=user,
email=email,
).update(is_verified=True)
return user
@staticmethod
def create_useremail(user, email, **kwargs):
if not email:
email = uuid4().hex + '@example.com'
kwargs.setdefault('is_verified', True)
useremail = UserEmail(user=user, email=email, **kwargs)
useremail.save()
return useremail
    @staticmethod
    def create_event(group, event_id=None, normalize=True, **kwargs):
        """Create an Event attached to *group* (legacy path).

        XXX: Do not use this method for new tests! Prefer `store_event`,
        which exercises the real ingestion pipeline.

        :param group: the Group the event belongs to.
        :param event_id: hex event id; random when None.
        :param normalize: run EventManager normalization over the payload.
        :param kwargs: 'data', 'tags', 'stacktrace', 'user', 'message',
            'platform' and 'project' overrides.
        """
        if event_id is None:
            event_id = uuid4().hex
        kwargs.setdefault('project', group.project)
        kwargs.setdefault('data', copy.deepcopy(DEFAULT_EVENT_DATA))
        kwargs.setdefault('platform', kwargs['data'].get('platform', 'python'))
        kwargs.setdefault('message', kwargs['data'].get('message', 'message'))
        # tags may be passed as a dict or a list of pairs; the payload
        # stores them as a list.
        if kwargs.get('tags'):
            tags = kwargs.pop('tags')
            if isinstance(tags, dict):
                tags = list(tags.items())
            kwargs['data']['tags'] = tags
        if kwargs.get('stacktrace'):
            stacktrace = kwargs.pop('stacktrace')
            kwargs['data']['stacktrace'] = stacktrace
        user = kwargs.pop('user', None)
        if user is not None:
            kwargs['data']['user'] = user
        kwargs['data'].setdefault(
            'errors', [{
                'type': EventError.INVALID_DATA,
                'name': 'foobar',
            }]
        )
        # maintain simple event Factories by supporting the legacy message
        # parameter just like our API would
        if 'logentry' not in kwargs['data']:
            kwargs['data']['logentry'] = {
                'message': kwargs['message'] or '<unlabeled event>',
            }
        if normalize:
            manager = EventManager(CanonicalKeyDict(kwargs['data']))
            manager.normalize()
            kwargs['data'] = manager.get_data()
            kwargs['data'].update(manager.materialize_metadata())
            kwargs['message'] = manager.get_search_message()
        # This is needed so that create_event saves the event in nodestore
        # under the correct key. This is usually done in EventManager.save()
        kwargs['data'].setdefault(
            'node_id',
            Event.generate_node_id(kwargs['project'].id, event_id)
        )
        event = Event(event_id=event_id, group=group, **kwargs)
        # Keep the legacy EventMapping row in sync for event_id lookups.
        EventMapping.objects.create(
            project_id=event.project.id,
            event_id=event_id,
            group=group,
        )
        # emulate EventManager refs
        event.data.bind_ref(event)
        event.save()
        return event
@staticmethod
def store_event(data, project_id, assert_no_errors=True):
# Like `create_event`, but closer to how events are actually
# ingested. Prefer to use this method over `create_event`
manager = EventManager(data)
manager.normalize()
if assert_no_errors:
errors = manager.get_data().get('errors')
assert not errors, errors
event = manager.save(project_id)
event.group.save()
return event
    @staticmethod
    def create_full_event(group, event_id='a', **kwargs):
        """Create an Event from a canned, fully-populated JavaScript payload.

        The payload below has already been through sourcemap processing, so
        it is stored with ``normalize=False`` to preserve ``frame.data``
        fields such as ``orig_filename`` (normalizing would strip them).

        :param group: Group the event is attached to
        :param event_id: event id passed through to ``create_event``
        :returns: the created Event
        """
        payload = """
        {
            "event_id": "f5dd88e612bc406ba89dfebd09120769",
            "project": 11276,
            "release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
            "platform": "javascript",
            "culprit": "app/components/events/eventEntries in map",
            "logentry": {"formatted": "TypeError: Cannot read property '1' of null"},
            "tags": [
                ["environment", "prod"],
                ["sentry_version", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
                ["level", "error"],
                ["logger", "javascript"],
                ["sentry:release", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
                ["browser", "Chrome 48.0"],
                ["device", "Other"],
                ["os", "Windows 10"],
                ["url", "https://sentry.io/katon-direct/localhost/issues/112734598/"],
                ["sentry:user", "id:41656"]
            ],
            "errors": [{
                "url": "<anonymous>",
                "type": "js_no_source"
            }],
            "extra": {
                "session:duration": 40364
            },
            "exception": {
                "exc_omitted": null,
                "values": [{
                    "stacktrace": {
                        "frames": [{
                            "function": "batchedUpdates",
                            "abs_path": "webpack:////usr/src/getsentry/src/sentry/~/react/lib/ReactUpdates.js",
                            "pre_context": [" // verify that that's the case. (This is called by each top-level update", " // function, like setProps, setState, forceUpdate, etc.; creation and", " // destruction of top-level components is guarded in ReactMount.)", "", " if (!batchingStrategy.isBatchingUpdates) {"],
                            "post_context": [" return;", " }", "", " dirtyComponents.push(component);", "}"],
                            "filename": "~/react/lib/ReactUpdates.js",
                            "module": "react/lib/ReactUpdates",
                            "colno": 0,
                            "in_app": false,
                            "data": {
                                "orig_filename": "/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
                                "orig_abs_path": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
                                "sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map",
                                "orig_lineno": 37,
                                "orig_function": "Object.s [as enqueueUpdate]",
                                "orig_colno": 16101
                            },
                            "context_line": " batchingStrategy.batchedUpdates(enqueueUpdate, component);",
                            "lineno": 176
                        }],
                        "frames_omitted": null
                    },
                    "type": "TypeError",
                    "value": "Cannot read property '1' of null",
                    "module": null
                }]
            },
            "request": {
                "url": "https://sentry.io/katon-direct/localhost/issues/112734598/",
                "headers": [
                    ["Referer", "https://sentry.io/welcome/"],
                    ["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"]
                ]
            },
            "user": {
                "ip_address": "0.0.0.0",
                "id": "41656",
                "email": "test@example.com"
            },
            "version": "7",
            "breadcrumbs": {
                "values": [
                    {
                        "category": "xhr",
                        "timestamp": 1496395011.63,
                        "type": "http",
                        "data": {
                            "url": "/api/path/here",
                            "status_code": "500",
                            "method": "POST"
                        }
                    }
                ]
            }
        }"""
        event = Factories.create_event(
            group=group,
            event_id=event_id, platform='javascript',
            data=json.loads(payload),
            # This payload already went through sourcemap
            # processing, normalizing it would remove
            # frame.data (orig_filename, etc)
            normalize=False
        )
        return event
@staticmethod
def create_group(project, checksum=None, **kwargs):
if checksum:
warnings.warn('Checksum passed to create_group', DeprecationWarning)
kwargs.setdefault('message', 'Hello world')
kwargs.setdefault('data', {})
if 'type' not in kwargs['data']:
kwargs['data'].update(
{
'type': 'default',
'metadata': {
'title': kwargs['message'],
},
}
)
if 'short_id' not in kwargs:
kwargs['short_id'] = project.next_short_id()
return Group.objects.create(project=project, **kwargs)
    @staticmethod
    def create_file(**kwargs):
        """Create a File model instance with the given field values."""
        return File.objects.create(**kwargs)
@staticmethod
def create_file_from_path(path, name=None, **kwargs):
if name is None:
name = os.path.basename(path)
file = Factories.create_file(name=name, **kwargs)
with open(path) as f:
file.putfile(f)
return file
    @staticmethod
    def create_event_attachment(event, file=None, **kwargs):
        """Attach a file to *event*; a small default text file is created
        when no ``file`` is supplied."""
        if file is None:
            file = Factories.create_file(
                name='log.txt',
                size=32,
                headers={'Content-Type': 'text/plain'},
                checksum='dc1e3f3e411979d336c3057cce64294f3420f93a',
            )
        return EventAttachment.objects.create(
            project_id=event.project_id,
            group_id=event.group_id,
            event_id=event.event_id,
            file=file,
            **kwargs
        )
    @staticmethod
    def create_dif_file(project, debug_id=None, object_name=None,
                        features=None, data=None, file=None, cpu_name=None,
                        code_id=None, **kwargs):
        """Create a ProjectDebugFile (dSYM-style debug information file).

        A random ``debug_id`` and a matching ``<debug_id>.dSYM`` object name
        are generated when not given; a placeholder backing File is created
        when ``file`` is None.

        NOTE(review): when *features* is given together with a caller-supplied
        *data* dict, that dict is mutated in place.
        """
        if debug_id is None:
            debug_id = six.text_type(uuid4())
        if object_name is None:
            object_name = '%s.dSYM' % debug_id
        if features is not None:
            if data is None:
                data = {}
            data['features'] = features
        if file is None:
            file = Factories.create_file(
                name=object_name,
                size=42,
                headers={'Content-Type': 'application/x-mach-binary'},
                checksum='dc1e3f3e411979d336c3057cce64294f3420f93a',
            )
        return ProjectDebugFile.objects.create(
            debug_id=debug_id,
            code_id=code_id,
            project=project,
            object_name=object_name,
            cpu_name=cpu_name or 'x86_64',
            file=file,
            data=data,
            **kwargs
        )
    @staticmethod
    def create_dif_from_path(path, object_name=None, **kwargs):
        """Create a ProjectDebugFile whose backing File is read from *path*."""
        if object_name is None:
            object_name = os.path.basename(path)
        headers = {'Content-Type': 'application/x-mach-binary'}
        file = Factories.create_file_from_path(path, name=object_name, headers=headers)
        return Factories.create_dif_file(file=file, object_name=object_name, **kwargs)
    @staticmethod
    def add_user_permission(user, permission):
        """Grant *user* the named superuser permission."""
        UserPermission.objects.create(user=user, permission=permission)
    @staticmethod
    def create_sentry_app(**kwargs):
        """Create a SentryApp; pass ``published=True`` to mark it published."""
        app = sentry_apps.Creator.run(
            **Factories._sentry_app_kwargs(**kwargs)
        )
        if kwargs.get('published'):
            app.update(status=SentryAppStatus.PUBLISHED)
        return app
    @staticmethod
    def create_internal_integration(**kwargs):
        """Create an internal (non-published) Sentry App integration."""
        return sentry_apps.InternalCreator.run(
            **Factories._sentry_app_kwargs(**kwargs)
        )
@staticmethod
def _sentry_app_kwargs(**kwargs):
_kwargs = {
'user': kwargs.get('user', Factories.create_user()),
'name': kwargs.get('name', petname.Generate(2, ' ', letters=10).title()),
'organization': kwargs.get('organization', Factories.create_organization()),
'author': kwargs.get('author', 'A Company'),
'scopes': kwargs.get('scopes', ()),
'webhook_url': kwargs.get('webhook_url', 'https://example.com/webhook'),
'events': [],
'schema': {},
}
_kwargs.update(**kwargs)
return _kwargs
    @staticmethod
    def create_sentry_app_installation(organization=None, slug=None, user=None):
        """Install a Sentry App (created on demand) into *organization*.

        When no organization is given, a fresh one is created along with a
        project, since the installation flow expects the org to have one.
        """
        if not organization:
            organization = Factories.create_organization()
            Factories.create_project(organization=organization)
        return sentry_app_installations.Creator.run(
            slug=(slug or Factories.create_sentry_app().slug),
            organization=organization,
            user=(user or Factories.create_user()),
        )
@staticmethod
def create_issue_link_schema():
return {
'type': 'issue-link',
'link': {
'uri': '/sentry/issues/link',
'required_fields': [
{
'type': 'select',
'name': 'assignee',
'label': 'Assignee',
'uri': '/sentry/members',
},
],
},
'create': {
'uri': '/sentry/issues/create',
'required_fields': [
{
'type': 'text',
'name': 'title',
'label': 'Title',
},
{
'type': 'text',
'name': 'summary',
'label': 'Summary',
},
],
'optional_fields': [
{
'type': 'select',
'name': 'points',
'label': 'Points',
'options': [
['1', '1'],
['2', '2'],
['3', '3'],
['5', '5'],
['8', '8'],
],
},
{
'type': 'select',
'name': 'assignee',
'label': 'Assignee',
'uri': '/sentry/members',
},
],
},
}
@staticmethod
def create_alert_rule_action_schema():
return {
'type': 'alert-rule-action',
'required_fields': [{
'type': 'text',
'name': 'channel',
'label': 'Channel',
}],
}
@staticmethod
def create_service_hook(actor=None, org=None, project=None,
events=None, url=None, **kwargs):
if not actor:
actor = Factories.create_user()
if not org:
org = Factories.create_organization(owner=actor)
if not project:
project = Factories.create_project(organization=org)
if events is None:
events = ('event.created',)
if not url:
url = 'https://example.com/sentry/webhook'
_kwargs = {
'actor': actor,
'projects': [project],
'organization': org,
'events': events,
'url': url,
}
_kwargs.update(kwargs)
return service_hooks.Creator.run(**_kwargs)
    @staticmethod
    def create_sentry_app_feature(feature=None, sentry_app=None, description=None):
        """Create an IntegrationFeature for *sentry_app* (created on demand).

        ``feature`` defaults to Feature.API; ``description`` optionally sets
        the user-visible feature description.
        """
        if not sentry_app:
            sentry_app = Factories.create_sentry_app()
        integration_feature = IntegrationFeature.objects.create(
            sentry_app=sentry_app,
            feature=feature or Feature.API,
        )
        if description:
            integration_feature.update(user_description=description)
        return integration_feature
    @staticmethod
    def create_userreport(group, project=None, event_id=None, **kwargs):
        """Create a UserReport for *group* with placeholder reporter data."""
        return UserReport.objects.create(
            group=group,
            event_id=event_id or 'a' * 32,
            project=project or group.project,
            name='Jane Doe',
            email='jane@example.com',
            comments="the application crashed",
            **kwargs
        )
    @staticmethod
    def create_session():
        """Create and persist a session using the configured session engine."""
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session.save()
        return session
    @staticmethod
    def create_platform_external_issue(group=None, service_type=None,
                                       display_name=None, web_url=None):
        """Create a PlatformExternalIssue linked to *group*.

        NOTE(review): despite the ``None`` default, *group* is effectively
        required — ``group.id`` is dereferenced unconditionally below.
        """
        return PlatformExternalIssue.objects.create(
            group_id=group.id,
            service_type=service_type,
            display_name=display_name,
            web_url=web_url,
        )
@staticmethod
def create_incident(
organization, projects, detection_uuid=None, status=1,
title=None, query='test query', date_started=None, date_detected=None,
date_closed=None, groups=None, seen_by=None,
):
if not title:
title = petname.Generate(2, ' ', letters=10).title()
incident = Incident.objects.create(
organization=organization,
detection_uuid=detection_uuid,
status=status,
title=title,
query=query,
date_started=date_started or timezone.now(),
date_detected=date_detected or timezone.now(),
date_closed=date_closed or timezone.now(),
)
for project in projects:
IncidentProject.objects.create(incident=incident, project=project)
if groups:
for group in groups:
IncidentGroup.objects.create(incident=incident, group=group)
if seen_by:
for user in seen_by:
IncidentSeen.objects.create(incident=incident, user=user, last_seen=timezone.now())
return incident
    @staticmethod
    def create_incident_activity(incident, type, comment=None, user=None):
        """Create an IncidentActivity entry of the given *type* on *incident*."""
        return IncidentActivity.objects.create(
            incident=incident,
            type=type,
            comment=comment,
            user=user,
        )
| 35.580679 | 324 | 0.512874 |
95c476941ffe36ed5adfcfccfe7c9a96cb15454d | 1,882 | py | Python | tests/testing/helpers/test__check_dfs_passed.py | munichpavel/tubular | 53e277dea2cc869702f2ed49f2b495bf79b92355 | [
"BSD-3-Clause"
] | null | null | null | tests/testing/helpers/test__check_dfs_passed.py | munichpavel/tubular | 53e277dea2cc869702f2ed49f2b495bf79b92355 | [
"BSD-3-Clause"
] | null | null | null | tests/testing/helpers/test__check_dfs_passed.py | munichpavel/tubular | 53e277dea2cc869702f2ed49f2b495bf79b92355 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import inspect
import tubular.testing.helpers as h
import pandas as pd
def test_arguments():
    """Test arguments for arguments of tubular.testing.helpers._check_dfs_passed."""
    # the helper is expected to take exactly two positional DataFrame args
    expected_arguments = ["df_1", "df_2"]
    arg_spec = inspect.getfullargspec(h._check_dfs_passed)
    arguments = arg_spec.args
    assert len(expected_arguments) == len(
        arguments
    ), f"Incorrect number of arguments -\n Expected: {len(expected_arguments)}\n Actual: {len(arguments)}"
    assert (
        expected_arguments == arguments
    ), f"Incorrect arguments -\n Expected: {expected_arguments}\n Actual: {arguments}"
    # neither argument should carry a default value
    default_values = arg_spec.defaults
    assert (
        default_values is None
    ), f"Unexpected default values -\n Expected: None\n Actual: {default_values}"
def test_exceptions_raised():
    """Test that the expected exceptions are raised by tubular.testing.helpers._check_dfs_passed."""
    # first arg not a DataFrame -> TypeError
    with pytest.raises(
        TypeError, match=r"expecting first positional arg to be a pd.DataFrame.*"
    ):
        h._check_dfs_passed(1, pd.DataFrame())
    # second arg not a DataFrame -> TypeError
    with pytest.raises(
        TypeError, match=r"expecting second positional arg to be a pd.DataFrame.*"
    ):
        h._check_dfs_passed(pd.DataFrame(), 1)
    # differing row counts -> ValueError
    with pytest.raises(
        ValueError,
        match=r"expecting first positional arg and second positional arg to have equal number of rows but got\n 1\n 0",
    ):
        h._check_dfs_passed(pd.DataFrame({"a": 1}, index=[0]), pd.DataFrame())
    # same row count but different indexes -> ValueError
    with pytest.raises(
        ValueError,
        match=r"expecting indexes for first positional arg and second positional arg to be the same but got\n Int64Index\(\[0\], dtype='int64'\)\n Int64Index\(\[1\], dtype='int64'\)",
    ):
        h._check_dfs_passed(
            pd.DataFrame({"a": 1}, index=[0]), pd.DataFrame({"a": 1}, index=[1])
        )
| 30.852459 | 185 | 0.665781 |
711fae526a87b18a29754b5f97410b01a778587f | 11,178 | py | Python | src/rayoptics/elem/surface.py | ajeddeloh/ray-optics | 63776c4efd06378b7742c14f453cf6f333dfd675 | [
"BSD-3-Clause"
] | null | null | null | src/rayoptics/elem/surface.py | ajeddeloh/ray-optics | 63776c4efd06378b7742c14f453cf6f333dfd675 | [
"BSD-3-Clause"
] | null | null | null | src/rayoptics/elem/surface.py | ajeddeloh/ray-optics | 63776c4efd06378b7742c14f453cf6f333dfd675 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2017 Michael J. Hayford
""" Module for optical surface related classes
Surface
Container of profile, extent, position and orientation information of
the surface
DecenterData
Maintains data and actions to support 4 types of position and
orientation changes.
- DEC: pos and orientation applied prior to surface
- REV: pos and orientation applied following surface in reverse
- DAR: pos and orientation applied prior to surface and then returned to initial frame
- BEN: used for fold mirrors, orientation applied before and after surface
Aperture
- Circular
- Rectangular
- Elliptical
.. Created on Sat Sep 16 09:22:05 2017
.. codeauthor: Michael J. Hayford
"""
from enum import Enum, auto
from math import sqrt
import numpy as np
from rayoptics.seq import interface
from . import profiles
import transforms3d as t3d
from rayoptics.optical.model_enums import DecenterType as dec
from rayoptics.raytr.traceerror import TraceError
class InteractionMode(Enum):
    """ enum for different interact_mode specifications

    Retained to restore old files

    .. deprecated:: 0.4.5
    """
    Transmit = auto()  #: propagate in transmission at this interface
    Reflect = auto()  #: propagate in reflection at this interface
class Surface(interface.Interface):
    """ Container of profile, extent, position and orientation. """

    def __init__(self, lbl='', profile=None,
                 clear_apertures=None, edge_apertures=None,
                 **kwargs):
        """Create a surface.

        Args:
            lbl: user label for the surface
            profile: surface profile; defaults to profiles.Spherical()
            clear_apertures: list of Aperture objects for the clear area
            edge_apertures: list of Aperture objects for the physical edge
        """
        super().__init__(**kwargs)
        self.label = lbl
        if profile:
            self.profile = profile
        else:
            self.profile = profiles.Spherical()
        self.clear_apertures = clear_apertures if clear_apertures else []
        self.edge_apertures = edge_apertures if edge_apertures else []

    def __repr__(self):
        # include the label only when one was set
        if len(self.label) > 0:
            return "{!s}(lbl={!r}, profile={!r}, interact_mode={!s})" \
                .format(type(self).__name__,
                        self.label, self.profile, self.interact_mode)
        else:
            return "{!s}(profile={!r}, interact_mode={!s})" \
                .format(type(self).__name__,
                        self.profile, self.interact_mode)

    def interface_type(self):
        """Return the class name of the attached profile."""
        return type(self.profile).__name__

    def update(self):
        super().update()
        self.profile.update()

    def sync_to_restore(self, opt_model):
        """Hook run after restoring a model; forwarded to all apertures."""
        super().sync_to_restore(opt_model)
        for ca in self.clear_apertures:
            ca.sync_to_restore(opt_model)
        for ea in self.edge_apertures:
            ea.sync_to_restore(opt_model)

    @property
    def profile_cv(self):
        # curvature of the underlying profile
        return self.profile.cv

    @profile_cv.setter
    def profile_cv(self, cv):
        self.profile.cv = cv

    @property
    def optical_power(self):
        # power = (index change across surface) * curvature
        return self.delta_n * self.profile.cv

    @optical_power.setter
    def optical_power(self, pwr):
        # guard the divide when there is no index change across the surface
        self.profile.cv = pwr/self.delta_n if self.delta_n != 0.0 else 0.0

    def set_optical_power(self, pwr, n_before, n_after):
        """Set power given the refractive indices on either side."""
        self.delta_n = n_after - n_before
        self.optical_power = pwr

    def apply_scale_factor(self, scale_factor):
        """Scale aperture and profile data by *scale_factor*."""
        super().apply_scale_factor(scale_factor)
        self.max_aperture *= scale_factor
        self.profile.apply_scale_factor(scale_factor)
        for e in self.edge_apertures:
            e.apply_scale_factor(scale_factor)
        for ca in self.clear_apertures:
            ca.apply_scale_factor(scale_factor)

    def from_first_order(self, nu_before, nu_after, y):
        pass

    def z_sag(self, pt):
        """Sag of the profile at height pt[1]."""
        return self.profile.sag(0., pt[1])

    def set_z_sag(self, pt):
        """Set profile curvature from a (sag, semi-diameter) point."""
        self.profile.cv = self.calc_cv_from_zsag(pt)

    def calc_cv_from_zsag(self, pt):
        """Curvature of the circle through the origin and point (x=sag, y)."""
        x, y = pt
        cv = 2*x / (x**2 + y**2)
        return cv

    def surface_od(self):
        """Largest dimension over edge apertures, then clear apertures,
        falling back to max_aperture."""
        od = 0
        if len(self.edge_apertures) > 0:
            for e in self.edge_apertures:
                edg = e.max_dimension()
                if edg > od:
                    od = edg
        elif len(self.clear_apertures) > 0:
            for ca in self.clear_apertures:
                ap = ca.max_dimension()
                if ap > od:
                    od = ap
        else:
            od = self.max_aperture
        return od

    def get_y_aperture_extent(self):
        """ returns [y_min, y_max] for the union of apertures """
        # start from an empty (inverted) interval and grow it
        od = [1.0e10, -1.0e10]
        if len(self.edge_apertures) > 0:
            for e in self.edge_apertures:
                edg = e.bounding_box()
                if edg[0][1] < od[0]:
                    od[0] = edg[0][1]
                if edg[1][1] > od[1]:
                    od[1] = edg[1][1]
        elif len(self.clear_apertures) > 0:
            for ca in self.clear_apertures:
                ap = ca.bounding_box()
                if ap[0][1] < od[0]:
                    od[0] = ap[0][1]
                if ap[1][1] > od[1]:
                    od[1] = ap[1][1]
        else:
            od = [-self.max_aperture, self.max_aperture]
        return od

    def full_profile(self, edge_extent, flat_id=None, dir=1, steps=6):
        """Profile polyline; when *flat_id* is given, the curved part stops
        at flat_id and flats extend to the edge extent."""
        if flat_id is None:
            return self.profile.profile(edge_extent, dir, steps)
        else:
            # resolve lower/upper semi-diameters from 1- or 2-element extent
            if len(edge_extent) == 1:
                sd_upr = edge_extent[0]
                sd_lwr = -edge_extent[0]
            else:
                sd_upr = edge_extent[1]
                sd_lwr = edge_extent[0]
            if dir < 0:
                sd_lwr, sd_upr = sd_upr, sd_lwr
            prf = []
            try:
                sag = self.profile.sag(0, flat_id)
            except TraceError:
                # profile doesn't reach flat_id; omit the flat segments
                sag = None
            else:
                prf.append([sag, sd_lwr])
            prf += self.profile.profile((flat_id,), dir, steps)
            if sag is not None:
                prf.append([sag, sd_upr])
            return prf

    def intersect(self, p0, d, eps=1.0e-12, z_dir=1.0):
        """Delegate ray/surface intersection to the profile."""
        return self.profile.intersect(p0, d, eps, z_dir)

    def normal(self, p):
        """Surface normal at point *p* (delegated to the profile)."""
        return self.profile.normal(p)
class DecenterData():
    """ Maintains data and actions for position and orientation changes.

    - LOCAL: pos and orientation applied prior to surface
    - REV: pos and orientation applied following surface in reverse
    - DAR: pos and orientation applied prior to surface and then returned to initial frame
    - BEND: used for fold mirrors, orientation applied before and after surface
    """

    def __init__(self, dtype, x=0., y=0., alpha=0., beta=0., gamma=0.):
        self.dtype = dtype
        # x, y, z vertex decenter
        self.dec = np.array([x, y, 0.])
        # alpha, beta, gamma euler angles
        self.euler = np.array([alpha, beta, gamma])
        # x, y, z rotation point offset
        self.rot_pt = np.array([0., 0., 0.])
        # rotation matrix; None when all euler angles are zero
        self.rot_mat = None

    def __repr__(self):
        return "%r: Decenter: %r, Tilt: %r" % (self.dtype.name, self.dec,
                                               self.euler)

    def update(self):
        """Recompute rot_mat from the current euler angles."""
        def convertl2r(self):
            # flip alpha/beta signs: left- to right-handed convention
            return np.array([-self.euler[0], -self.euler[1], self.euler[2]])
        if self.euler.any():
            self.rot_mat = t3d.euler.euler2mat(*np.deg2rad(convertl2r(self)))
        else:
            self.rot_mat = None

    def apply_scale_factor(self, scale_factor):
        # only translations scale; angles are invariant
        self.dec *= scale_factor
        self.rot_pt *= scale_factor

    def tform_before_surf(self):
        """(rotation, translation) applied before the surface."""
        if self.dtype is not dec.REV:
            return self.rot_mat, self.dec
        else:
            return None, np.array([0., 0., 0.])

    def tform_after_surf(self):
        """(rotation, translation) applied after the surface."""
        if self.dtype is dec.REV or self.dtype is dec.DAR:
            # undo the before-surface transform (transpose == inverse)
            rt = self.rot_mat
            if self.rot_mat is not None:
                rt = self.rot_mat.transpose()
            return rt, -self.dec
        elif self.dtype is dec.BEND:
            # fold mirror: re-apply the orientation, no translation
            return self.rot_mat, np.array([0., 0., 0.])
        else:
            return None, np.array([0., 0., 0.])
class Aperture():
    """Base class for aperture shapes, holding a common offset/rotation.

    Subclasses supply dimension(), set_dimension() and point_inside().
    """

    def __init__(self, x_offset=0.0, y_offset=0.0, rotation=0.0):
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.rotation = rotation

    def sync_to_restore(self, opt_model):
        """Backfill attributes that older saved models may lack."""
        for attr in ('x_offset', 'y_offset', 'rotation'):
            if not hasattr(self, attr):
                setattr(self, attr, 0.0)

    def dimension(self):
        """(x, y) half-sizes of the aperture; implemented by subclasses."""
        pass

    def set_dimension(self, x, y):
        """Set the aperture size from half-sizes; implemented by subclasses."""
        pass

    def max_dimension(self):
        """Half-diagonal of the aperture's dimensions."""
        x, y = self.dimension()
        return sqrt(x**2 + y**2)

    def point_inside(self, x, y):
        """Membership test; implemented by subclasses."""
        pass

    def bounding_box(self):
        """(lower-left, upper-right) corners as numpy arrays."""
        center = np.array([self.x_offset, self.y_offset])
        half_sizes = np.array(self.dimension())
        return center - half_sizes, center + half_sizes

    def apply_scale_factor(self, scale_factor):
        self.x_offset *= scale_factor
        self.y_offset *= scale_factor

    def tform(self, x, y):
        """Shift (x, y) into the aperture's local frame."""
        return x - self.x_offset, y - self.y_offset
class Circular(Aperture):
    """Circular aperture of a given radius."""

    def __init__(self, radius=1.0, **kwargs):
        super().__init__(**kwargs)
        self.radius = radius

    def dimension(self):
        return (self.radius, self.radius)

    def set_dimension(self, x, y):
        # radius taken as the length of the (x, y) vector
        self.radius = sqrt(x**2 + y**2)

    def max_dimension(self):
        return self.radius

    def point_inside(self, x, y):
        u, v = self.tform(x, y)
        return sqrt(u**2 + v**2) <= self.radius

    def apply_scale_factor(self, scale_factor):
        super().apply_scale_factor(scale_factor)
        self.radius *= scale_factor
class Rectangular(Aperture):
    """Axis-aligned rectangular aperture defined by its half-widths."""

    def __init__(self, x_half_width=1.0, y_half_width=1.0, **kwargs):
        super().__init__(**kwargs)
        self.x_half_width = x_half_width
        self.y_half_width = y_half_width

    def dimension(self):
        return (self.x_half_width, self.y_half_width)

    def set_dimension(self, x, y):
        self.x_half_width = abs(x)
        self.y_half_width = abs(y)

    def point_inside(self, x, y):
        u, v = self.tform(x, y)
        return abs(u) <= self.x_half_width and abs(v) <= self.y_half_width

    def apply_scale_factor(self, scale_factor):
        super().apply_scale_factor(scale_factor)
        self.x_half_width *= scale_factor
        self.y_half_width *= scale_factor
class Elliptical(Aperture):
    """Axis-aligned elliptical aperture defined by its semi-axes."""

    def __init__(self, x_half_width=1.0, y_half_width=1.0, **kwargs):
        super().__init__(**kwargs)
        self.x_half_width = x_half_width
        self.y_half_width = y_half_width

    def dimension(self):
        return (self.x_half_width, self.y_half_width)

    def set_dimension(self, x, y):
        self.x_half_width = abs(x)
        self.y_half_width = abs(y)

    def point_inside(self, x, y):
        # Previously missing: this class inherited the base no-op, so
        # membership tests silently returned None.  Standard ellipse test:
        # (x/a)^2 + (y/b)^2 <= 1 in the aperture's local frame.
        x, y = self.tform(x, y)
        return (x/self.x_half_width)**2 + (y/self.y_half_width)**2 <= 1.0

    def apply_scale_factor(self, scale_factor):
        super().apply_scale_factor(scale_factor)
        self.x_half_width *= scale_factor
        self.y_half_width *= scale_factor
| 30.793388 | 96 | 0.584809 |
57f6b2fa6969012414c648eae5e4bd49ec118473 | 4,251 | py | Python | mthree/test/test_utils.py | gadial/mthree | b351c358e0be45c1e6bf73e5fce5540736cce998 | [
"Apache-2.0"
] | 17 | 2021-08-31T04:34:31.000Z | 2022-03-04T13:07:22.000Z | mthree/test/test_utils.py | gadial/mthree | b351c358e0be45c1e6bf73e5fce5540736cce998 | [
"Apache-2.0"
] | 35 | 2021-08-16T15:56:32.000Z | 2022-03-23T16:34:35.000Z | mthree/test/test_utils.py | gadial/mthree | b351c358e0be45c1e6bf73e5fce5540736cce998 | [
"Apache-2.0"
] | 6 | 2021-08-16T15:47:36.000Z | 2022-01-31T15:16:03.000Z | # This code is part of Mthree.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module
"""Test utils functions"""
import numpy as np
from qiskit import QuantumCircuit, execute
from qiskit.test.mock import FakeAthens
import mthree
def test_gen_dist0():
    """Raw counts from one circuit: utils.expval/stddev must match the
    distance-0 mitigated results."""
    backend = FakeAthens()
    # 4-qubit GHZ state
    circuit = QuantumCircuit(4)
    circuit.h(2)
    circuit.cx(2, 1)
    circuit.cx(2, 3)
    circuit.cx(1, 0)
    circuit.measure_all()
    counts = execute(circuit, backend).result().get_counts()
    mitigator = mthree.M3Mitigation(backend)
    mitigator.cals_from_system()
    quasi = mitigator.apply_correction(counts, qubits=range(4),
                                       return_mitigation_overhead=True,
                                       distance=0)
    assert np.allclose(mthree.utils.expval(counts), quasi.expval())
    assert np.allclose(mthree.utils.expval(quasi), quasi.expval())
    assert np.allclose(mthree.utils.expval(quasi, 'IZZI'), quasi.expval('IZZI'))
    assert np.allclose(mthree.utils.stddev(counts), quasi.stddev())
def test_gen_multi_dist0():
    """Raw counts from several circuits: utils.expval/stddev must match the
    distance-0 mitigated results, including plain-dict inputs."""
    backend = FakeAthens()
    # 4-qubit GHZ state, executed five times
    circuit = QuantumCircuit(4)
    circuit.h(2)
    circuit.cx(2, 1)
    circuit.cx(2, 3)
    circuit.cx(1, 0)
    circuit.measure_all()
    counts = execute([circuit]*5, backend).result().get_counts()
    mitigator = mthree.M3Mitigation(backend)
    mitigator.cals_from_system()
    quasi = mitigator.apply_correction(counts, qubits=range(4),
                                       return_mitigation_overhead=True,
                                       distance=0)
    assert np.allclose(mthree.utils.expval(counts), quasi.expval())
    assert np.allclose(mthree.utils.expval(quasi), quasi.expval())
    assert np.allclose(mthree.utils.expval(quasi, 'IZZI'), quasi.expval('IZZI'))
    # plain dicts must be accepted just like qiskit Counts objects
    plain_dicts = [dict(item) for item in counts]
    assert np.allclose(mthree.utils.expval(plain_dicts), quasi.expval())
    assert np.allclose(mthree.utils.stddev(counts), quasi.stddev())
def test_gen_full_dist():
    """Non-trivial (full-distance) mitigation of one circuit: utils helpers
    must agree with the quasi-distribution methods."""
    backend = FakeAthens()
    # 4-qubit GHZ state
    circuit = QuantumCircuit(4)
    circuit.h(2)
    circuit.cx(2, 1)
    circuit.cx(2, 3)
    circuit.cx(1, 0)
    circuit.measure_all()
    counts = execute(circuit, backend).result().get_counts()
    mitigator = mthree.M3Mitigation(backend)
    mitigator.cals_from_system()
    quasi = mitigator.apply_correction(counts, qubits=range(4),
                                       return_mitigation_overhead=True)
    assert np.allclose(mthree.utils.expval(quasi), quasi.expval())
    assert np.allclose(mthree.utils.stddev(quasi), quasi.stddev())
    # the nearest probability distribution must be handled the same way
    probs = quasi.nearest_probability_distribution()
    assert np.allclose(mthree.utils.expval(probs), probs.expval())
    assert np.allclose(mthree.utils.stddev(probs), probs.stddev())
def test_gen_multi_full_dist():
    """Non-trivial (full-distance) mitigation of several circuits: utils
    helpers must agree with the quasi-distribution methods."""
    backend = FakeAthens()
    # 4-qubit GHZ state, executed five times
    circuit = QuantumCircuit(4)
    circuit.h(2)
    circuit.cx(2, 1)
    circuit.cx(2, 3)
    circuit.cx(1, 0)
    circuit.measure_all()
    counts = execute([circuit]*5, backend).result().get_counts()
    mitigator = mthree.M3Mitigation(backend)
    mitigator.cals_from_system()
    quasi = mitigator.apply_correction(counts, qubits=range(4),
                                       return_mitigation_overhead=True)
    assert np.allclose(mthree.utils.expval(quasi), quasi.expval())
    assert np.allclose(mthree.utils.stddev(quasi), quasi.stddev())
    # the nearest probability distributions must be handled the same way
    probs = quasi.nearest_probability_distribution()
    assert np.allclose(mthree.utils.expval(probs), probs.expval())
    assert np.allclose(mthree.utils.stddev(probs), probs.stddev())
| 36.965217 | 90 | 0.67937 |
0c1c5266c29b46fe19ad3037f490d065a87bb2c8 | 1,404 | py | Python | GridControlformyRs2/src/Scripts/python/pythonpath/indoc/dialogs.py | p--q/GridControlformyRs2 | 22b6b70d64fdfbfa16eeceda577c4a129856ed35 | [
"BSD-3-Clause"
] | null | null | null | GridControlformyRs2/src/Scripts/python/pythonpath/indoc/dialogs.py | p--q/GridControlformyRs2 | 22b6b70d64fdfbfa16eeceda577c4a129856ed35 | [
"BSD-3-Clause"
] | null | null | null | GridControlformyRs2/src/Scripts/python/pythonpath/indoc/dialogs.py | p--q/GridControlformyRs2 | 22b6b70d64fdfbfa16eeceda577c4a129856ed35 | [
"BSD-3-Clause"
] | null | null | null | #!/opt/libreoffice5.4/program/python
# -*- coding: utf-8 -*-
# import pydevd; pydevd.settrace(stdoutToServer=True, stderrToServer=True)
from indoc import staticdialog3, historydialog8, datedialog1
from com.sun.star.awt import MouseButton # 定数
def mousePressed(enhancedmouseevent, xscriptcontext):  # Called on mouse button press. The controller has no container window.
	"""Open the dialog named in row 0 of the clicked column on double-click.

	Returns False to suppress cell edit mode, True to allow it.
	"""
	selection = enhancedmouseevent.Target  # Get the target cell.
	if enhancedmouseevent.Buttons==MouseButton.LEFT:  # For the left button.
		if selection.supportsService("com.sun.star.sheet.SheetCell"):  # When the target is a single cell.
			if enhancedmouseevent.ClickCount==2:  # On double click.
				sheet = selection.getSpreadsheet()
				celladdress = selection.getCellAddress()
				r, c = celladdress.Row, celladdress.Column
				# Row 0 of the column names which dialog to open.
				dialogname = sheet[0, c].getString()
				if r>0:
					defaultrows = "item1", "item2", "item3", "item4"
					if dialogname=="staticdialog3":  # Static dialog. Popup menu items are now fetched by name.
						staticdialog3.createDialog(xscriptcontext, enhancedmouseevent, dialogname, defaultrows)
					elif dialogname=="historydialog8":  # History dialog. Fixed selected-row index retrieval and scrolling.
						historydialog8.createDialog(xscriptcontext, enhancedmouseevent, dialogname, defaultrows)
					elif dialogname=="datedialog1":  # Date dialog.
						datedialog1.createDialog(xscriptcontext, enhancedmouseevent, dialogname, "YYYY/M/D")
				return False  # Do not enter cell edit mode.
	return True  # Enter cell edit mode.
| 52 | 98 | 0.736467 |
c6dd0611e82d3158def703a4c1ab169c49fe6878 | 1,041 | py | Python | com/vmware/vapi/security/privilege.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | com/vmware/vapi/security/privilege.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | com/vmware/vapi/security/privilege.py | sumitrsystems/Vmware | 7705d9979bee71f02c71d63890616409044cba08 | [
"MIT"
] | null | null | null | """
Privilege Validator interface
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2018 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
from vmware.vapi.lib.log import get_vapi_logger
# Configure logging
logger = get_vapi_logger(__name__)
class PrivilegeValidator(object):
    """Abstract interface for privilege validation backends."""

    def validate(self, user_identity, required_privileges):
        """Check that *user_identity* holds all *required_privileges*.

        Must be overridden by concrete validators; the base implementation
        always raises NotImplementedError.
        """
        raise NotImplementedError

    def __hash__(self):
        # Hash on the string form so equal-printing validators hash alike.
        return hash(str(self))
# Privilege Validator singleton instance (created lazily on first use)
_privilege_validator = None


def get_privilege_validator(privilege_validator=None):
    """Return the singleton PrivilegeValidator instance.

    On the first call, ``privilege_validator`` (a class or zero-argument
    factory) is invoked and the result cached; subsequent calls return the
    cached instance and ignore the argument.

    :param privilege_validator: validator class/factory; required on the
        first call only.
    :raises ValueError: if no validator has been configured yet and no
        factory was supplied (previously this crashed with an opaque
        ``TypeError: 'NoneType' object is not callable``).
    """
    global _privilege_validator
    if _privilege_validator is None:
        if privilege_validator is None:
            raise ValueError(
                'No privilege validator configured; pass a validator class '
                'on the first call to get_privilege_validator()')
        _privilege_validator = privilege_validator()
    return _privilege_validator
| 23.133333 | 123 | 0.716619 |
de86f53884f81bd748b2da70a8e18c08c63c0801 | 1,727 | py | Python | first_strike/controllers.py | WoolleySheep/first-strike | 15f93cccba3cfdb2c2b75524004a08bfef42235a | [
"MIT"
] | null | null | null | first_strike/controllers.py | WoolleySheep/first-strike | 15f93cccba3cfdb2c2b75524004a08bfef42235a | [
"MIT"
] | null | null | null | first_strike/controllers.py | WoolleySheep/first-strike | 15f93cccba3cfdb2c2b75524004a08bfef42235a | [
"MIT"
] | null | null | null | from copy import deepcopy
from meta_controller import RocketMetaController, TurretMetaController
class Controllers:
    """Owns the rocket and turret meta controllers and drives one game tick.

    A shared two-slot list (``state_copy``) holds deep copies of the game
    parameters and history.  Both meta controllers are handed the *same*
    list object, so refreshing its slots here refreshes what both
    controllers see without re-wiring them.
    """

    def __init__(self, parameters, history, controller_parameters):
        self.parameters = parameters
        self.history = history
        # [parameters_copy, history_copy]; aliased by both meta controllers
        self.state_copy = [None, None]
        (
            rocket_active_controller,
            turret_active_controller,
            rocket_raise_errors,
            turret_raise_errors,
            rocket_check_execution_time,
            turret_check_execution_time,
        ) = controller_parameters
        self.rocket_controller = RocketMetaController(
            parameters,
            history,
            self.state_copy,
            rocket_active_controller,
            rocket_raise_errors,
            rocket_check_execution_time,
        )
        self.turret_controller = TurretMetaController(
            parameters,
            history,
            self.state_copy,
            turret_active_controller,
            turret_raise_errors,
            turret_check_execution_time,
        )

    @property
    def issue_raised(self):
        # True when either side reported a problem this tick
        return (
            self.rocket_controller.issue_raised or self.turret_controller.issue_raised
        )

    def store_state_copy(self):
        """Refresh the shared state snapshot with deep copies so controllers
        cannot mutate the live game state."""
        self.state_copy[0] = deepcopy(self.parameters)
        self.state_copy[1] = deepcopy(self.history)

    def process_inputs(self):
        """Run both controllers for one tick and commit their inputs.

        The turret is skipped when the rocket changed the game state, and
        inputs are stored only when neither controller raised an issue.
        """
        self.store_state_copy()
        self.rocket_controller.process_inputs()
        if self.rocket_controller.state_changed:
            return
        self.turret_controller.process_inputs()
        if not self.issue_raised:
            self.rocket_controller.store_inputs()
            self.turret_controller.store_inputs()
| 28.783333 | 86 | 0.636943 |
70ec485106a40306286a717be4c2cb42a7898b59 | 739 | py | Python | lib_pro/processor/utils/box_coder_builder.py | laobadao/TF_VS_Caffe | 943b47daefa42f07db285a331647d09669085f9f | [
"MIT"
] | null | null | null | lib_pro/processor/utils/box_coder_builder.py | laobadao/TF_VS_Caffe | 943b47daefa42f07db285a331647d09669085f9f | [
"MIT"
] | null | null | null | lib_pro/processor/utils/box_coder_builder.py | laobadao/TF_VS_Caffe | 943b47daefa42f07db285a331647d09669085f9f | [
"MIT"
] | null | null | null | """A function to build an object detection box coder from configuration."""
from ..utils import faster_rcnn_box_coder
def build(box_coder_config, scale_factors=None):
    """Builds a box coder object based on the box coder config.

    Args:
      box_coder_config: identifier string for the desired box coder
        (currently only 'faster_rcnn_box_coder' is supported).  Note: this
        is a plain string, not a box_coder.proto object.
      scale_factors: optional [y_scale, x_scale, height_scale, width_scale]
        list of target scaling factors; defaults to [10.0, 10.0, 5.0, 5.0],
        matching the previous hard-coded values.

    Returns:
      BoxCoder based on the config.

    Raises:
      ValueError: if box_coder_config does not name a known box coder.
    """
    if scale_factors is None:
        scale_factors = [10.0, 10.0, 5.0, 5.0]
    if box_coder_config == 'faster_rcnn_box_coder':
        return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=scale_factors)
    # previous message ('Empty box coder.') was misleading for unknown,
    # non-empty configs
    raise ValueError('Unknown box coder: %r' % (box_coder_config,))
| 28.423077 | 116 | 0.703654 |
da4ce6f0d40f035602b0b4af9d2bd6917ff99c30 | 2,586 | py | Python | neo/Prompt/Commands/BuildNRun.py | iNomaD/neo-python | bf27e91c041daa05b1d73c96d97a69777048f3da | [
"MIT"
] | null | null | null | neo/Prompt/Commands/BuildNRun.py | iNomaD/neo-python | bf27e91c041daa05b1d73c96d97a69777048f3da | [
"MIT"
] | null | null | null | neo/Prompt/Commands/BuildNRun.py | iNomaD/neo-python | bf27e91c041daa05b1d73c96d97a69777048f3da | [
"MIT"
] | null | null | null | from neo.Prompt.Utils import get_arg
from neo.Prompt.Commands.LoadSmartContract import GatherLoadedContractParams
from neo.Prompt.Commands.Invoke import test_deploy_and_invoke
from neocore.Fixed8 import Fixed8
from boa.compiler import Compiler
import binascii
import traceback
def LoadAndRun(arguments, wallet):
    """Load a compiled smart contract script from disk and test-run it.

    :param arguments: CLI arguments; the first is the path to the script,
        the remainder are forwarded to ``DoRun``.
    :param wallet: open wallet used for the test invocation (may be None).
    """
    path = get_arg(arguments)
    try:
        with open(path, 'rb') as f:
            content = f.read()
        try:
            # The file may contain either raw bytecode or a hex-encoded
            # dump of it; keep the raw bytes if unhexlify rejects them.
            content = binascii.unhexlify(content)
        except (binascii.Error, TypeError, ValueError):
            pass
        script = content
        print("arguments.... %s " % arguments)
        DoRun(script, arguments, wallet, path)
    except Exception as e:
        print("Could not load script %s " % e)
def BuildAndRun(arguments, wallet):
    """Compile a Python smart contract to .avm and test-run the result."""
    source_path = get_arg(arguments)
    try:
        compiled_script = Compiler.instance().load_and_save(source_path)
        avm_path = source_path.replace('.py', '.avm')
        print("Saved output to %s " % avm_path)
        DoRun(compiled_script, arguments, wallet, source_path)
    except Exception as e:
        print("Could not compile %s " % e)
def DoRun(contract_script, arguments, wallet, path):
    """Test-deploy and invoke a compiled contract script.

    Only runs when the second CLI argument is 'test' and a wallet is open;
    otherwise prints a hint and returns.

    :param contract_script: compiled AVM bytecode of the contract
    :param arguments: raw CLI arguments; ``arguments[2:]`` are contract
        load parameters, ``arguments[6:]`` are the invocation arguments
    :param wallet: open wallet used for the test deploy/invoke
    :param path: source path, used only for log output
    """
    try:
        test = get_arg(arguments, 1)
        if test is not None and test == 'test':
            if wallet is not None:
                f_args = arguments[2:]
                i_args = arguments[6:]
                script = GatherLoadedContractParams(f_args, contract_script)
                tx, result, total_ops = test_deploy_and_invoke(script, i_args, wallet)
                # Reversed only for the display below.
                i_args.reverse()
                if tx is not None and result is not None:
                    print("\n-----------------------------------------------------------")
                    print("Calling %s with arguments %s " % (path, i_args))
                    print("Test deploy invoke successful")
                    print("Used total of %s operations " % total_ops)
                    print("Result %s " % result)
                    print("Invoke TX gas cost: %s " % (tx.Gas.value / Fixed8.D))
                    print("-------------------------------------------------------------\n")
                    return
                else:
                    print("Test invoke failed")
                    print("tx is, results are %s %s " % (tx, result))
                    return
            else:
                print("please open a wallet to test built contract")
    except Exception as e:
        # Fixed message typo: was "could not bulid"
        print("could not build %s " % e)
        traceback.print_stack()
        traceback.print_exc()
| 28.417582 | 92 | 0.530162 |
2f34acacfe2bc2c4fc354fdc2e1155b839edcac1 | 2,521 | py | Python | sdk/python/pulumi_gcp/compute/project_metadata.py | pulumi-bot/pulumi-gcp | 43ff11bf1c99b4e9e493f61d9755e359b686ae67 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/compute/project_metadata.py | pulumi-bot/pulumi-gcp | 43ff11bf1c99b4e9e493f61d9755e359b686ae67 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/compute/project_metadata.py | pulumi-bot/pulumi-gcp | 43ff11bf1c99b4e9e493f61d9755e359b686ae67 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class ProjectMetadata(pulumi.CustomResource):
    """
    Manages metadata common to all instances for a project in GCE. For more information see
    [the official documentation](https://cloud.google.com/compute/docs/storing-retrieving-metadata)
    and
    [API](https://cloud.google.com/compute/docs/reference/latest/projects/setCommonInstanceMetadata).
    ~> **Note:** If you want to manage only single key/value pairs within the project metadata
    rather than the entire set, then use
    [google_compute_project_metadata_item](compute_project_metadata_item.html).
    """
    # NOTE: this file is generated by the Pulumi Terraform bridge; prefer
    # regenerating over hand-editing. Uses Python 2's `basestring`.
    def __init__(__self__, __name__, __opts__=None, metadata=None, project=None):
        """Create a ProjectMetadata resource with the given unique name, props, and options."""
        # Validate the resource name used for URN creation.
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, basestring):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        __props__ = dict()
        # `metadata` is required and must be a dict of key/value pairs.
        if not metadata:
            raise TypeError('Missing required property metadata')
        elif not isinstance(metadata, dict):
            raise TypeError('Expected property metadata to be a dict')
        __self__.metadata = metadata
        """
        A series of key value pairs. Changing this resource
        updates the GCE state.
        """
        __props__['metadata'] = metadata
        # `project` is optional; falls back to the provider project.
        if project and not isinstance(project, basestring):
            raise TypeError('Expected property project to be a basestring')
        __self__.project = project
        """
        The ID of the project in which the resource belongs. If it
        is not provided, the provider project is used.
        """
        __props__['project'] = project
        super(ProjectMetadata, __self__).__init__(
            'gcp:compute/projectMetadata:ProjectMetadata',
            __name__,
            __props__,
            __opts__)

    def set_outputs(self, outs):
        # Copy engine-provided output values back onto the resource object.
        if 'metadata' in outs:
            self.metadata = outs['metadata']
        if 'project' in outs:
            self.project = outs['project']
57b25bc80a116f9d8289a18c6f99ae58ce416fc9 | 3,358 | py | Python | internal/modpack.py | MuXiu1997/GTNH-translation-compare | 8ca0d266b764e3707e86ff140a9c5d17292f11b3 | [
"MIT"
] | null | null | null | internal/modpack.py | MuXiu1997/GTNH-translation-compare | 8ca0d266b764e3707e86ff140a9c5d17292f11b3 | [
"MIT"
] | null | null | null | internal/modpack.py | MuXiu1997/GTNH-translation-compare | 8ca0d266b764e3707e86ff140a9c5d17292f11b3 | [
"MIT"
] | null | null | null | import json
import pathlib
import zipfile
from os import path
from typing import List, Dict, Sequence
from zipfile import ZipFile
import utils
from .comparable import Comparable
from .langfiletype import LangFiletype
from .scriptfiletype import ScriptFiletype
class ModPack:
    """A GTNH modpack directory: collects mod lang files and .zs scripts."""

    def __init__(self, pack_path: pathlib.Path):
        """Locate the pack root (the directory containing ``resources``).

        Accepts either the pack directory itself or its parent.
        NOTE(review): if neither glob matches, ``__pack_path`` is never set
        and the calls below raise AttributeError -- confirm whether an
        explicit error would be preferable.
        """
        if len(list(pack_path.glob("resources"))) == 1:
            self.__pack_path = pack_path
        elif len(list(pack_path.glob("*/resources"))) == 1:
            self.__pack_path = pathlib.Path(path.join(list(pack_path.glob("*/resources"))[0], ".."))
        self.lang_files: Sequence[Comparable] = self.__get_lang_files()
        self.script_files: Sequence[Comparable] = self.__get_script_files()

    def __get_lang_files(self) -> Sequence[Comparable]:
        """Collect en_US.lang entries from every mod jar under ``mods/``."""
        lang_files: list[LangFiletype] = []
        for mod_path in self.__pack_path.glob("mods/**/*.jar"):
            with mod_path.open("rb") as mod_jar:
                mod = Mod(zipfile.ZipFile(mod_jar))
                for filename, content in mod.lang_files.items():
                    # Path shape inside the jar: <top>/<sub_mod_id>/.../en_US.lang
                    sub_mod_id = filename.split("/")[1]
                    filename = path.join(*filename.split("/")[2:])
                    # NOTE(review): `filename` is computed but not used below;
                    # the literal "(unknown)" looks like it may have been meant
                    # to be the relative filename -- confirm.
                    lang_files.append(LangFiletype(f"{mod.mod_name}[{sub_mod_id}]/(unknown)", content))
        return lang_files

    # noinspection DuplicatedCode
    def __get_script_files(self) -> Sequence[Comparable]:
        """Collect ZenScript files under ``scripts/`` that define properties."""
        script_files: list[ScriptFiletype] = []
        for f in self.__pack_path.glob("scripts/*.zs"):
            script_file = ScriptFiletype(f.name, utils.ensure_lf(f.read_text(encoding="utf-8", errors="ignore")))
            # Skip scripts with no translatable properties.
            if 0 < len(script_file.properties):
                script_files.append(script_file)
        return script_files
class Mod:
__jar: ZipFile
__mod_name: str | None
__lang_files: Dict[str, str] | None
def __init__(self, jar: zipfile.ZipFile):
self.__jar = jar
self.__mod_name = None
self.__lang_files = None
@property
def mod_name(self) -> str:
if self.__mod_name is None:
try:
with self.__jar.open("mcmod.info", "r") as fp:
mod_info_json = fp.read()
mod_info = json.loads(mod_info_json, strict=False)
mod_list = mod_info
if isinstance(mod_info, dict):
mod_list = mod_list.get("modList")
first_mod_name = mod_list[0].get("name")
if len(mod_list) == 1:
self.__mod_name = utils.replace_illegal_characters(first_mod_name)
else:
self.__mod_name = utils.replace_illegal_characters(f"{first_mod_name}(+{len(mod_list) - 1})")
except KeyError:
self.__mod_name = "__no-modinfo"
assert self.__mod_name is not None
return self.__mod_name
@property
def lang_files(self) -> Dict[str, str]:
if self.__lang_files is None:
self.__lang_files = {}
for f in self.__jar.namelist():
if f.endswith("en_US.lang") and len(f.split("/")) == 4:
with self.__jar.open(f, mode="r") as fp:
self.__lang_files[f] = utils.ensure_lf(fp.read().decode("utf-8", errors="ignore"))
return self.__lang_files
| 40.457831 | 117 | 0.596784 |
2a0e9b5003880d564615ddf7994ef5aabaa8be89 | 1,822 | py | Python | scripts/configure.py | wcDogg/knotty | 9333d6cf7f1d6944c6820db25c8bca39f0a7a0c1 | [
"CC0-1.0"
] | null | null | null | scripts/configure.py | wcDogg/knotty | 9333d6cf7f1d6944c6820db25c8bca39f0a7a0c1 | [
"CC0-1.0"
] | null | null | null | scripts/configure.py | wcDogg/knotty | 9333d6cf7f1d6944c6820db25c8bca39f0a7a0c1 | [
"CC0-1.0"
] | null | null | null | from pathlib import Path
# -----------------------------------
# Project Directories
# -----------------------------------
# All paths are derived from the repository root (one level above scripts/).
DIR_PROJECT = Path(__file__).parent.parent
DIR_TEMP = Path(DIR_PROJECT / 'temp')
DIR_IMAGES = Path(DIR_PROJECT / 'assets')
DIR_CSV = Path(DIR_PROJECT / 'files')
DIR_JSON = Path(DIR_PROJECT / 'files')
DIR_MD_THK = Path(DIR_PROJECT / 'thk')
DIR_MD_FAN = Path(DIR_PROJECT / 'fan')
DIR_MD_PK = Path(DIR_PROJECT / 'pk')
# -----------------------------------
# CSV Source Files (inputs)
# -----------------------------------
CSV_THK = Path(DIR_CSV / 'thk_reference.csv')
CSV_FAN_2 = Path(DIR_CSV / 'fan_reference_2-strand.csv')
CSV_FAN_3 = Path(DIR_CSV / 'fan_reference_3-strand.csv')
CSV_FAN_4 = Path(DIR_CSV / 'fan_reference_4-strand.csv')
CSV_FAN_5 = Path(DIR_CSV / 'fan_reference_5-strand.csv')
# -----------------------------------
# Converted JSON Files (outputs, same directory as the CSVs)
# -----------------------------------
JSON_THK = Path(DIR_JSON / 'thk_reference.json')
JSON_FAN_2 = Path(DIR_JSON / 'fan_reference_2-strand.json')
JSON_FAN_3 = Path(DIR_JSON / 'fan_reference_3-strand.json')
JSON_FAN_4 = Path(DIR_JSON / 'fan_reference_4-strand.json')
JSON_FAN_5 = Path(DIR_JSON / 'fan_reference_5-strand.json')
# -----------------------------------
# Converted MD Tables (written to the temp directory)
# -----------------------------------
MD_THK = Path(DIR_TEMP / 'thk-reference.md')
MD_FAN_2 = Path(DIR_TEMP / 'fan-reference-2-strand.md')
MD_FAN_3 = Path(DIR_TEMP / 'fan-reference-3-strand.md')
MD_FAN_4 = Path(DIR_TEMP / 'fan-reference-4-strand.md')
MD_FAN_5 = Path(DIR_TEMP / 'fan-reference-5-strand.md')
# -----------------------------------
# Test
# -----------------------------------
def print_dirs():
    """Print the resolved project directory paths (quick sanity check)."""
    for directory in (DIR_PROJECT, DIR_IMAGES, DIR_CSV, DIR_JSON,
                      DIR_MD_THK, DIR_MD_FAN, DIR_MD_PK):
        print(directory)
#print_dirs() | 30.881356 | 59 | 0.59056 |
9fa4c29c85bf71c1b19f35eb37eddf9b207e907b | 549 | py | Python | MyMusicApp/blog/migrations/0010_auto_20191006_0137.py | kells4real/MusicApp | 4e4ba065c4f472243413551f63dc4e9eddf7f4a7 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | MyMusicApp/blog/migrations/0010_auto_20191006_0137.py | kells4real/MusicApp | 4e4ba065c4f472243413551f63dc4e9eddf7f4a7 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | MyMusicApp/blog/migrations/0010_auto_20191006_0137.py | kells4real/MusicApp | 4e4ba065c4f472243413551f63dc4e9eddf7f4a7 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-10-06 00:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an ``approved`` moderation flag (default False) to comments and replies."""
    # NOTE: auto-generated by Django 2.2.5; avoid hand-editing operations.

    dependencies = [
        ('blog', '0009_auto_20190914_2201'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='approved',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='commentreply',
            name='approved',
            field=models.BooleanField(default=False),
        ),
    ]
18ee1ed41326548ef737d63d8fd2543089997880 | 562 | py | Python | Regs/Block_1/R1980.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | [
"MIT"
] | 1 | 2021-04-25T13:53:20.000Z | 2021-04-25T13:53:20.000Z | Regs/Block_1/R1980.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | [
"MIT"
] | null | null | null | Regs/Block_1/R1980.py | BernardoB95/Extrator_SPEDFiscal | 10b4697833c561d24654251da5f22d044f03fc16 | [
"MIT"
] | null | null | null | from ..IReg import IReg
class R1980(IReg):
    """SPED Fiscal record 1980: header field names and hierarchy level."""

    def __init__(self):
        # 'REG' and 'IND_AP' followed by the twelve G4_01..G4_12 fields.
        self._header = ['REG', 'IND_AP'] + ['G4_%02d' % n for n in range(1, 13)]
        self._hierarchy = "2"
e712e1b9a26df27f26da57ddc7eafce57824392f | 56,450 | py | Python | salt/client/__init__.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | salt/client/__init__.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | salt/client/__init__.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
The client module is used to create a client connection to the publisher
The data structure needs to be:
{'enc': 'clear',
'load': {'fun': '<mod.callable>',
'arg':, ('arg1', 'arg2', ...),
'tgt': '<glob or id>',
'key': '<read in the key file>'}
'''
# The components here are simple, and they need to be and stay simple, we
# want a client to have 3 external concerns, and maybe a forth configurable
# option.
# The concerns are:
# 1. Who executes the command?
# 2. What is the function being run?
# 3. What arguments need to be passed to the function?
# 4. How long do we wait for all of the replies?
#
# Import python libs
from __future__ import print_function
import os
import time
import copy
import logging
from datetime import datetime
from salt._compat import string_types
# Import salt libs
import salt.config
import salt.payload
import salt.transport
import salt.loader
import salt.utils
import salt.utils.args
import salt.utils.event
import salt.utils.minions
import salt.utils.verify
import salt.syspaths as syspaths
from salt.exceptions import (
EauthAuthenticationError, SaltInvocationError, SaltReqTimeoutError
)
# Try to import range from https://github.com/ytoolshed/range
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
log = logging.getLogger(__name__)
def get_local_client(
        c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
        mopts=None,
        skip_perm_errors=False):
    '''
    .. versionadded:: 2014.7.0

    Read in the config and return the correct LocalClient object based on
    the configured transport

    :param c_path: path to the master config file; only read when ``mopts``
        is not supplied
    :param mopts: pre-loaded master opts dictionary; bypasses the config read
    :param skip_perm_errors: forwarded to the ZeroMQ LocalClient
    '''
    if mopts:
        opts = mopts
    else:
        # salt.config is imported at module level; the redundant local
        # import has been removed.
        opts = salt.config.client_config(c_path)
    if opts['transport'] == 'raet':
        import salt.client.raet
        return salt.client.raet.LocalClient(mopts=opts)
    elif opts['transport'] == 'zeromq':
        return LocalClient(mopts=opts, skip_perm_errors=skip_perm_errors)
    # NOTE(review): an unrecognized transport silently returns None and
    # callers will fail later with AttributeError -- confirm whether raising
    # here would be preferable.
class LocalClient(object):
'''
The interface used by the :command:`salt` CLI tool on the Salt Master
``LocalClient`` is used to send a command to Salt minions to execute
:ref:`execution modules <all-salt.modules>` and return the results to the
Salt Master.
Importing and using ``LocalClient`` must be done on the same machine as the
Salt Master and it must be done using the same user that the Salt Master is
running as. (Unless :conf_master:`external_auth` is configured and
authentication credentials are included in the execution).
.. code-block:: python
import salt.client
local = salt.client.LocalClient()
local.cmd('*', 'test.fib', [10])
'''
    def __init__(self,
                 c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
                 mopts=None, skip_perm_errors=False):
        '''
        :param c_path: path to the master config file; only read when
            ``mopts`` is not supplied
        :param mopts: pre-loaded master opts dictionary; bypasses the
            config read
        :param skip_perm_errors: ignore permission errors while verifying
            access to the key path (see __read_master_key)
        '''
        if mopts:
            self.opts = mopts
        else:
            if os.path.isdir(c_path):
                log.warning(
                    '{0} expects a file path not a directory path({1}) to '
                    'it\'s \'c_path\' keyword argument'.format(
                        self.__class__.__name__, c_path
                    )
                )
            self.opts = salt.config.client_config(c_path)
        self.serial = salt.payload.Serial(self.opts)
        self.salt_user = self.__get_user()
        self.skip_perm_errors = skip_perm_errors
        self.key = self.__read_master_key()
        # Do not open a listening event socket when running inside a
        # master worker process (opts['__worker'] is truthy).
        self.event = salt.utils.event.get_event(
                'master',
                self.opts['sock_dir'],
                self.opts['transport'],
                listen=not self.opts.get('__worker', False))
        self.returners = salt.loader.returners(self.opts, {})
    def __read_master_key(self):
        '''
        Read in the rotating master authentication key

        Returns the key contents, or an empty string when the key file
        cannot be read (callers then fall back to external auth).
        '''
        key_user = self.salt_user
        if key_user == 'root':
            # The master may be configured to run as a non-root user even
            # when this process runs as root; the key file on disk is named
            # after the configured master user.
            if self.opts.get('user', 'root') != 'root':
                key_user = self.opts.get('user', 'root')
        if key_user.startswith('sudo_'):
            # 'sudo_<name>' is a synthetic name produced by __get_user; the
            # key file belongs to the configured master user.
            key_user = self.opts.get('user', 'root')
        keyfile = os.path.join(self.opts['cachedir'],
                               '.{0}_key'.format(key_user))
        # Make sure all key parent directories are accessible
        salt.utils.verify.check_path_traversal(self.opts['cachedir'],
                                               key_user,
                                               self.skip_perm_errors)
        try:
            with salt.utils.fopen(keyfile, 'r') as key:
                return key.read()
        except (OSError, IOError):
            # Fall back to eauth
            return ''
def __get_user(self):
'''
Determine the current user running the salt command
'''
user = salt.utils.get_user()
# if our user is root, look for other ways to figure out
# who we are
env_vars = ('SUDO_USER',)
if user == 'root' or user == self.opts['user']:
for evar in env_vars:
if evar in os.environ:
return 'sudo_{0}'.format(os.environ[evar])
return user
def _convert_range_to_list(self, tgt):
'''
convert a seco.range range into a list target
'''
range_ = seco.range.Range(self.opts['range_server'])
try:
return range_.expand(tgt)
except seco.range.RangeException as err:
print('Range server exception: {0}'.format(err))
return []
def _get_timeout(self, timeout):
'''
Return the timeout to use
'''
if timeout is None:
return self.opts['timeout']
if isinstance(timeout, int):
return timeout
if isinstance(timeout, string_types):
try:
return int(timeout)
except ValueError:
return self.opts['timeout']
# Looks like the timeout is invalid, use config
return self.opts['timeout']
    def gather_job_info(self, jid, tgt, tgt_type, minions, **kwargs):
        '''
        Return the information about a given job

        Publishes ``saltutil.find_job`` to the target to discover which
        minions are still running ``jid``. ``minions`` is updated in place
        with any additional minions reported by the publish.
        '''
        log.debug('Checking whether jid {0} is still running'.format(jid))
        # Use the dedicated (shorter) gather timeout, not the job timeout.
        timeout = self.opts['gather_job_timeout']
        pub_data = self.run_job(tgt,
                                'saltutil.find_job',
                                arg=[jid],
                                expr_form=tgt_type,
                                timeout=timeout,
                                )
        if not pub_data:
            return pub_data
        minions.update(pub_data['minions'])
        return self.get_returns(pub_data['jid'],
                                minions,
                                self._get_timeout(timeout),
                                pending_tags=[jid])
def _check_pub_data(self, pub_data):
'''
Common checks on the pub_data data structure returned from running pub
'''
if not pub_data:
# Failed to autnenticate, this could be a bunch of things
raise EauthAuthenticationError(
'Failed to authenticate! This is most likely because this '
'user is not permitted to execute commands, but there is a '
'small possibility that a disk error ocurred (check '
'disk/inode usage).'
)
# Failed to connect to the master and send the pub
if 'jid' not in pub_data:
return {}
if pub_data['jid'] == '0':
print('Failed to connect to the Master, '
'is the Salt Master running?')
return {}
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not pub_data['minions']:
print('No minions matched the target. '
'No command was sent, no jid was assigned.')
return {}
return pub_data
    def run_job(
            self,
            tgt,
            fun,
            arg=(),
            expr_form='glob',
            ret='',
            timeout=None,
            kwarg=None,
            **kwargs):
        '''
        Asynchronously send a command to connected minions

        Prep the job directory and publish a command to any targeted minions.

        :return: A dictionary of (validated) ``pub_data`` or an empty
            dictionary on failure. The ``pub_data`` contains the job ID and a
            list of all minions that are expected to return data.

        .. code-block:: python

            >>> local.run_job('*', 'test.sleep', [300])
            {'jid': '20131219215650131543', 'minions': ['jerry']}
        '''
        arg = salt.utils.args.condition_input(arg, kwarg)
        jid = ''
        # Subscribe to all events and subscribe as early as possible
        # (before publishing) so no early minion returns are missed.
        self.event.subscribe(jid)
        pub_data = self.pub(
            tgt,
            fun,
            arg,
            expr_form,
            ret,
            jid=jid,
            timeout=self._get_timeout(timeout),
            **kwargs)
        return self._check_pub_data(pub_data)
def cmd_async(
self,
tgt,
fun,
arg=(),
expr_form='glob',
ret='',
kwarg=None,
**kwargs):
'''
Asynchronously send a command to connected minions
The function signature is the same as :py:meth:`cmd` with the
following exceptions.
:returns: A job ID or 0 on failure.
.. code-block:: python
>>> local.cmd_async('*', 'test.sleep', [300])
'20131219215921857715'
'''
arg = salt.utils.args.condition_input(arg, kwarg)
pub_data = self.run_job(tgt,
fun,
arg,
expr_form,
ret,
**kwargs)
try:
return pub_data['jid']
except KeyError:
return 0
def cmd_subset(
self,
tgt,
fun,
arg=(),
expr_form='glob',
ret='',
kwarg=None,
sub=3,
cli=False,
**kwargs):
'''
Execute a command on a random subset of the targeted systems
The function signature is the same as :py:meth:`cmd` with the
following exceptions.
:param sub: The number of systems to execute on
.. code-block:: python
>>> SLC.cmd_subset('*', 'test.ping', sub=1)
{'jerry': True}
'''
group = self.cmd(tgt, 'sys.list_functions', expr_form=expr_form)
f_tgt = []
for minion, ret in group.items():
if len(f_tgt) >= sub:
break
if fun in ret:
f_tgt.append(minion)
func = self.cmd
if cli:
func = self.cmd_cli
return func(
f_tgt,
fun,
arg,
expr_form='list',
ret=ret,
kwarg=kwarg,
**kwargs)
    def cmd_batch(
            self,
            tgt,
            fun,
            arg=(),
            expr_form='glob',
            ret='',
            kwarg=None,
            batch='10%',
            **kwargs):
        '''
        Iteratively execute a command on subsets of minions at a time

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :param batch: The batch identifier of systems to execute on (e.g.
            a count such as ``'10'`` or a percentage such as ``'10%'``)

        :returns: A generator of minion returns

        .. code-block:: python

            >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%')
            >>> for return in returns:
            ...     print return
            {'jerry': {...}}
            {'dave': {...}}
            {'stewart': {...}}
        '''
        # NOTE(review): imported locally, presumably to avoid a circular
        # import with the CLI package -- confirm before moving to the top.
        import salt.cli.batch
        arg = salt.utils.args.condition_input(arg, kwarg)
        opts = {'tgt': tgt,
                'fun': fun,
                'arg': arg,
                'expr_form': expr_form,
                'ret': ret,
                'batch': batch,
                'raw': kwargs.get('raw', False)}
        # Fill in everything else from the master opts without overriding
        # the job-specific keys above.
        for key, val in self.opts.items():
            if key not in opts:
                opts[key] = val
        # Note: rebinds `batch` from the identifier string to a Batch object.
        batch = salt.cli.batch.Batch(opts, quiet=True)
        for ret in batch.run():
            yield ret
    def cmd(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            ret='',
            kwarg=None,
            **kwargs):
        '''
        Synchronously execute a command on targeted minions

        The cmd method will execute and wait for the timeout period for all
        minions to reply, then it will return all minion data at once.

        .. code-block:: python

            >>> import salt.client
            >>> local = salt.client.LocalClient()
            >>> local.cmd('*', 'cmd.run', ['whoami'])
            {'jerry': 'root'}

        With extra keyword arguments for the command function to be run:

        .. code-block:: python

            local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'})

        Compound commands can be used for multiple executions in a single
        publish. Function names and function arguments are provided in separate
        lists but the index values must correlate and an empty list must be
        used if no arguments are required.

        .. code-block:: python

            >>> local.cmd('*', [
                    'grains.items',
                    'sys.doc',
                    'cmd.run',
                ],
                [
                    [],
                    [],
                    ['uptime'],
                ])

        :param tgt: Which minions to target for the execution. Default is shell
            glob. Modified by the ``expr_form`` option.
        :type tgt: string or list

        :param fun: The module and function to call on the specified minions of
            the form ``module.function``. For example ``test.ping`` or
            ``grains.items``.

            Compound commands
                Multiple functions may be called in a single publish by
                passing a list of commands. This can dramatically lower
                overhead and speed up the application communicating with Salt.

                This requires that the ``arg`` param is a list of lists. The
                ``fun`` list and the ``arg`` list must correlate by index
                meaning a function that does not take arguments must still have
                a corresponding empty list at the expected index.
        :type fun: string or list of strings

        :param arg: A list of arguments to pass to the remote function. If the
            function takes no arguments ``arg`` may be omitted except when
            executing a compound command.
        :type arg: list or list-of-lists

        :param timeout: Seconds to wait after the last minion returns but
            before all minions return.

        :param expr_form: The type of ``tgt``. Allowed values:

            * ``glob`` - Bash glob completion - Default
            * ``pcre`` - Perl style regular expression
            * ``list`` - Python list of hosts
            * ``grain`` - Match based on a grain comparison
            * ``grain_pcre`` - Grain comparison with a regex
            * ``pillar`` - Pillar data comparison
            * ``nodegroup`` - Match on nodegroup
            * ``range`` - Use a Range server for matching
            * ``compound`` - Pass a compound match string

        :param ret: The returner to use. The value passed can be single
            returner, or a comma delimited list of returners to call in order
            on the minions

        :param kwarg: A dictionary with keyword arguments for the function.

        :param kwargs: Optional keyword arguments.
            Authentication credentials may be passed when using
            :conf_master:`external_auth`.

            For example: ``local.cmd('*', 'test.ping', username='saltdev',
            password='saltdev', eauth='pam')``.
            Or: ``local.cmd('*', 'test.ping',
            token='5871821ea51754fdcea8153c1c745433')``

        :returns: A dictionary with the result of the execution, keyed by
            minion ID. A compound command will return a sub-dictionary keyed by
            function name.
        '''
        arg = salt.utils.args.condition_input(arg, kwarg)
        pub_data = self.run_job(tgt,
                                fun,
                                arg,
                                expr_form,
                                ret,
                                timeout,
                                **kwargs)
        if not pub_data:
            return pub_data
        # Note: `ret` is rebound here from the returner string to the
        # accumulated results dictionary.
        ret = {}
        # Drain the event stream, keeping only each minion's 'ret' payload.
        for fn_ret in self.get_cli_event_returns(
                pub_data['jid'],
                pub_data['minions'],
                self._get_timeout(timeout),
                tgt,
                expr_form,
                **kwargs):
            if fn_ret:
                for mid, data in fn_ret.items():
                    ret[mid] = data.get('ret', {})
        return ret
    def cmd_cli(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            ret='',
            verbose=False,
            kwarg=None,
            **kwargs):
        '''
        Used by the :command:`salt` CLI. This method returns minion returns
        as they come back and attempts to block until all minions return.

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :param verbose: Print extra information about the running command
        :returns: A generator
        '''
        arg = salt.utils.args.condition_input(arg, kwarg)
        pub_data = self.run_job(
            tgt,
            fun,
            arg,
            expr_form,
            ret,
            timeout,
            **kwargs)
        if not pub_data:
            yield pub_data
        else:
            try:
                for fn_ret in self.get_cli_event_returns(
                        pub_data['jid'],
                        pub_data['minions'],
                        self._get_timeout(timeout),
                        tgt,
                        expr_form,
                        verbose,
                        **kwargs):
                    if not fn_ret:
                        continue
                    yield fn_ret
            except KeyboardInterrupt:
                # Ctrl-C only stops the wait; the published job keeps
                # running on the minions, so tell the user how to look it up.
                msg = ('Exiting on Ctrl-C\nThis job\'s jid is:\n{0}\n'
                       'The minions may not have all finished running and any '
                       'remaining minions will return upon completion. To '
                       'look up the return data for this job later run:\n'
                       'salt-run jobs.lookup_jid {0}').format(pub_data['jid'])
                raise SystemExit(msg)
    def cmd_iter(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            ret='',
            kwarg=None,
            **kwargs):
        '''
        Yields the individual minion returns as they come in

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :return: A generator

        .. code-block:: python

            >>> ret = local.cmd_iter('*', 'test.ping')
            >>> for i in ret:
            ...     print i
            {'jerry': {'ret': True}}
            {'dave': {'ret': True}}
            {'stewart': {'ret': True}}
        '''
        arg = salt.utils.args.condition_input(arg, kwarg)
        pub_data = self.run_job(
            tgt,
            fun,
            arg,
            expr_form,
            ret,
            timeout,
            **kwargs)
        if not pub_data:
            yield pub_data
        else:
            # Skip empty payloads -- compare cmd_iter_no_block, which
            # yields them so callers can interleave other work.
            for fn_ret in self.get_iter_returns(pub_data['jid'],
                                                pub_data['minions'],
                                                self._get_timeout(timeout),
                                                tgt,
                                                expr_form,
                                                **kwargs):
                if not fn_ret:
                    continue
                yield fn_ret
    def cmd_iter_no_block(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            ret='',
            kwarg=None,
            **kwargs):
        '''
        Blocks while waiting for individual minions to return.

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :returns: None until the next minion returns. This allows for actions
            to be injected in between minion returns.

        .. code-block:: python

            >>> ret = local.cmd_iter('*', 'test.ping')
            >>> for i in ret:
            ...     print i
            None
            {'jerry': {'ret': True}}
            {'dave': {'ret': True}}
            None
            {'stewart': {'ret': True}}
        '''
        arg = salt.utils.args.condition_input(arg, kwarg)
        pub_data = self.run_job(
            tgt,
            fun,
            arg,
            expr_form,
            ret,
            timeout,
            **kwargs)
        if not pub_data:
            yield pub_data
        else:
            # Unlike cmd_iter, empty payloads are yielded through (so the
            # caller can do other work between returns).
            # NOTE(review): passes the raw `timeout` instead of
            # self._get_timeout(timeout) as cmd_iter does -- confirm whether
            # this inconsistency is intentional.
            for fn_ret in self.get_iter_returns(pub_data['jid'],
                                                pub_data['minions'],
                                                timeout,
                                                tgt,
                                                expr_form,
                                                **kwargs):
                yield fn_ret
    def cmd_full_return(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            ret='',
            verbose=False,
            kwarg=None,
            **kwargs):
        '''
        Execute a salt command and return

        Synchronous variant that returns the full static event returns
        (see get_cli_static_event_returns) rather than just the 'ret'
        payloads; returns the empty pub_data dict on publish failure.
        '''
        arg = salt.utils.args.condition_input(arg, kwarg)
        pub_data = self.run_job(
            tgt,
            fun,
            arg,
            expr_form,
            ret,
            timeout,
            **kwargs)
        if not pub_data:
            return pub_data
        return (self.get_cli_static_event_returns(pub_data['jid'],
                                                  pub_data['minions'],
                                                  timeout,
                                                  tgt,
                                                  expr_form,
                                                  verbose))
    def get_cli_returns(
            self,
            jid,
            minions,
            timeout=None,
            tgt='*',
            tgt_type='glob',
            verbose=False,
            show_jid=False,
            **kwargs):
        '''
        Starts a watcher looking at the return data for a specified JID

        Yields return chunks first from the job cache, then from the event
        system, stopping once every expected minion has been seen.

        :returns: all of the information for the JID
        '''
        if verbose:
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
        elif show_jid:
            print('jid: {0}'.format(jid))
        if timeout is None:
            timeout = self.opts['timeout']
        # NOTE(review): `fret` is assigned but never used in this method.
        fret = {}
        # make sure the minions is a set (since we do set operations on it)
        minions = set(minions)
        found = set()
        # start this before the cache lookup-- in case new stuff comes in
        event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout)
        # get the info from the cache
        ret = self.get_cache_returns(jid)
        if ret != {}:
            found.update(set(ret.keys()))
            yield ret
        # if you have all the returns, stop
        if len(found.intersection(minions)) >= len(minions):
            raise StopIteration()
        # otherwise, get them from the event system
        for event in event_iter:
            if event != {}:
                found.update(set(event.keys()))
                yield event
            if len(found.intersection(minions)) >= len(minions):
                raise StopIteration()
    def get_iter_returns(
            self,
            jid,
            minions,
            timeout=None,
            tgt='*',
            tgt_type='glob',
            expect_minions=False,
            **kwargs):
        '''
        Watch the event system and return job data as it comes in

        :param expect_minions: when True, yield ``{minion: {'failed': True}}``
            for every minion that never returned before the final timeout
        :returns: all of the information for the JID
        '''
        # Normalize `minions` to a set so intersection/difference work below.
        if not isinstance(minions, set):
            if isinstance(minions, string_types):
                minions = set([minions])
            elif isinstance(minions, (list, tuple)):
                minions = set(list(minions))
        if timeout is None:
            timeout = self.opts['timeout']
        start = int(time.time())
        timeout_at = start + timeout
        found = set()
        # Check to see if the jid is real, if not return the empty dict
        if not self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) != {}:
            log.warning('jid does not exist')
            yield {}
            # stop the iteration, since the jid is invalid
            raise StopIteration()
        # Wait for the hosts to check in
        syndic_wait = 0
        last_time = False
        log.debug(
            'get_iter_returns for jid {0} sent to {1} will timeout at {2}'.format(
                jid, minions, datetime.fromtimestamp(timeout_at).time()
            )
        )
        while True:
            # Process events until timeout is reached or all minions have returned
            time_left = timeout_at - int(time.time())
            # Wait 0 == forever, use a minimum of 1s
            wait = max(1, time_left)
            raw = None
            # Look for events if we haven't yet found all the minions or if we are still waiting for
            # the syndics to report on how many minions they have forwarded the command to
            if (len(found.intersection(minions)) < len(minions) or
                    (self.opts['order_masters'] and syndic_wait < self.opts.get('syndic_wait', 1))):
                raw = self.event.get_event(wait, jid)
            if raw is not None:
                # A syndic can expand the expected minion set mid-run.
                if 'minions' in raw.get('data', {}):
                    minions.update(raw['data']['minions'])
                    continue
                if 'syndic' in raw:
                    minions.update(raw['syndic'])
                    continue
                if 'return' not in raw:
                    continue
                if kwargs.get('raw', False):
                    found.add(raw['id'])
                    yield raw
                else:
                    found.add(raw['id'])
                    ret = {raw['id']: {'ret': raw['return']}}
                    if 'out' in raw:
                        ret[raw['id']]['out'] = raw['out']
                    log.debug('jid {0} return from {1}'.format(jid, raw['id']))
                    yield ret
                if len(found.intersection(minions)) >= len(minions):
                    # All minions have returned, break out of the loop
                    log.debug('jid {0} found all minions {1}'.format(jid, found))
                    if self.opts['order_masters']:
                        # Give downstream syndics one extra second to report.
                        if syndic_wait < self.opts.get('syndic_wait', 1):
                            syndic_wait += 1
                            timeout_at = int(time.time()) + 1
                            log.debug('jid {0} syndic_wait {1} will now timeout at {2}'.format(
                                jid, syndic_wait, datetime.fromtimestamp(timeout_at).time()))
                            continue
                    break
                continue
            # Then event system timeout was reached and nothing was returned
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                log.debug('jid {0} found all minions {1}'.format(jid, found))
                if self.opts['order_masters']:
                    if syndic_wait < self.opts.get('syndic_wait', 1):
                        syndic_wait += 1
                        timeout_at = int(time.time()) + 1
                        log.debug(
                            'jid {0} syndic_wait {1} will now timeout at {2}'.format(
                                jid, syndic_wait, datetime.fromtimestamp(timeout_at).time()
                            )
                        )
                        continue
                break
            if last_time:
                # Second consecutive timeout with no minion still running:
                # report the stragglers and give up.
                if len(found) < len(minions):
                    log.info(
                        'jid {0} minions {1} did not return in time'.format(
                            jid, (minions - found)
                        )
                    )
                if expect_minions:
                    for minion in list((minions - found)):
                        yield {minion: {'failed': True}}
                break
            if int(time.time()) > timeout_at:
                # The timeout has been reached, check the jid to see if the
                # timeout needs to be increased
                jinfo = self.gather_job_info(jid, tgt, tgt_type, minions - found, **kwargs)
                # NOTE: dict.iteritems is Python 2 only.
                still_running = [id_ for id_, jdat in jinfo.iteritems()
                                 if jdat
                                 ]
                if still_running:
                    # Minions still executing: extend the deadline.
                    timeout_at = int(time.time()) + timeout
                    log.debug(
                        'jid {0} still running on {1} will now timeout at {2}'.format(
                            jid, still_running, datetime.fromtimestamp(timeout_at).time()
                        )
                    )
                    continue
                else:
                    last_time = True
                    log.debug('jid {0} not running on any minions last time'.format(jid))
                    continue
            time.sleep(0.01)
    def get_returns(
            self,
            jid,
            minions,
            timeout=None,
            pending_tags=None):
        '''
        Get the returns for the command line interface via the event system

        :param jid: job id whose returns are collected
        :param minions: iterable of minion ids expected to return
        :param timeout: seconds to wait; defaults to self.opts['timeout']
        :param pending_tags: passed through to the event bus fetch
        :return: dict mapping minion id -> its raw return value
        '''
        minions = set(minions)
        if timeout is None:
            timeout = self.opts['timeout']
        start = int(time.time())
        timeout_at = start + timeout
        log.debug(
            'get_returns for jid {0} sent to {1} will timeout at {2}'.format(
                jid, minions, datetime.fromtimestamp(timeout_at).time()
            )
        )
        found = set()
        ret = {}
        # Check to see if the jid is real, if not return the empty dict
        # (double negative: "load == {}" means the jid was never published)
        if not self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) != {}:
            log.warning('jid does not exist')
            return ret
        # Wait for the hosts to check in
        while True:
            time_left = timeout_at - int(time.time())
            # event bus wait of 0 would block forever, so use at least 1s
            wait = max(1, time_left)
            raw = self.event.get_event(wait, jid, pending_tags=pending_tags)
            if raw is not None and 'return' in raw:
                found.add(raw['id'])
                ret[raw['id']] = raw['return']
                if len(found.intersection(minions)) >= len(minions):
                    # All minions have returned, break out of the loop
                    log.debug('jid {0} found all minions'.format(jid))
                    break
                continue
            # Then event system timeout was reached and nothing was returned
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                log.debug('jid {0} found all minions'.format(jid))
                break
            if int(time.time()) > timeout_at:
                log.info(
                    'jid {0} minions {1} did not return in time'.format(
                        jid, (minions - found)
                    )
                )
                break
            time.sleep(0.01)
        return ret
    def get_full_returns(self, jid, minions, timeout=None):
        '''
        This method starts off a watcher looking at the return data for
        a specified jid, it returns all of the information for the jid

        :param jid: job id to watch
        :param minions: iterable of minion ids expected to return
        :param timeout: seconds to wait for the event-based remainder
        :return: dict mapping minion id -> {'ret': ..., 'out': ...}
        '''
        # TODO: change this from ret to return... or the other way.
        #       Its inconsistent, we should pick one
        ret = {}
        # create the iterator-- since we want to get anyone in the middle
        event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout)
        # First pass: whatever the job cache already holds for this jid.
        data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid)
        for minion in data:
            m_data = {}
            # NOTE(review): u'return' and 'return' are the same key for
            # ASCII text, so both branches fetch the identical value.
            if u'return' in data[minion]:
                m_data['ret'] = data[minion].get(u'return')
            else:
                m_data['ret'] = data[minion].get('return')
            if 'out' in data[minion]:
                m_data['out'] = data[minion]['out']
            if minion in ret:
                ret[minion].update(m_data)
            else:
                ret[minion] = m_data
        # if we have all the minion returns, lets just return
        if len(set(ret.keys()).intersection(minions)) >= len(minions):
            return ret
        # otherwise lets use the listener we created above to get the rest
        for event_ret in event_iter:
            # if nothing in the event_ret, skip
            if event_ret == {}:
                time.sleep(0.02)
                continue
            for minion, m_data in event_ret.iteritems():
                if minion in ret:
                    ret[minion].update(m_data)
                else:
                    ret[minion] = m_data
            # are we done yet?
            if len(set(ret.keys()).intersection(minions)) >= len(minions):
                return ret
        # otherwise we hit the timeout, return what we have
        return ret
def get_cache_returns(self, jid):
'''
Execute a single pass to gather the contents of the job cache
'''
ret = {}
data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid)
for minion in data:
m_data = {}
if u'return' in data[minion]:
m_data['ret'] = data[minion].get(u'return')
else:
m_data['ret'] = data[minion].get('return')
if 'out' in data[minion]:
m_data['out'] = data[minion]['out']
if minion in ret:
ret[minion].update(m_data)
else:
ret[minion] = m_data
return ret
    def get_cli_static_event_returns(
            self,
            jid,
            minions,
            timeout=None,
            tgt='*',
            tgt_type='glob',
            verbose=False,
            show_timeout=False,
            show_jid=False):
        '''
        Get the returns for the command line interface via the event system

        :param jid: job id whose events are collected
        :param minions: iterable of minion ids expected to return
        :param timeout: seconds to wait; defaults to self.opts['timeout']
        :param tgt: original target expression (used for timeout reporting)
        :param tgt_type: target matcher type, e.g. 'glob', 'pcre', 'list'
        :param verbose: print a banner and report non-returning minions
        :param show_timeout: report non-returning minions without the banner
        :param show_jid: print only the jid line
        :return: dict mapping minion id -> {'ret': ..., 'success': ...}
        '''
        log.trace('entered - function get_cli_static_event_returns()')
        minions = set(minions)
        if verbose:
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
        elif show_jid:
            print('jid: {0}'.format(jid))
        if timeout is None:
            timeout = self.opts['timeout']
        start = int(time.time())
        timeout_at = start + timeout
        found = set()
        ret = {}
        # Check to see if the jid is real, if not return the empty dict
        # (double negative: "load == {}" means the jid was never published)
        if not self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) != {}:
            log.warning('jid does not exist')
            return ret
        # Wait for the hosts to check in
        while True:
            # Process events until timeout is reached or all minions have returned
            time_left = timeout_at - int(time.time())
            # Wait 0 == forever, use a minimum of 1s
            wait = max(1, time_left)
            raw = self.event.get_event(wait, jid)
            if raw is not None and 'return' in raw:
                if 'minions' in raw.get('data', {}):
                    # publish event carrying the authoritative minion list;
                    # grow the expected set rather than recording a return
                    minions.update(raw['data']['minions'])
                    continue
                found.add(raw['id'])
                ret[raw['id']] = {'ret': raw['return']}
                ret[raw['id']]['success'] = raw.get('success', False)
                if 'out' in raw:
                    ret[raw['id']]['out'] = raw['out']
                if len(found.intersection(minions)) >= len(minions):
                    # All minions have returned, break out of the loop
                    break
                continue
            # Then event system timeout was reached and nothing was returned
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                break
            if int(time.time()) > timeout_at:
                if verbose or show_timeout:
                    if self.opts.get('minion_data_cache', False) \
                            or tgt_type in ('glob', 'pcre', 'list'):
                        if len(found) < len(minions):
                            fail = sorted(list(minions.difference(found)))
                            for minion in fail:
                                ret[minion] = {
                                    'out': 'no_return',
                                    'ret': 'Minion did not return'
                                }
                break
            time.sleep(0.01)
        return ret
def get_cli_event_returns(
self,
jid,
minions,
timeout=None,
tgt='*',
tgt_type='glob',
verbose=False,
show_timeout=False,
show_jid=False,
**kwargs):
'''
Get the returns for the command line interface via the event system
'''
log.trace('func get_cli_event_returns()')
if not isinstance(minions, set):
if isinstance(minions, string_types):
minions = set([minions])
elif isinstance(minions, (list, tuple)):
minions = set(list(minions))
if verbose:
msg = 'Executing job with jid {0}'.format(jid)
print(msg)
print('-' * len(msg) + '\n')
elif show_jid:
print('jid: {0}'.format(jid))
if timeout is None:
timeout = self.opts['timeout']
start = time.time()
timeout_at = start + timeout
found = set()
# Check to see if the jid is real, if not return the empty dict
if not self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) != {}:
log.warning('jid does not exist')
yield {}
# stop the iteration, since the jid is invalid
raise StopIteration()
# Wait for the hosts to check in
syndic_wait = 0
last_time = False
while True:
# Process events until timeout is reached or all minions have returned
time_left = timeout_at - time.time()
# Wait 0 == forever, use a minimum of 1s
wait = max(1, time_left)
raw = self.event.get_event(wait, jid)
log.trace('get_cli_event_returns() called self.event.get_event() and received: raw={0}'.format(raw))
if raw is not None:
if 'minions' in raw.get('data', {}):
minions.update(raw['data']['minions'])
continue
if 'syndic' in raw:
minions.update(raw['syndic'])
continue
if 'return' not in raw:
continue
found.add(raw.get('id'))
ret = {raw['id']: {'ret': raw['return']}}
if 'out' in raw:
ret[raw['id']]['out'] = raw['out']
if 'retcode' in raw:
ret[raw['id']]['retcode'] = raw['retcode']
log.trace('raw = {0}'.format(raw))
log.trace('ret = {0}'.format(ret))
log.trace('yeilding \'ret\'')
yield ret
if len(found.intersection(minions)) >= len(minions):
# All minions have returned, break out of the loop
if self.opts['order_masters']:
if syndic_wait < self.opts.get('syndic_wait', 1):
syndic_wait += 1
timeout_at = time.time() + 1
continue
break
continue
# Then event system timeout was reached and nothing was returned
if len(found.intersection(minions)) >= len(minions):
# All minions have returned, break out of the loop
if self.opts['order_masters']:
if syndic_wait < self.opts.get('syndic_wait', 1):
syndic_wait += 1
timeout_at = time.time() + 1
continue
break
if last_time:
if verbose or show_timeout:
if self.opts.get('minion_data_cache', False) \
or tgt_type in ('glob', 'pcre', 'list'):
if len(found) < len(minions):
fail = sorted(list(minions.difference(found)))
for minion in fail:
yield({
minion: {
'out': 'no_return',
'ret': 'Minion did not return'
}
})
break
if time.time() > timeout_at:
# The timeout has been reached, check the jid to see if the
# timeout needs to be increased
jinfo = self.gather_job_info(jid, tgt, tgt_type, minions - found, **kwargs)
more_time = False
for id_ in jinfo:
if jinfo[id_]:
if verbose:
print(
'Execution is still running on {0}'.format(id_)
)
more_time = True
if more_time:
timeout_at = time.time() + timeout
continue
else:
last_time = True
time.sleep(0.01)
def get_event_iter_returns(self, jid, minions, timeout=None):
'''
Gather the return data from the event system, break hard when timeout
is reached.
'''
log.trace('entered - function get_event_iter_returns()')
if timeout is None:
timeout = self.opts['timeout']
found = set()
# Check to see if the jid is real, if not return the empty dict
if not self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) != {}:
log.warning('jid does not exist')
yield {}
# stop the iteration, since the jid is invalid
raise StopIteration()
# Wait for the hosts to check in
while True:
raw = self.event.get_event(timeout)
if raw is None:
# Timeout reached
break
if 'minions' in raw.get('data', {}):
continue
found.add(raw['id'])
ret = {raw['id']: {'ret': raw['return']}}
if 'out' in raw:
ret[raw['id']]['out'] = raw['out']
yield ret
time.sleep(0.02)
def _prep_pub(self,
tgt,
fun,
arg,
expr_form,
ret,
jid,
timeout,
**kwargs):
'''
Set up the payload_kwargs to be sent down to the master
'''
if expr_form == 'nodegroup':
if tgt not in self.opts['nodegroups']:
conf_file = self.opts.get(
'conf_file', 'the master config file'
)
raise SaltInvocationError(
'Node group {0} unavailable in {1}'.format(
tgt, conf_file
)
)
tgt = salt.utils.minions.nodegroup_comp(tgt,
self.opts['nodegroups'])
expr_form = 'compound'
# Convert a range expression to a list of nodes and change expression
# form to list
if expr_form == 'range' and HAS_RANGE:
tgt = self._convert_range_to_list(tgt)
expr_form = 'list'
# If an external job cache is specified add it to the ret list
if self.opts.get('ext_job_cache'):
if ret:
ret += ',{0}'.format(self.opts['ext_job_cache'])
else:
ret = self.opts['ext_job_cache']
# format the payload - make a function that does this in the payload
# module
# Generate the standard keyword args to feed to format_payload
payload_kwargs = {'cmd': 'publish',
'tgt': tgt,
'fun': fun,
'arg': arg,
'key': self.key,
'tgt_type': expr_form,
'ret': ret,
'jid': jid}
# if kwargs are passed, pack them.
if kwargs:
payload_kwargs['kwargs'] = kwargs
# If we have a salt user, add it to the payload
if self.salt_user:
payload_kwargs['user'] = self.salt_user
# If we're a syndication master, pass the timeout
if self.opts['order_masters']:
payload_kwargs['to'] = timeout
return payload_kwargs
    def pub(self,
            tgt,
            fun,
            arg=(),
            expr_form='glob',
            ret='',
            jid='',
            timeout=5,
            **kwargs):
        '''
        Take the required arguments and publish the given command.
        Arguments:
        tgt:
            The tgt is a regex or a glob used to match up the ids on
            the minions. Salt works by always publishing every command
            to all of the minions and then the minions determine if
            the command is for them based on the tgt value.
        fun:
            The function name to be called on the remote host(s), this
            must be a string in the format "<modulename>.<function name>"
        arg:
            The arg option needs to be a tuple of arguments to pass
            to the calling function, if left blank
        Returns:
        jid:
            A string, as returned by the publisher, which is the job
            id, this will inform the client where to get the job results
        minions:
            A set, the targets that the tgt passed should match.
        '''
        # Make sure the publisher is running by checking the unix socket
        if not os.path.exists(os.path.join(self.opts['sock_dir'],
                                           'publish_pull.ipc')):
            log.error(
                'Unable to connect to the publisher! '
                'You do not have permissions to access '
                '{0}'.format(self.opts['sock_dir'])
            )
            # sentinel result: jid '0' with no minions signals failure
            return {'jid': '0', 'minions': []}
        payload_kwargs = self._prep_pub(
            tgt,
            fun,
            arg,
            expr_form,
            ret,
            jid,
            timeout,
            **kwargs)
        # Publish goes over the clear (unencrypted-auth) return channel
        master_uri = 'tcp://' + salt.utils.ip_bracket(self.opts['interface']) + \
            ':' + str(self.opts['ret_port'])
        sreq = salt.transport.Channel.factory(self.opts, crypt='clear', master_uri=master_uri)
        try:
            payload = sreq.send(payload_kwargs)
        except SaltReqTimeoutError:
            log.error(
                'Salt request timed out. If this error persists, '
                'worker_threads may need to be increased.'
            )
            return {}
        if not payload:
            # The master key could have changed out from under us! Regen
            # and try again if the key has changed
            key = self.__read_master_key()
            if key == self.key:
                return payload
            self.key = key
            payload_kwargs['key'] = self.key
            payload = sreq.send(payload_kwargs)
            if not payload:
                return payload
        # We have the payload, let's get rid of SREQ fast(GC'ed faster)
        del sreq
        return {'jid': payload['load']['jid'],
                'minions': payload['load']['minions']}
def __del__(self):
# This IS really necessary!
# When running tests, if self.events is not destroyed, we leak 2
# threads per test case which uses self.client
if hasattr(self, 'event'):
# The call bellow will take care of calling 'self.event.destroy()'
del self.event
class SSHClient(object):
    '''
    Client object for executing routines through the salt-ssh backend.
    '''
    def __init__(self,
                 c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
                 mopts=None):
        '''
        :param c_path: path to the master configuration file
        :param mopts: pre-parsed options dictionary; when given, c_path
            is ignored entirely
        '''
        if mopts:
            self.opts = mopts
            return
        # Passing a directory is a common mistake; warn, then still try
        # to parse it as a config file to match historical behavior.
        if os.path.isdir(c_path):
            log.warning(
                '{0} expects a file path not a directory path({1}) to '
                'it\'s \'c_path\' keyword argument'.format(
                    self.__class__.__name__, c_path
                )
            )
        self.opts = salt.config.client_config(c_path)

    def _prep_ssh(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            kwarg=None,
            **kwargs):
        '''
        Assemble the per-run opts dictionary and wrap it in a
        :class:`salt.client.ssh.SSH` runner object.
        '''
        run_opts = copy.deepcopy(self.opts)
        run_opts.update(kwargs)
        run_opts['timeout'] = timeout
        # Fold kwarg into the positional argument list
        arg = salt.utils.args.condition_input(arg, kwarg)
        run_opts['argv'] = [fun] + arg
        run_opts['selected_target_option'] = expr_form
        run_opts['tgt'] = tgt
        run_opts['arg'] = arg
        return salt.client.ssh.SSH(run_opts)

    def cmd_iter(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            ret='',
            kwarg=None,
            **kwargs):
        '''
        Execute a single command via the salt-ssh subsystem, yielding
        result chunks as each routine completes.
        '''
        runner = self._prep_ssh(
            tgt,
            fun,
            arg,
            timeout,
            expr_form,
            kwarg,
            **kwargs)
        for chunk in runner.run_iter():
            yield chunk

    def cmd(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            expr_form='glob',
            kwarg=None,
            **kwargs):
        '''
        Execute a single command via the salt-ssh subsystem and merge
        every routine's result into one dictionary.
        '''
        runner = self._prep_ssh(
            tgt,
            fun,
            arg,
            timeout,
            expr_form,
            kwarg,
            **kwargs)
        merged = {}
        for chunk in runner.run_iter():
            merged.update(chunk)
        return merged
class FunctionWrapper(dict):
    '''
    Create a function wrapper that looks like the functions dict on the minion
    but invokes commands on the minion via a LocalClient.
    This allows SLS files to be loaded with an object that calls down to the
    minion when the salt functions dict is referenced.
    '''
    def __init__(self, opts, minion):
        super(FunctionWrapper, self).__init__()
        self.opts = opts
        self.minion = minion
        self.local = LocalClient(self.opts['conf_file'])
        self.functions = self.__load_functions()
    def __missing__(self, key):
        '''
        Since the function key is missing, wrap this call to a command to the
        minion of said key if it is available in the self.functions set
        '''
        if key not in self.functions:
            # include the key in the exception so callers can see what
            # function name failed to resolve
            raise KeyError(key)
        return self.run_key(key)
    def __load_functions(self):
        '''
        Find out what functions are available on the minion
        '''
        return set(self.local.cmd(self.minion,
                                  'sys.list_functions').get(self.minion, []))
    def run_key(self, key):
        '''
        Return a function that executes the arguments passed via the local
        client
        '''
        def func(*args, **kwargs):
            '''
            Run a remote call
            '''
            args = list(args)
            # FIX: iterating a dict directly yields only its keys, so the
            # original ``for _key, _val in kwargs`` raised ValueError for
            # any keyword argument; iterate over items() instead.
            for _key, _val in kwargs.items():
                args.append('{0}={1}'.format(_key, _val))
            return self.local.cmd(self.minion, key, args)
        return func
class Caller(object):
    '''
    ``Caller`` is the same interface used by the :command:`salt-call`
    command-line tool on the Salt Minion.
    Importing and using ``Caller`` must be done on the same machine as a
    Salt Minion and it must be done using the same user that the Salt Minion is
    running as.
    Usage:
    .. code-block:: python
        import salt.client
        caller = salt.client.Caller()
        caller.function('test.ping')
        # Or call objects directly
        caller.sminion.functions['cmd.run']('ls -l')
    Note, a running master or minion daemon is not required to use this class.
    Running ``salt-call --local`` simply sets :conf_minion:`file_client` to
    ``'local'``. The same can be achieved at the Python level by including that
    setting in a minion config file.
    Instantiate a new Caller() instance using a file system path to the minion
    config file:
    .. code-block:: python
        caller = salt.client.Caller('/path/to/custom/minion_config')
        caller.sminion.functions['grains.items']()
    Instantiate a new Caller() instance using a dictionary of the minion
    config:
    .. versionadded:: 2014.7.0
        Pass the minion config as a dictionary.
    .. code-block:: python
        import salt.client
        import salt.config
        opts = salt.config.minion_config('/etc/salt/minion')
        opts['file_client'] = 'local'
        caller = salt.client.Caller(mopts=opts)
        caller.sminion.functions['grains.items']()
    '''
    def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'minion'),
                 mopts=None):
        # mopts (a pre-parsed dict) takes precedence over the config path
        if mopts:
            self.opts = mopts
        else:
            self.opts = salt.config.minion_config(c_path)
        # SMinion loads the full minion function set for direct invocation
        self.sminion = salt.minion.SMinion(self.opts)
    def function(self, fun, *args, **kwargs):
        '''
        Call a single salt function

        :param fun: function name, "<module>.<function>"
        :param args: positional arguments, parsed the same way the
            salt-call CLI parses its inputs
        :param kwargs: keyword arguments for the function
        :return: whatever the minion function returns
        '''
        func = self.sminion.functions[fun]
        args, kwargs = salt.minion.load_args_and_kwargs(
            func,
            salt.utils.args.parse_input(args),
            kwargs)
        return func(*args, **kwargs)
| 34.759852 | 112 | 0.491408 |
27054226a91fdb95905a4f2e64c839a3b80e450e | 1,755 | py | Python | accounts/forms.py | MrRezoo/django-online-shop | 2b0044ed41fe1b89c5fd2c2ea73822c6f43b5f6e | [
"MIT"
] | 1 | 2021-09-14T14:54:16.000Z | 2021-09-14T14:54:16.000Z | accounts/forms.py | MrRezoo/django-online-shop | 2b0044ed41fe1b89c5fd2c2ea73822c6f43b5f6e | [
"MIT"
] | 1 | 2022-01-18T07:20:57.000Z | 2022-01-18T07:20:57.000Z | accounts/forms.py | MrRezoo/django-online-shop | 2b0044ed41fe1b89c5fd2c2ea73822c6f43b5f6e | [
"MIT"
] | null | null | null | from django import forms
from accounts.models import User
from django.contrib.auth.forms import ReadOnlyPasswordHashField
class UserCreationForm(forms.ModelForm):
    """Form for creating a new user, asking for the password twice."""
    password1 = forms.CharField(label='password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='confirm password', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('email', 'full_name', 'phone_number')

    def clean_password2(self):
        """Reject the form when the two password entries disagree."""
        data = self.cleaned_data
        both_present = data['password1'] and data['password2']
        if both_present and data['password1'] != data['password2']:
            raise forms.ValidationError('passwords must match')
        return data['password2']

    def save(self, commit=True):
        """Persist the user with a properly hashed password."""
        new_user = super().save(commit=False)
        new_user.set_password(self.cleaned_data['password1'])
        if commit:
            new_user.save()
        return new_user
class UserChangeForm(forms.ModelForm):
    """Admin form for editing a user; shows the password hash read-only."""
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = User
        fields = ('email', 'password', 'full_name', 'phone_number')
    def clean_password(self):
        # Always return the initial hash so the stored password is never
        # modified through this form, regardless of what was posted.
        return self.initial['password']
class UserLoginForm(forms.Form):
    """Plain (non-model) form collecting credentials for login."""
    email = forms.EmailField(widget=forms.EmailInput(attrs={'class': 'form-control'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))
class UserRegistrationForm(forms.Form):
    """Plain form collecting the fields needed to register a new account."""
    email = forms.EmailField(widget=forms.EmailInput(attrs={'class': 'form-control'}))
    full_name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}))
    phone_number = forms.CharField(max_length=12,widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'})) | 36.5625 | 105 | 0.691738 |
0a7358c1072c692118f8c185bb82b0efb22c6ff8 | 2,295 | py | Python | tools/publish_doc.py | jandom/hawktracer | e53b07bc812c4cfe8f6253ddb48ac43de8fa74a8 | [
"MIT"
] | 116 | 2018-05-04T14:51:58.000Z | 2022-02-08T23:47:28.000Z | tools/publish_doc.py | jandom/hawktracer | e53b07bc812c4cfe8f6253ddb48ac43de8fa74a8 | [
"MIT"
] | 58 | 2018-05-04T15:00:15.000Z | 2020-11-06T11:34:11.000Z | tools/publish_doc.py | beila/hawktracer | d427c6a66097787f4e5431e1cae0278f1f03ca4c | [
"MIT"
] | 32 | 2018-05-05T12:05:56.000Z | 2021-12-06T02:18:05.000Z | import argparse
import distutils.dir_util
import os
import subprocess
import sys
import tempfile
def call_process(call_args, custom_error, exit_on_error=False):
    """Run *call_args* as a subprocess, reporting *custom_error* on failure.

    When the command fails, its captured output and return code are
    printed. With exit_on_error the whole script terminates with exit
    code 1; otherwise the CalledProcessError propagates to the caller.
    """
    try:
        subprocess.check_output(call_args)
    except subprocess.CalledProcessError as err:
        print(err.output)
        print('{}. Return code: {}'.format(custom_error, err.returncode))
        if not exit_on_error:
            raise
        sys.exit(1)
# Build the docs, then publish them to the gh-pages branch of the doc repo.
parser = argparse.ArgumentParser(description='Publishes HawkTracer documentation')
parser.add_argument('-b', '--build-dir', help='HawkTracer build directory', required=True)
parser.add_argument('-d', '--publish-dirs', nargs='+',
                    help='Directories for this version of the documentation, e.g. 0.7.0, stable etc.')
parser.add_argument('-r', '--repository', help='HawkTracer repository path',
                    default='git@github.com:hawktracer/doc.git')
args = parser.parse_args()
# exit_on_error=True: a failed doc build aborts the whole script
call_process(['cmake', '--build', args.build_dir, '--target', 'doc_doxygen'], 'Building documentation failed.', True)
doc_path = os.path.abspath(os.path.join(args.build_dir, 'doxygen_doc', 'html'))
if not os.path.exists(doc_path):
    print('Documentation path "{}" doesn\'t exist!'.format(doc_path))
    sys.exit(1)
# NOTE(review): current_dir is captured but never used, and the os.chdir
# below is never undone — the process ends in the (deleted) clone dir.
current_dir = os.getcwd()
clone_dir = tempfile.mkdtemp()
try:
    call_process(['git', 'clone', args.repository, clone_dir], 'Unable to clone repository')
    os.chdir(clone_dir)
    call_process(['git', 'checkout', 'gh-pages'], 'Unable to switch to doc branch.')
    print('Removing old documentation...')
    for publish_dir in args.publish_dirs:
        publish_dir = os.path.join(clone_dir, publish_dir)
        # git rm the previous copy so deleted pages do not linger
        if os.path.isdir(publish_dir):
            call_process(['git', 'rm', '-rfq', publish_dir], 'Unable to remove previous documentation')
        print('Copying documentation to a repository...')
        distutils.dir_util.copy_tree(doc_path, publish_dir)
    print("Committing documentation...")
    call_process(['git', 'add', '.'], 'Unable to add files to commit.')
    call_process(['git', 'commit', '-a', '-m', 'update doc'], 'Unable to commit new documentation.')
    call_process(['git', 'push', 'origin', 'gh-pages'], 'Unable to push documentation.')
finally:
    # Always remove the temporary clone, even when a git step failed
    distutils.dir_util.remove_tree(clone_dir)
| 38.25 | 117 | 0.677996 |
bb8dd02dbc0e681279a512813eab78521f93df3d | 1,266 | py | Python | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/seasonality_event_status.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/seasonality_event_status.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/seasonality_event_status.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'SeasonalityEventStatusEnum',
},
)
class SeasonalityEventStatusEnum(proto.Message):
    r"""Message describing seasonality event statuses. The two types
    of seasonality events are BiddingSeasonalityAdjustments and
    BiddingDataExclusions.
    """
    class SeasonalityEventStatus(proto.Enum):
        r"""The possible statuses of a Seasonality Event."""
        # Values mirror the upstream protobuf definition; do not renumber.
        # NOTE(review): value 3 is intentionally absent here — presumably
        # reserved/removed upstream; confirm against the .proto source.
        UNSPECIFIED = 0
        UNKNOWN = 1
        ENABLED = 2
        REMOVED = 4
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.44186 | 74 | 0.71485 |
c65e9e27c3609c2ae869869035f66df948896e61 | 4,641 | py | Python | build/gn/verify_depfile.py | allansrc/fuchsia | a2c235b33fc4305044d496354a08775f30cdcf37 | [
"BSD-2-Clause"
] | 210 | 2019-02-05T12:45:09.000Z | 2022-03-28T07:59:06.000Z | build/gn/verify_depfile.py | PlugFox/fuchsia | 39afe5230d41628b3c736a6e384393df954968c8 | [
"BSD-2-Clause"
] | 56 | 2021-06-03T03:16:25.000Z | 2022-03-20T01:07:44.000Z | build/gn/verify_depfile.py | PlugFox/fuchsia | 39afe5230d41628b3c736a6e384393df954968c8 | [
"BSD-2-Clause"
] | 73 | 2019-03-06T18:55:23.000Z | 2022-03-26T12:04:51.000Z | #!/usr/bin/env python
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import os.path
import sys
# Exempt targets with these prefixes.
EXEMPTION_PREFIXES = [
# TODO(fxbug.dev/56885): cargo-gnaw should generate sources files for third party crates.
'//third_party/rust_crates:',
]
def parse_depfile(depfile_path):
    """Return the set of normalized source paths listed in a depfile.

    Only the first line matters; it has the form ``target: dep1 dep2 ...``.
    Each dependency is normalized with ``os.path.normpath`` and then made
    relative to the current directory, turning paths such as
    ``../../out/default/foo/bar`` into the canonical ``foo/bar``.
    """
    with open(depfile_path) as depfile:
        first_line = depfile.readline().strip()
    # Everything after the first ':' is the space-separated dependency list.
    deps_part = first_line[first_line.find(':') + 1:]
    return {
        os.path.relpath(os.path.normpath(token))
        for token in deps_part.split(' ')
        if token.strip()
    }
def build_file_source_path(target, source_path):
    '''Translate a depfile path into one suitable for a BUILD.gn file.

    The result is relative to the directory of the GN label *target*, or
    source-absolute (``//...``) when *source_path* is not a descendant of
    the target's directory.

    For target '//src/sys/component_manager:bin':
      '../../src/sys/component_manager/src/main.rs' -> 'src/main.rs'
      '../../prebuilts/assets/font.ttf' -> '//prebuilts/assets/font.ttf'
    '''
    # Peel off the leading '../' segments that escape the build directory.
    trimmed = source_path
    while trimmed.startswith('../'):
        trimmed = trimmed[len('../'):]
    # Directory part of the label: drop the '//' prefix and ':name' suffix.
    target_dir = target[2:].partition(':')[0]
    if trimmed.startswith(target_dir):
        return os.path.relpath(trimmed, start=target_dir)
    return '//' + trimmed
def print_suggested_sources(varname, sources):
    '''Prints a GN list variable assignment with the variable name `varname`.

    Output goes to stderr so it accompanies the error diagnostics.

    Eg.
    sources = [
        "src/main.rs",
        "src/foo.rs",
    ]
    '''
    print(' {} = ['.format(varname), file=sys.stderr)
    for source in sources:
        print(' "{}",'.format(source), file=sys.stderr)
    print(' ]', file=sys.stderr)
def main():
    """Verify the compiler depfile against the BUILD.gn source list.

    Returns 0 when every compiled source is listed (or the target is
    exempt), 1 otherwise, printing suggested `sources`/`inputs` lists.
    """
    parser = argparse.ArgumentParser(
        description=
        'Verifies that the compiler-emitted depfile strictly contains the expected source files'
    )
    parser.add_argument(
        '-t',
        '--target_label',
        required=True,
        help='GN target label being checked')
    parser.add_argument(
        '-d',
        '--depfile',
        required=True,
        help='path to compiler emitted depfile')
    parser.add_argument(
        'expected_sources',
        nargs='*',
        help='path to the expected list of source files')
    args = parser.parse_args()
    # Check for opt-out.
    if args.expected_sources and args.expected_sources[0].endswith(
            '/build/rust/__SKIP_ENFORCEMENT__.rs'):
        return 0
    # Ignore specific target exemptions.
    for prefix in EXEMPTION_PREFIXES:
        if args.target_label.startswith(prefix):
            return 0
    expected_sources = set(args.expected_sources)
    actual_sources = parse_depfile(args.depfile)
    unlisted_sources = actual_sources.difference(expected_sources)
    if unlisted_sources:
        # There is a mismatch in expected sources and actual sources used by the compiler.
        # We don't treat overly-specified sources as an error. Ninja will still complain
        # if those source files don't exist.
        for source in unlisted_sources:
            print(
                'error: source file `{}` was used during compilation but not listed in BUILD.gn'
                .format(source),
                file=sys.stderr)
        print(
            'note: the BUILD.gn file for {} should have the following:\n'.
            format(args.target_label),
            file=sys.stderr)
        # Rust files belong in `sources`; everything else in `inputs`.
        rust_sources = [
            build_file_source_path(args.target_label, source)
            for source in actual_sources
            if source.endswith('.rs')
        ]
        if rust_sources:
            print_suggested_sources('sources', rust_sources)
        non_rust_sources = [
            build_file_source_path(args.target_label, source)
            for source in actual_sources
            if not source.endswith('.rs')
        ]
        if non_rust_sources:
            print_suggested_sources('inputs', non_rust_sources)
        return 1
    return 0
if __name__ == '__main__':
sys.exit(main())
| 32.229167 | 96 | 0.639302 |
0b428d7feddad672a028a463af01c41fee757894 | 23,919 | py | Python | environ/lib/python3.8/site-packages/sqlalchemy/ext/asyncio/session.py | EsauKip/Blog-post | e5716af25139ab4e867767990f04f0749fc9bf40 | [
"MIT"
] | 1 | 2022-03-12T08:56:51.000Z | 2022-03-12T08:56:51.000Z | environ/lib/python3.8/site-packages/sqlalchemy/ext/asyncio/session.py | EsauKip/Blog-post | e5716af25139ab4e867767990f04f0749fc9bf40 | [
"MIT"
] | 1 | 2022-03-17T13:12:17.000Z | 2022-03-17T13:12:17.000Z | environ/lib/python3.8/site-packages/sqlalchemy/ext/asyncio/session.py | EsauKip/Blog-post | e5716af25139ab4e867767990f04f0749fc9bf40 | [
"MIT"
] | null | null | null | # ext/asyncio/session.py
# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from . import engine
from . import result as _result
from .base import ReversibleProxy
from .base import StartableContext
from .result import _ensure_sync_result
from ... import util
from ...orm import object_session
from ...orm import Session
from ...orm import state as _instance_state
from ...util.concurrency import greenlet_spawn
_EXECUTE_OPTIONS = util.immutabledict({"prebuffer_rows": True})
_STREAM_OPTIONS = util.immutabledict({"stream_results": True})
@util.create_proxy_methods(
Session,
":class:`_orm.Session`",
":class:`_asyncio.AsyncSession`",
classmethods=["object_session", "identity_key"],
methods=[
"__contains__",
"__iter__",
"add",
"add_all",
"expire",
"expire_all",
"expunge",
"expunge_all",
"is_modified",
"in_transaction",
"in_nested_transaction",
],
attributes=[
"dirty",
"deleted",
"new",
"identity_map",
"is_active",
"autoflush",
"no_autoflush",
"info",
],
)
class AsyncSession(ReversibleProxy):
"""Asyncio version of :class:`_orm.Session`.
The :class:`_asyncio.AsyncSession` is a proxy for a traditional
:class:`_orm.Session` instance.
.. versionadded:: 1.4
To use an :class:`_asyncio.AsyncSession` with custom :class:`_orm.Session`
implementations, see the
:paramref:`_asyncio.AsyncSession.sync_session_class` parameter.
"""
_is_asyncio = True
dispatch = None
    def __init__(self, bind=None, binds=None, sync_session_class=None, **kw):
        r"""Construct a new :class:`_asyncio.AsyncSession`.
        All parameters other than ``sync_session_class`` are passed to the
        ``sync_session_class`` callable directly to instantiate a new
        :class:`_orm.Session`. Refer to :meth:`_orm.Session.__init__` for
        parameter documentation.
        :param sync_session_class:
          A :class:`_orm.Session` subclass or other callable which will be used
          to construct the :class:`_orm.Session` which will be proxied. This
          parameter may be used to provide custom :class:`_orm.Session`
          subclasses. Defaults to the
          :attr:`_asyncio.AsyncSession.sync_session_class` class-level
          attribute.
          .. versionadded:: 1.4.24
        """
        # AsyncSession always runs the underlying Session in 2.0 "future"
        # mode.
        kw["future"] = True
        if bind:
            # keep the async engine/connection on the facade, hand the
            # unwrapped sync object to the proxied Session
            self.bind = bind
            bind = engine._get_sync_engine_or_connection(bind)
        if binds:
            self.binds = binds
            binds = {
                key: engine._get_sync_engine_or_connection(b)
                for key, b in binds.items()
            }
        if sync_session_class:
            # instance-level override of the class-level default
            self.sync_session_class = sync_session_class
        # register the sync Session so events/proxying can map it back to
        # this AsyncSession
        self.sync_session = self._proxied = self._assign_proxied(
            self.sync_session_class(bind=bind, binds=binds, **kw)
        )
sync_session_class = Session
"""The class or callable that provides the
underlying :class:`_orm.Session` instance for a particular
:class:`_asyncio.AsyncSession`.
At the class level, this attribute is the default value for the
:paramref:`_asyncio.AsyncSession.sync_session_class` parameter. Custom
subclasses of :class:`_asyncio.AsyncSession` can override this.
At the instance level, this attribute indicates the current class or
callable that was used to provide the :class:`_orm.Session` instance for
this :class:`_asyncio.AsyncSession` instance.
.. versionadded:: 1.4.24
"""
sync_session: Session
"""Reference to the underlying :class:`_orm.Session` this
:class:`_asyncio.AsyncSession` proxies requests towards.
This instance can be used as an event target.
.. seealso::
:ref:`asyncio_events`
"""
async def refresh(
    self, instance, attribute_names=None, with_for_update=None
):
    """Expire and refresh the attributes on the given instance.

    A query is issued against the database and the instance's
    attributes are reloaded with their current database values.  This
    is the awaitable counterpart of :meth:`_orm.Session.refresh`; see
    that method for a complete description of all options.

    .. seealso::

        :meth:`_orm.Session.refresh` - main documentation for refresh
    """
    refresh_kw = {
        "attribute_names": attribute_names,
        "with_for_update": with_for_update,
    }
    return await greenlet_spawn(
        self.sync_session.refresh, instance, **refresh_kw
    )
async def run_sync(self, fn, *arg, **kw):
    """Invoke ``fn`` inside the database greenlet, passing the proxied
    sync :class:`_orm.Session` as its first argument.

    The callable runs in a specially instrumented greenlet, which keeps
    the asyncio event loop intact all the way down to the database
    connection.

    E.g.::

        with AsyncSession(async_engine) as session:
            await session.run_sync(some_business_method)

    .. note::

        The callable executes inline within the asyncio event loop and
        will block on traditional IO calls; any IO it performs should go
        through SQLAlchemy's asyncio-adapted database APIs, which are
        translated for the greenlet context.

    .. seealso::

        :ref:`session_run_sync`
    """
    return await greenlet_spawn(fn, self.sync_session, *arg, **kw)
async def execute(
    self,
    statement,
    params=None,
    execution_options=util.EMPTY_DICT,
    bind_arguments=None,
    **kw
):
    """Execute a statement and return a buffered
    :class:`_engine.Result` object.

    .. seealso::

        :meth:`_orm.Session.execute` - main documentation for execute
    """
    # Layer the module-level async execute options on top of whatever
    # the caller supplied (or use them alone when nothing was given).
    if not execution_options:
        merged_options = _EXECUTE_OPTIONS
    else:
        merged_options = util.immutabledict(execution_options).union(
            _EXECUTE_OPTIONS
        )

    result = await greenlet_spawn(
        self.sync_session.execute,
        statement,
        params=params,
        execution_options=merged_options,
        bind_arguments=bind_arguments,
        **kw
    )
    return await _ensure_sync_result(result, self.execute)
async def scalar(
    self,
    statement,
    params=None,
    execution_options=util.EMPTY_DICT,
    bind_arguments=None,
    **kw
):
    """Execute a statement and return a scalar result.

    .. seealso::

        :meth:`_orm.Session.scalar` - main documentation for scalar
    """
    # Delegate to execute() and reduce the buffered result to a scalar.
    return (
        await self.execute(
            statement,
            params=params,
            execution_options=execution_options,
            bind_arguments=bind_arguments,
            **kw
        )
    ).scalar()
async def scalars(
    self,
    statement,
    params=None,
    execution_options=util.EMPTY_DICT,
    bind_arguments=None,
    **kw
):
    """Execute a statement and return scalar results.

    :return: a :class:`_result.ScalarResult` object

    .. versionadded:: 1.4.24

    .. seealso::

        :meth:`_orm.Session.scalars` - main documentation for scalars

        :meth:`_asyncio.AsyncSession.stream_scalars` - streaming version
    """
    # Delegate to execute() and expose the first column of each row.
    return (
        await self.execute(
            statement,
            params=params,
            execution_options=execution_options,
            bind_arguments=bind_arguments,
            **kw
        )
    ).scalars()
async def get(
    self,
    entity,
    ident,
    options=None,
    populate_existing=False,
    with_for_update=None,
    identity_token=None,
):
    """Return an instance based on the given primary key identifier,
    or ``None`` if not found.

    .. seealso::

        :meth:`_orm.Session.get` - main documentation for get
    """
    get_kw = {
        "options": options,
        "populate_existing": populate_existing,
        "with_for_update": with_for_update,
        "identity_token": identity_token,
    }
    return await greenlet_spawn(
        self.sync_session.get, entity, ident, **get_kw
    )
async def stream(
    self,
    statement,
    params=None,
    execution_options=util.EMPTY_DICT,
    bind_arguments=None,
    **kw
):
    """Execute a statement and return a streaming
    :class:`_asyncio.AsyncResult` object."""
    # Layer the module-level streaming options on top of whatever the
    # caller supplied (or use them alone when nothing was given).
    if not execution_options:
        merged_options = _STREAM_OPTIONS
    else:
        merged_options = util.immutabledict(execution_options).union(
            _STREAM_OPTIONS
        )

    result = await greenlet_spawn(
        self.sync_session.execute,
        statement,
        params=params,
        execution_options=merged_options,
        bind_arguments=bind_arguments,
        **kw
    )
    return _result.AsyncResult(result)
async def stream_scalars(
    self,
    statement,
    params=None,
    execution_options=util.EMPTY_DICT,
    bind_arguments=None,
    **kw
):
    """Execute a statement and return a stream of scalar results.

    :return: an :class:`_asyncio.AsyncScalarResult` object

    .. versionadded:: 1.4.24

    .. seealso::

        :meth:`_orm.Session.scalars` - main documentation for scalars

        :meth:`_asyncio.AsyncSession.scalars` - non streaming version
    """
    # Delegate to stream() and expose the first column of each row.
    return (
        await self.stream(
            statement,
            params=params,
            execution_options=execution_options,
            bind_arguments=bind_arguments,
            **kw
        )
    ).scalars()
async def delete(self, instance):
    """Mark an instance as deleted.

    The actual DELETE against the database occurs at ``flush()`` time.
    The call is awaitable because cascading along unloaded
    relationships may require queries to be emitted.

    .. seealso::

        :meth:`_orm.Session.delete` - main documentation for delete
    """
    return await greenlet_spawn(self.sync_session.delete, instance)
async def merge(self, instance, load=True, options=None):
    """Copy the state of the given instance into a corresponding
    instance managed by this :class:`_asyncio.AsyncSession`.

    .. seealso::

        :meth:`_orm.Session.merge` - main documentation for merge
    """
    merge_fn = self.sync_session.merge
    return await greenlet_spawn(
        merge_fn, instance, load=load, options=options
    )
async def flush(self, objects=None):
    """Flush all the object changes to the database.

    .. seealso::

        :meth:`_orm.Session.flush` - main documentation for flush
    """
    # Session.flush() returns None, so nothing is propagated back.
    await greenlet_spawn(self.sync_session.flush, objects=objects)
def get_transaction(self):
    """Return the current root transaction in progress, if any.

    :return: an :class:`_asyncio.AsyncSessionTransaction` object, or
     ``None``.

    .. versionadded:: 1.4.18
    """
    trans = self.sync_session.get_transaction()
    if trans is None:
        return None
    # Map the sync transaction back to its async-facing proxy.
    return AsyncSessionTransaction._retrieve_proxy_for_target(trans)
def get_nested_transaction(self):
    """Return the current nested transaction in progress, if any.

    :return: an :class:`_asyncio.AsyncSessionTransaction` object, or
     ``None``.

    .. versionadded:: 1.4.18
    """
    trans = self.sync_session.get_nested_transaction()
    if trans is None:
        return None
    # Map the sync transaction back to its async-facing proxy.
    return AsyncSessionTransaction._retrieve_proxy_for_target(trans)
def get_bind(self, mapper=None, clause=None, bind=None, **kw):
    """Return a "bind" to which the synchronous proxied :class:`_orm.Session`
    is bound.

    Unlike the :meth:`_orm.Session.get_bind` method, this method is
    currently **not** used by this :class:`.AsyncSession` in any way
    in order to resolve engines for requests.

    .. note::

        This method proxies directly to the :meth:`_orm.Session.get_bind`
        method, however is currently **not** useful as an override target,
        in contrast to that of the :meth:`_orm.Session.get_bind` method.
        The example below illustrates how to implement custom
        :meth:`_orm.Session.get_bind` schemes that work with
        :class:`.AsyncSession` and :class:`.AsyncEngine`.

    The pattern introduced at :ref:`session_custom_partitioning`
    illustrates how to apply a custom bind-lookup scheme to a
    :class:`_orm.Session` given a set of :class:`_engine.Engine` objects.
    To apply a corresponding :meth:`_orm.Session.get_bind` implementation
    for use with a :class:`.AsyncSession` and :class:`.AsyncEngine`
    objects, continue to subclass :class:`_orm.Session` and apply it to
    :class:`.AsyncSession` using
    :paramref:`.AsyncSession.sync_session_class`. The inner method must
    continue to return :class:`_engine.Engine` instances, which can be
    acquired from a :class:`_asyncio.AsyncEngine` using the
    :attr:`_asyncio.AsyncEngine.sync_engine` attribute::

        # using example from "Custom Vertical Partitioning"

        import random

        from sqlalchemy.ext.asyncio import AsyncSession
        from sqlalchemy.ext.asyncio import create_async_engine
        from sqlalchemy.orm import Session, sessionmaker

        # construct async engines w/ async drivers
        engines = {
            'leader':create_async_engine("sqlite+aiosqlite:///leader.db"),
            'other':create_async_engine("sqlite+aiosqlite:///other.db"),
            'follower1':create_async_engine("sqlite+aiosqlite:///follower1.db"),
            'follower2':create_async_engine("sqlite+aiosqlite:///follower2.db"),
        }

        class RoutingSession(Session):
            def get_bind(self, mapper=None, clause=None, **kw):
                # within get_bind(), return sync engines
                if mapper and issubclass(mapper.class_, MyOtherClass):
                    return engines['other'].sync_engine
                elif self._flushing or isinstance(clause, (Update, Delete)):
                    return engines['leader'].sync_engine
                else:
                    return engines[
                        random.choice(['follower1','follower2'])
                    ].sync_engine

        # apply to AsyncSession using sync_session_class
        AsyncSessionMaker = sessionmaker(
            class_=AsyncSession,
            sync_session_class=RoutingSession
        )

    The :meth:`_orm.Session.get_bind` method is called in a non-asyncio,
    implicitly non-blocking context in the same manner as ORM event hooks
    and functions that are invoked via :meth:`.AsyncSession.run_sync`, so
    routines that wish to run SQL commands inside of
    :meth:`_orm.Session.get_bind` can continue to do so using
    blocking-style code, which will be translated to implicitly async calls
    at the point of invoking IO on the database drivers.

    """  # noqa E501
    # Straight pass-through to the proxied sync Session; see the
    # docstring note about why this is not an override target.
    return self.sync_session.get_bind(
        mapper=mapper, clause=clause, bind=bind, **kw
    )
async def connection(self, **kw):
    r"""Return an :class:`_asyncio.AsyncConnection` object corresponding
    to this :class:`.Session` object's transactional state.

    Keyword arguments are passed through to the underlying
    :meth:`_orm.Session.connection` call and may be used to establish
    execution options for the database connection used by the current
    transaction.

    .. versionadded:: 1.4.24 Added \**kw arguments which are passed
       through to the underlying :meth:`_orm.Session.connection` method.

    .. seealso::

        :meth:`_orm.Session.connection` - main documentation for
        "connection"
    """
    sync_conn = await greenlet_spawn(self.sync_session.connection, **kw)
    # Wrap the sync connection in (or retrieve) its async proxy.
    return engine.AsyncConnection._retrieve_proxy_for_target(sync_conn)
def begin(self, **kw):
    """Return an :class:`_asyncio.AsyncSessionTransaction` object.

    The underlying :class:`_orm.Session` performs the "begin" action
    only when the returned object is entered::

        async with async_session.begin():
            # .. ORM transaction is begun

    Database IO does not normally occur at this point, since database
    transactions start on demand; the block is nonetheless async so
    that a :meth:`_orm.SessionEvents.after_transaction_create` event
    hook performing IO can be accommodated.

    For a general description of ORM begin, see
    :meth:`_orm.Session.begin`.
    """
    return AsyncSessionTransaction(self)
def begin_nested(self, **kw):
    """Return an :class:`_asyncio.AsyncSessionTransaction` object which
    will begin a "nested" transaction, e.g. SAVEPOINT.

    Behavior is otherwise the same as
    :meth:`_asyncio.AsyncSession.begin`.

    For a general description of ORM begin nested, see
    :meth:`_orm.Session.begin_nested`.
    """
    return AsyncSessionTransaction(self, nested=True)
async def rollback(self):
    """Roll back the transaction currently in progress."""
    return await greenlet_spawn(self.sync_session.rollback)
async def commit(self):
    """Commit the transaction currently in progress."""
    return await greenlet_spawn(self.sync_session.commit)
async def close(self):
    """Close out the transactional resources and ORM objects used by
    this :class:`_asyncio.AsyncSession`.

    All ORM objects associated with this session are expunged, any
    transaction in progress is ended, and any
    :class:`_asyncio.AsyncConnection` objects checked out from
    associated :class:`_asyncio.AsyncEngine` objects are
    :term:`releases`-d.  The session is then left in a state in which
    it may be used again.

    .. tip::

        Closing does **not** prevent the session from being used again;
        there is no distinct "closed" state.  It merely releases all
        database connections and ORM objects.

    .. seealso::

        :ref:`session_closing` - detail on the semantics of
        :meth:`_asyncio.AsyncSession.close`
    """
    return await greenlet_spawn(self.sync_session.close)
async def invalidate(self):
    """Close this Session, using connection invalidation.

    For a complete description, see :meth:`_orm.Session.invalidate`.
    """
    return await greenlet_spawn(self.sync_session.invalidate)
@classmethod
async def close_all(cls):
    """Close all :class:`_asyncio.AsyncSession` sessions.

    Dispatches to the sync-level :meth:`_orm.Session.close_all`
    classmethod inside the database greenlet.
    """
    # Bug fix: as a classmethod, the first argument is the class itself,
    # on which ``sync_session`` is only a type annotation — the previous
    # ``self.sync_session.close_all`` raised AttributeError.  Dispatch to
    # the sync Session classmethod directly instead.
    return await greenlet_spawn(Session.close_all)
async def __aenter__(self):
    # ``async with AsyncSession(...)`` yields the session itself.
    return self

async def __aexit__(self, type_, value, traceback):
    # Leaving the ``async with`` block releases all transactional
    # resources via close().
    await self.close()

def _maker_context_manager(self):
    # no @contextlib.asynccontextmanager until python3.7, gr
    # NOTE(review): appears to back the sessionmaker-level async
    # ``begin()`` convenience — confirm against the sessionmaker code.
    return _AsyncSessionContextManager(self)
class _AsyncSessionContextManager:
def __init__(self, async_session):
self.async_session = async_session
async def __aenter__(self):
self.trans = self.async_session.begin()
await self.trans.__aenter__()
return self.async_session
async def __aexit__(self, type_, value, traceback):
await self.trans.__aexit__(type_, value, traceback)
await self.async_session.__aexit__(type_, value, traceback)
class AsyncSessionTransaction(ReversibleProxy, StartableContext):
    """A wrapper for the ORM :class:`_orm.SessionTransaction` object.

    This object is provided so that a transaction-holding object
    for the :meth:`_asyncio.AsyncSession.begin` may be returned.

    The object supports both explicit calls to
    :meth:`_asyncio.AsyncSessionTransaction.commit` and
    :meth:`_asyncio.AsyncSessionTransaction.rollback`, as well as use as an
    async context manager.

    .. versionadded:: 1.4

    """

    # session: owning AsyncSession; sync_transaction: proxied sync
    # transaction (None until start()); nested: SAVEPOINT flag.
    __slots__ = ("session", "sync_transaction", "nested")

    def __init__(self, session, nested=False):
        self.session = session
        self.nested = nested
        # The sync transaction is created lazily by start(), not here.
        self.sync_transaction = None

    @property
    def is_active(self):
        # Delegates to the proxied sync transaction's active state.
        return (
            self._sync_transaction() is not None
            and self._sync_transaction().is_active
        )

    def _sync_transaction(self):
        # Guard against use before start() has created the transaction.
        if not self.sync_transaction:
            self._raise_for_not_started()
        return self.sync_transaction

    async def rollback(self):
        """Roll back this :class:`_asyncio.AsyncTransaction`."""
        await greenlet_spawn(self._sync_transaction().rollback)

    async def commit(self):
        """Commit this :class:`_asyncio.AsyncTransaction`."""
        await greenlet_spawn(self._sync_transaction().commit)

    async def start(self, is_ctxmanager=False):
        # Begin (or begin_nested for SAVEPOINT) on the sync Session
        # inside the greenlet, and register the result for proxy lookup.
        self.sync_transaction = self._assign_proxied(
            await greenlet_spawn(
                self.session.sync_session.begin_nested
                if self.nested
                else self.session.sync_session.begin
            )
        )
        if is_ctxmanager:
            # Mirror sync context-manager entry on the inner transaction.
            self.sync_transaction.__enter__()
        return self

    async def __aexit__(self, type_, value, traceback):
        # Exit the sync transaction inside the greenlet, forwarding any
        # exception info so it can commit or roll back accordingly.
        await greenlet_spawn(
            self._sync_transaction().__exit__, type_, value, traceback
        )
def async_object_session(instance):
    """Return the :class:`_asyncio.AsyncSession` to which the given
    instance belongs.

    The sync-API function :class:`_orm.object_session` is used to locate
    the :class:`_orm.Session` which refers to the given instance; that
    session is then linked back to its originating
    :class:`_asyncio.AsyncSession`.

    If the :class:`_asyncio.AsyncSession` has been garbage collected,
    the return value is ``None``.

    This functionality is also available from the
    :attr:`_orm.InstanceState.async_session` accessor.

    :param instance: an ORM mapped instance
    :return: an :class:`_asyncio.AsyncSession` object, or ``None``.

    .. versionadded:: 1.4.18
    """
    sync_sess = object_session(instance)
    return async_session(sync_sess) if sync_sess is not None else None
def async_session(session):
    """Return the :class:`_asyncio.AsyncSession` which is proxying the given
    :class:`_orm.Session` object, if any.

    :param session: a :class:`_orm.Session` instance.
    :return: a :class:`_asyncio.AsyncSession` instance, or ``None``.

    .. versionadded:: 1.4.18

    """
    # regenerate=False: only look up an existing proxy, never create one.
    return AsyncSession._retrieve_proxy_for_target(session, regenerate=False)


# Module-level hook that lets the ORM's InstanceState resolve the owning
# AsyncSession (backs the InstanceState.async_session accessor).
_instance_state._async_provider = async_session
| 31.807181 | 84 | 0.632552 |
60dcd4e6eea802b7eef96589df1b8a530d3f3765 | 14,269 | py | Python | tests/test_registry.py | rablack/skoopy | c0e5d35dd9dea3266d4030a7ff8c9891cab57fec | [
"MIT"
] | 1 | 2020-08-05T04:52:51.000Z | 2020-08-05T04:52:51.000Z | tests/test_registry.py | rablack/skoopy | c0e5d35dd9dea3266d4030a7ff8c9891cab57fec | [
"MIT"
] | 29 | 2018-05-14T20:52:54.000Z | 2018-07-09T19:37:52.000Z | tests/test_registry.py | rablack/skoopy | c0e5d35dd9dea3266d4030a7ff8c9891cab57fec | [
"MIT"
] | null | null | null | """
Test cases for the skoopy.registry module
"""
import unittest
import sys
import io
import os
import tempfile
import json
# If this test is being executed standalone, add '..' to the path
# to start searching for packages from the top level of the app.
if __name__ == "__main__":
sys.path.insert(0, '..')
from skoopy.registry import SkoobotRegistry
class TestSkootbotRegistry(unittest.TestCase):
    """
    Test case for the SkoobotRegistry class
    """
    def setUp(self):
        # Create a fresh temporary JSON registry file for every test.
        registryFd, self.tempPath = tempfile.mkstemp(suffix=".json", prefix="skoobot_test", text=True)
        # One uniquely named skoobot plus two sharing a duplicate name.
        self.skooName = "TestSkoobot"
        self.skooAddr = "00:44:00:bb:55:ff"
        self.skooDupName = "DuplicateSkoobot"
        self.skooDupAddr1 = "00:00:00:00:00:01"
        self.skooDupAddr2 = "00:00:00:00:00:02"
        self.registryDict = {
            "default" : self.skooName,
            "skoobots" : {
                self.skooAddr : self.skooName,
                self.skooDupAddr1 : self.skooDupName,
                self.skooDupAddr2 : self.skooDupName
            }
        }
        # Write via a normal file handle, then close the mkstemp fd.
        with open(self.tempPath, "w") as registryFile:
            json.dump(self.registryDict, registryFile, indent=4)
        os.close(registryFd)

    def tearDown(self):
        # Remove the per-test temporary registry file.
        os.remove(self.tempPath)

    def testConstruct(self):
        """
        Test construction with a non-existent file
        and the JSON file created during setup
        """
        with self.subTest("Empty registry"):
            emptyRegistry = SkoobotRegistry("~/nonexistent.json")
            self.assertEqual(dict(), emptyRegistry.registry)
            self.assertEqual(True, emptyRegistry.valid)
            self.assertEqual(None, emptyRegistry.getDefaultName())
            # Make sure that ~ in the filename was expanded
            self.assertNotIn("~", emptyRegistry.registryPath)
        with self.subTest("setUp() registry"):
            registry = SkoobotRegistry(self.tempPath)
            self.assertEqual(3, len(registry.registry))
            self.assertEqual(True, registry.valid)
            self.assertEqual(self.skooName, registry.getDefaultName())

    def testGetSkoobotsByName(self):
        """
        Test the getSkoobotsByName() method

        The method should return a list of (addr, name) tuples
        for all skoobots matching name
        """
        setUpRegistry = SkoobotRegistry(self.tempPath)
        # Cover unique, duplicate, unknown and None name lookups.
        names = (self.skooName, self.skooDupName, "nobody", None)
        for name in names:
            with self.subTest(name=name):
                skoobots = setUpRegistry.getSkoobotsByName(name)
                if name == self.skooDupName:
                    self.assertEqual(2, len(skoobots))
                    for skoobot in skoobots:
                        self.assertEqual(self.skooDupName, skoobot[1])
                    # Make a list of just the addresses
                    skooDupAddrs = [skoo[0] for skoo in skoobots]
                    self.assertIn(self.skooDupAddr1, skooDupAddrs)
                    self.assertIn(self.skooDupAddr2, skooDupAddrs)
                elif name == self.skooName:
                    self.assertEqual(1, len(skoobots))
                    # There is only 1 skoobot, so test it
                    skoobot = skoobots[0]
                    self.assertEqual(self.skooName, skoobot[1])
                    self.assertEqual(self.skooAddr, skoobot[0])
                else:
                    self.assertEqual(0, len(skoobots))

    def testGetSkoobotByAddress(self):
        """
        Test the getSkoobotsByAddress() method

        The method should return a list of (addr, name) tupes
        for the skoobot matching addr, if any. Addresses are unique
        so there cannot be more than one. We verify uniqueness in
        the adding tests.
        """
        registry = SkoobotRegistry(self.tempPath)
        addrs = (self.skooAddr, self.skooDupAddr1, self.skooDupAddr2, "nomatch", None)
        matchExpected = (self.skooAddr, self.skooDupAddr1, self.skooDupAddr2)
        for addr in addrs:
            expectedLen = 1 if addr in matchExpected else 0
            with self.subTest(addr=addr, expectedLen=expectedLen):
                skoobots = registry.getSkoobotsByAddress(addr)
                self.assertEqual(expectedLen, len(skoobots))
                if expectedLen == 1:
                    # There is exactly 1 skoobot in the list, so use it.
                    skoobot = skoobots[0]
                    if addr == self.skooAddr:
                        self.assertEqual(addr, skoobot[0])
                        self.assertEqual(self.skooName, skoobot[1])
                    else:
                        self.assertEqual(addr, skoobot[0])
                        self.assertEqual(self.skooDupName, skoobot[1])

    def testAddSkoobot(self):
        """
        Test addition of skoobots using the addSkoobot() method

        The method adds a skoobot to the registry using an address
        and an optional name.
        """
        registry = SkoobotRegistry(self.tempPath)
        namedAddr = "ff:ff:ff:ff:ff:ff"
        namedName = "newSkoobot"
        unnamedAddr = "ff:ff:ff:ff:ff:fe"
        with self.subTest("Add named Skoobot"):
            registry.addSkoobot(namedAddr, namedName)
            self.assertEqual(4, len(registry.registry))
            self.assertEqual(1, len(registry.getSkoobotsByAddress(namedAddr)))
            self.assertEqual(1, len(registry.getSkoobotsByName(namedName)))
        with self.subTest("Add unnamed Skoobot"):
            registry.addSkoobot(unnamedAddr)
            self.assertEqual(5, len(registry.registry))
            skoobots = registry.getSkoobotsByAddress(unnamedAddr)
            self.assertEqual(1, len(skoobots))
            # The auto-generated name must come from the known name pool.
            self.assertIn(skoobots[0][1], registry.skoobotNames)
        with self.subTest("Add duplicate Skoobot"):
            # Bug #7: By default this replaces the existing
            # skoobot. If the replace=False parameter is set,
            # it raises a RuntimeError unless the parameters
            # are compatible with the existing entry.
            #
            # It is always true that it does not result in a
            # duplicate address.
            registry.addSkoobot(namedAddr, namedName)
            self.assertEqual(5, len(registry.registry))
            registry.addSkoobot(namedAddr, replace=False)
            self.assertEqual(5, len(registry.registry))
            with self.assertRaises(RuntimeError):
                registry.addSkoobot(unnamedAddr, namedName, replace=False)
        with self.subTest("Test invalid parameters"):
            with self.assertRaises(TypeError):
                registry.addSkoobot((namedAddr, namedName))
            with self.assertRaises(TypeError):
                registry.addSkoobot(namedAddr, (namedAddr, namedName))

    def testSetDefault(self):
        """
        Test for method setDefault()

        Method sets the default name. It takes one parameter, which is either
        the address or the name.
        """
        registry = SkoobotRegistry(self.tempPath)
        registry.setDefault(self.skooDupName)
        self.assertEqual(self.skooDupName, registry.getDefaultName())
        registry.setDefault(self.skooAddr)
        self.assertEqual(self.skooName, registry.getDefaultName())
        registry.setDefault(self.skooDupAddr1)
        self.assertEqual(self.skooDupName, registry.getDefaultName())

    def testGetDefaultName(self):
        """
        Test for method getDefaultName()

        Method gets the default name.
        """
        registry = SkoobotRegistry(self.tempPath)
        self.assertEqual(self.skooName, registry.getDefaultName())

    def testLoad(self):
        """
        Test for loading the registry.

        Most of this is already tested by the constructor tests,
        however, we need to check that a reload works and that a
        failed load sets the valid flag to false
        """
        registry = SkoobotRegistry(self.tempPath)
        with self.subTest("Empty dict"):
            emptyDict = {}
            with open(self.tempPath, "w") as registryFile:
                json.dump(emptyDict, registryFile)
            self.assertEqual(3, len(registry.registry))
            registry.load()
            self.assertEqual(0, len(registry.registry))
            self.assertEqual(True, registry.valid)
            self.assertEqual(None, registry.getDefaultName())
        with self.subTest("Invalid dict"):
            # Corrupt the backing file, then check load() fails loudly
            # and marks the registry invalid.
            with open(self.tempPath, "w") as registryFile:
                registryFile.write("rubbish")
            registry.addSkoobot(self.skooAddr)
            self.assertEqual(True, registry.valid)
            with self.assertRaises(json.JSONDecodeError):
                registry.load()
            self.assertEqual(0, len(registry.registry))
            self.assertEqual(False, registry.valid)
            self.assertEqual(None, registry.getDefaultName())
        with self.subTest("Reload good dict"):
            with open(self.tempPath, "w") as registryFile:
                json.dump(self.registryDict, registryFile)
            self.assertEqual(0, len(registry.registry))
            registry.load()
            self.assertEqual(3, len(registry.registry))
            self.assertEqual(True, registry.valid)
            self.assertEqual(self.skooName, registry.getDefaultName())

    def testSave(self):
        """
        Tests for the save() method

        Make sure that save() works, except when the registry is
        marked invalid.
        """
        registry = SkoobotRegistry(self.tempPath)
        altSkooAddr = "aa:aa:aa:aa:aa:aa"
        altSkooName = "Alt"
        extraSkooAddr = "ee:ee:ee:ee:ee:ee"
        extraSkooName = "Extra"
        with self.subTest("Undo alterations"):
            # Unsaved changes must be discarded by a reload.
            registry.addSkoobot(altSkooAddr, altSkooName)
            registry.setDefault(altSkooAddr)
            self.assertEqual(4, len(registry.registry))
            registry.load()
            self.assertEqual(3, len(registry.registry))
            self.assertEqual(self.skooName, registry.getDefaultName())
        with self.subTest("Alter and save"):
            registry.addSkoobot(altSkooAddr, altSkooName)
            registry.setDefault(altSkooAddr)
            self.assertEqual(4, len(registry.registry))
            # Save the state with the AltSkootbot entry
            registry.save()
            registry.addSkoobot(extraSkooAddr, extraSkooName)
            registry.setDefault(extraSkooAddr)
            self.assertEqual(5, len(registry.registry))
            self.assertEqual(extraSkooName, registry.getDefaultName())
            # Restore to the save() state
            registry.load()
            self.assertEqual(4, len(registry.registry))
            self.assertEqual(altSkooName, registry.getDefaultName())
        with self.subTest("Don't save invalid"):
            registry.addSkoobot(extraSkooAddr, altSkooName)
            registry.setDefault(extraSkooAddr)
            self.assertEqual(5, len(registry.registry))
            registry.valid = False
            # Fail to save the state with the Extra entry
            registry.save()
            # Restore to the previous save() state
            registry.load()
            self.assertEqual(4, len(registry.registry))
            self.assertEqual(altSkooName, registry.getDefaultName())

    def testGenerateName(self):
        """
        Tests for the generateName() method
        """
        registry = SkoobotRegistry(self.tempPath)
        altSkooAddr = "aa:aa:aa:aa:aa:aa"
        altSkooName = "Alt"
        with self.subTest("Generate name from default list"):
            name = registry.generateName()
            self.assertIn(name, registry.skoobotNames)
        with self.subTest("Generate Alt name"):
            # With a single-entry pool the generated name is forced.
            registry.skoobotNames = set([altSkooName])
            name = registry.generateName()
            self.assertEqual(altSkooName, name)
        with self.subTest("Names all used"):
            # Exhausting the pool must raise rather than reuse a name.
            registry.skoobotNames = set([altSkooName])
            registry.addSkoobot(altSkooAddr)
            with self.assertRaises(KeyError):
                name = registry.generateName()

    def testBug8(self):
        """
        Tests the resolution of bug #8
        """
        badDefaultName = "gremlin"
        registry = SkoobotRegistry(self.tempPath)
        with self.subTest("Setting bad default"):
            # A default naming an unknown skoobot must be rejected.
            oldDefault = registry.getDefaultName()
            with self.assertRaises(ValueError):
                registry.setDefault(badDefaultName)
            self.assertEqual(oldDefault, registry.getDefaultName())
        with self.subTest("Loading bad default"):
            self.registryDict["default"] = badDefaultName
            with open(self.tempPath, "w") as registryFile:
                json.dump(self.registryDict, registryFile, indent=4)
            registry.load()
            self.assertEqual(None, registry.getDefaultName())
        with self.subTest("Loading good default"):
            self.registryDict["default"] = self.skooName
            with open(self.tempPath, "w") as registryFile:
                json.dump(self.registryDict, registryFile, indent=4)
            registry.load()
            self.assertEqual(self.skooName, registry.getDefaultName())

    def testBug11(self):
        """
        Tests the resolution of bug #11
        "Registry setDefault() does strange things if given a
        list of lists as a parameter"

        Check that it raises a TypeError when called with
        something other than String or None.
        It turns out that the error only triggers with tuples.
        """
        registry = SkoobotRegistry(self.tempPath)
        with self.subTest("Valid arguments"):
            registry.setDefault(None)
            self.assertEqual(None, registry.getDefaultName())
            registry.setDefault(self.skooName)
            self.assertEqual(self.skooName, registry.getDefaultName())
        with self.subTest("Invalid arguments"):
            with self.assertRaises(TypeError):
                registry.setDefault(("test",))
if __name__ == "__main__":
unittest.main()
| 38.152406 | 102 | 0.604597 |
bb847103ee6ed91a476641e0d961f3c443e4edee | 1,945 | py | Python | Mass-DM/discum-scraper.py | devhill535/dmmass | 24220574adb1a16d679d8c98c536f18cc2e0cac1 | [
"MIT"
] | 81 | 2021-10-12T13:42:23.000Z | 2022-03-19T15:11:16.000Z | Mass-DM/discum-scraper.py | devhill535/dmmass | 24220574adb1a16d679d8c98c536f18cc2e0cac1 | [
"MIT"
] | 4 | 2021-11-01T01:22:15.000Z | 2022-01-18T01:30:24.000Z | Mass-DM/discum-scraper.py | devhill535/dmmass | 24220574adb1a16d679d8c98c536f18cc2e0cac1 | [
"MIT"
] | 50 | 2021-10-13T00:15:42.000Z | 2022-03-24T13:29:16.000Z | import time
start = time.time()
import sys
import subprocess
# python -m pip install --user --upgrade git+https://github.com/Merubokkusu/Discord-S.C.U.M#egg=discum
try:
import discum
except ImportError:
try:
subprocess.check_call([sys.executable, "-m", "pip", "install", '--user', "--upgrade",
"git+https://github.com/Merubokkusu/Discord-S.C.U.M#egg=discum"])
except:
subprocess.check_call([sys.executable, "-m", "pip", "install", 'discum'])
import os
import json
with open('config.json') as f:
yamete_kudasai = json.load(f)
token = yamete_kudasai['token']
bot = discum.Client(token=token)
def close_after_fetching(resp, guild_id):
    """Gateway command hook: once member fetching for ``guild_id`` has
    finished, report the count, deregister this hook and close the
    gateway so that ``bot.gateway.run()`` returns."""
    if not bot.gateway.finishedMemberFetching(guild_id):
        return
    lenmembersfetched = len(bot.gateway.session.guild(guild_id).members)  # this line is optional
    print(str(lenmembersfetched)+' members fetched')  # this line is optional
    bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
    bot.gateway.close()
def get_members(guild_id, channel_id):
    """Run the gateway until every member of ``guild_id`` has been
    fetched (via ``channel_id``), then return the member mapping."""
    # keep="all": retain all user attributes; wait 1 second between requests.
    bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
    # Register the hook that shuts the gateway down once fetching is done.
    bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
    bot.gateway.run()
    bot.gateway.resetSession()  # saves 10 seconds when gateway is run again
    return bot.gateway.session.guild(guild_id).members
members = get_members('guild id here', 'channel id here')

# Merge the newly scraped member IDs into the persistent ids.json store,
# skipping IDs recorded on a previous run.
try:
    with open("ids.json", "r") as file:
        data = json.load(file)
except FileNotFoundError:
    # Robustness fix: on the first run ids.json does not exist yet;
    # start from an empty store instead of crashing.
    data = []

total_scraped = 0
for memberID in members:
    # Bug fix: the store holds ints (``data.append(int(memberID))``) but the
    # gateway yields string IDs, so ``memberID not in data`` never matched
    # and every ID was re-added on each run. Compare as int instead.
    if int(memberID) not in data:
        total_scraped += 1
        data.append(int(memberID))
        print(f"{total_scraped}/{len(members)} - {memberID}")

with open("ids.json", "w") as file:
    json.dump(data, file)

end = time.time()
print(f"Scraped {total_scraped} User IDs successfully\nTime Taken: {end - start}s")
7e36bd31008e6c4511168b0f2bfd17a8ff842bfb | 11,989 | py | Python | examples/speech_to_text/data/speech_to_text_dataset_with_src.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | 2 | 2021-09-14T06:42:08.000Z | 2021-11-09T21:15:18.000Z | examples/speech_to_text/data/speech_to_text_dataset_with_src.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | null | null | null | examples/speech_to_text/data/speech_to_text_dataset_with_src.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | 3 | 2021-09-06T10:18:39.000Z | 2021-12-29T10:52:51.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import logging
import os.path as op
from typing import Dict, List, Optional, Tuple
import torch
from fairseq.data import (
ConcatDataset,
Dictionary,
ResamplingDataset,
data_utils as fairseq_data_utils,
)
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig, SpeechToTextDatasetCreator, \
SpeechToTextDataset, _collate_frames
logger = logging.getLogger(__name__)
class S2TDataConfigSrc(S2TDataConfig):
    """Data-config YAML wrapper that additionally exposes source-side
    vocabulary and subword-tokenizer settings."""

    def __init__(self, yaml_path):
        super().__init__(yaml_path)

    @property
    def vocab_filename_src(self):
        """Name of the fairseq source-vocabulary file under the data root."""
        return self.config.get("vocab_filename_src", "dict.txt")

    @property
    def bpe_tokenizer_src(self) -> Dict:
        """Source-side subword tokenizer applied after pre-tokenization.

        Returns a dict whose ``bpe`` entry names the tokenizer and whose
        remaining items are tokenizer-specific arguments; tokenizers are
        defined in ``fairseq.data.encoders.*``.
        """
        return self.config.get("bpe_tokenizer_src", {"bpe": None})
class SpeechToTextDatasetWithSrc(SpeechToTextDataset):
    """Speech-to-text dataset that additionally carries source transcripts.

    On top of the (audio, target-text) examples handled by the parent class,
    each item exposes the source-language transcript encoded with a dedicated
    source dictionary and source BPE tokenizer, and `collater` emits matching
    `transcript*` batch fields.
    """
    def __init__(
        self,
        split: str,
        is_train_split: bool,
        data_cfg: S2TDataConfigSrc,
        audio_paths: List[str],
        n_frames: List[int],
        src_texts: Optional[List[str]] = None,
        tgt_texts: Optional[List[str]] = None,
        speakers: Optional[List[str]] = None,
        src_langs: Optional[List[str]] = None,
        tgt_langs: Optional[List[str]] = None,
        ids: Optional[List[str]] = None,
        tgt_dict: Optional[Dictionary] = None,
        src_dict: Optional[Dictionary] = None,
        pre_tokenizer=None,
        bpe_tokenizer=None,
        bpe_tokenizer_src=None,
    ):
        super().__init__(split, is_train_split, data_cfg, audio_paths, n_frames,
                         src_texts, tgt_texts, speakers, src_langs, tgt_langs,
                         ids, tgt_dict, pre_tokenizer, bpe_tokenizer)
        # NOTE(review): most assignments below repeat work already done by
        # super().__init__() with the same arguments; only src_dict and
        # bpe_tokenizer_src are genuinely new here. Confirm before
        # de-duplicating.
        self.split, self.is_train_split = split, is_train_split
        self.data_cfg = data_cfg
        self.audio_paths, self.n_frames = audio_paths, n_frames
        self.n_samples = len(audio_paths)
        # Every optional per-example metadata list must align with the audio list.
        assert len(n_frames) == self.n_samples > 0
        assert src_texts is None or len(src_texts) == self.n_samples
        assert tgt_texts is None or len(tgt_texts) == self.n_samples
        assert speakers is None or len(speakers) == self.n_samples
        assert src_langs is None or len(src_langs) == self.n_samples
        assert tgt_langs is None or len(tgt_langs) == self.n_samples
        assert ids is None or len(ids) == self.n_samples
        # Texts and their dictionaries must be provided (or omitted) together.
        assert (tgt_dict is None and tgt_texts is None) or (
            tgt_dict is not None and tgt_texts is not None
        )
        assert (src_dict is None and src_texts is None) or (
            src_dict is not None and src_texts is not None
        )
        self.src_texts, self.tgt_texts = src_texts, tgt_texts
        self.src_langs, self.tgt_langs = src_langs, tgt_langs
        self.tgt_dict = tgt_dict
        self.src_dict = src_dict
        self.check_tgt_lang_tag()
        self.ids = ids
        # Shuffling only applies to training splits.
        self.shuffle = data_cfg.shuffle if is_train_split else False
        self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
            self.data_cfg.get_feature_transforms(split, is_train_split)
        )
        self.pre_tokenizer = pre_tokenizer
        self.bpe_tokenizer = bpe_tokenizer
        # Separate BPE model used only for the source-language transcripts.
        self.bpe_tokenizer_src = bpe_tokenizer_src
        logger.info(self.__repr__())
    def tokenize_text_src(self, text: str):
        """Apply the shared pre-tokenizer, then the source-side BPE model."""
        if self.pre_tokenizer is not None:
            text = self.pre_tokenizer.encode(text)
        if self.bpe_tokenizer_src is not None:
            text = self.bpe_tokenizer_src.encode(text)
        return text
    def __getitem__(
        self, index: int
    ) -> Tuple[int, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Return (index, audio features, target tokens, source transcript tokens)."""
        index, source, target = super().__getitem__(index)
        transcript = None
        if self.src_texts is not None:
            tokenized = self.tokenize_text_src(self.src_texts[index])
            # Encode with the *source* dictionary; an EOS token is appended.
            transcript = self.src_dict.encode_line(
                tokenized, add_if_not_exist=False, append_eos=True
            ).long()
        return index, source, target, transcript
    def collater(self, samples: List[Tuple[int, torch.Tensor, torch.Tensor, torch.Tensor]]) -> Dict:
        """Collate `__getitem__` outputs into a training batch dict.

        Samples are sorted by decreasing number of audio frames; every batch
        field is re-indexed with the same permutation so rows stay aligned.
        Each sample is a 4-tuple: (index, frames, target, transcript).
        """
        if len(samples) == 0:
            return {}
        indices = torch.tensor([i for i, _, _, _ in samples], dtype=torch.long)
        frames = _collate_frames(
            [s for _, s, _, _ in samples], self.data_cfg.use_audio_input
        )
        # sort samples by descending number of frames
        n_frames = torch.tensor([s.size(0) for _, s, _, _ in samples], dtype=torch.long)
        n_frames, order = n_frames.sort(descending=True)
        indices = indices.index_select(0, order)
        frames = frames.index_select(0, order)
        target, target_lengths = None, None
        prev_output_tokens = None
        ntokens = None
        if self.tgt_texts is not None:
            target = fairseq_data_utils.collate_tokens(
                [t for _, _, t, _ in samples],
                self.tgt_dict.pad(),
                self.tgt_dict.eos(),
                left_pad=False,
                move_eos_to_beginning=False,
            )
            target = target.index_select(0, order)
            target_lengths = torch.tensor(
                [t.size(0) for _, _, t, _ in samples], dtype=torch.long
            ).index_select(0, order)
            # Teacher-forcing inputs: same tokens with EOS moved to the front.
            prev_output_tokens = fairseq_data_utils.collate_tokens(
                [t for _, _, t, _ in samples],
                self.tgt_dict.pad(),
                self.tgt_dict.eos(),
                left_pad=False,
                move_eos_to_beginning=True,
            )
            prev_output_tokens = prev_output_tokens.index_select(0, order)
            ntokens = sum(t.size(0) for _, _, t, _ in samples)
        # Source transcripts
        transcript, transcript_lengths = None, None
        prev_transcript_tokens = None
        ntokens_transcript = None
        if self.src_texts is not None:
            transcript = fairseq_data_utils.collate_tokens(
                [t for _, _, _, t in samples],
                self.src_dict.pad(),
                self.src_dict.eos(),
                left_pad=False,
                move_eos_to_beginning=False,
            )
            transcript = transcript.index_select(0, order)
            transcript_lengths = torch.tensor(
                [t.size(0) for _, _, _, t in samples], dtype=torch.long
            ).index_select(0, order)
            prev_transcript_tokens = fairseq_data_utils.collate_tokens(
                [t for _, _, _, t in samples],
                self.src_dict.pad(),
                self.src_dict.eos(),
                left_pad=False,
                move_eos_to_beginning=True,
            )
            prev_transcript_tokens = prev_transcript_tokens.index_select(0, order)
            ntokens_transcript = sum(t.size(0) for _, _, _, t in samples)
        out = {
            "id": indices,
            "net_input": {
                "src_tokens": frames,
                "src_lengths": n_frames,
                "prev_output_tokens": prev_output_tokens,
                "prev_transcript_tokens": prev_transcript_tokens,
            },
            "target": target,
            "target_lengths": target_lengths,
            "transcript": transcript,
            "transcript_lengths": transcript_lengths,
            "ntokens": ntokens,
            "ntokens_transcript": ntokens_transcript,
            "nsentences": len(samples),
        }
        return out
class SpeechToTextDatasetCreatorWithSrc(SpeechToTextDatasetCreator):
    """Factory that builds `SpeechToTextDatasetWithSrc` instances from TSVs."""
    @classmethod
    def _from_list(
        cls,
        split_name: str,
        is_train_split,
        samples: List[List[Dict]],
        data_cfg: S2TDataConfigSrc,
        tgt_dict,
        src_dict,
        pre_tokenizer,
        bpe_tokenizer,
        bpe_tokenizer_src,
    ) -> SpeechToTextDatasetWithSrc:
        """Assemble one dataset from parsed TSV rows (one row-list per split)."""
        audio_paths, n_frames, src_texts, tgt_texts, ids = [], [], [], [], []
        speakers, src_langs, tgt_langs = [], [], []
        for s in samples:
            ids.extend([ss[cls.KEY_ID] for ss in s])
            audio_paths.extend(
                [op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s]
            )
            n_frames.extend([int(ss[cls.KEY_N_FRAMES]) for ss in s])
            tgt_texts.extend([ss[cls.KEY_TGT_TEXT] for ss in s])
            # Optional columns fall back to the parent class's DEFAULT_* values.
            src_texts.extend([ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s])
            speakers.extend([ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s])
            src_langs.extend([ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s])
            tgt_langs.extend([ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s])
        return SpeechToTextDatasetWithSrc(
            split_name,
            is_train_split,
            data_cfg,
            audio_paths,
            n_frames,
            src_texts,
            tgt_texts,
            speakers,
            src_langs,
            tgt_langs,
            ids,
            tgt_dict,
            src_dict,
            pre_tokenizer,
            bpe_tokenizer,
            bpe_tokenizer_src
        )
    @classmethod
    def from_tsv(
        cls,
        root: str,
        data_cfg: S2TDataConfigSrc,
        splits: str,
        tgt_dict,
        src_dict,
        pre_tokenizer,
        bpe_tokenizer,
        bpe_tokenizer_src,
        is_train_split: bool,
        epoch: int,
        seed: int,
    ) -> SpeechToTextDatasetWithSrc:
        """Load one or more comma-separated splits from `<root>/<split>.tsv`.

        When several training splits are given and `sampling_alpha != 1.0`,
        they are re-balanced with temperature-based resampling. The result is
        always wrapped in a `ConcatDataset`.
        """
        samples = []
        _splits = splits.split(",")
        for split in _splits:
            tsv_path = op.join(root, f"{split}.tsv")
            if not op.isfile(tsv_path):
                raise FileNotFoundError(f"Dataset not found: {tsv_path}")
            with open(tsv_path) as f:
                # Tab-separated, quoting disabled so text columns pass through verbatim.
                reader = csv.DictReader(
                    f,
                    delimiter="\t",
                    quotechar=None,
                    doublequote=False,
                    lineterminator="\n",
                    quoting=csv.QUOTE_NONE,
                )
                samples.append([dict(e) for e in reader])
        assert len(samples) > 0
        datasets = [
            cls._from_list(
                name,
                is_train_split,
                [s],
                data_cfg,
                tgt_dict,
                src_dict,
                pre_tokenizer,
                bpe_tokenizer,
                bpe_tokenizer_src
            )
            for name, s in zip(_splits, samples)
        ]
        if is_train_split and len(_splits) > 1 and data_cfg.sampling_alpha != 1.0:
            # temperature-based sampling
            size_ratios = cls._get_size_ratios(
                _splits, [len(s) for s in samples], alpha=data_cfg.sampling_alpha
            )
            datasets = [
                ResamplingDataset(
                    d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
                )
                for d, r in zip(datasets, size_ratios)
            ]
        return ConcatDataset(datasets)
| 38.303514 | 101 | 0.568855 |
012bbc2daad5a81adce4f226475179ecc85b2fb8 | 17,869 | py | Python | keras/utils/tf_utils.py | Bhavay192/keras | ed6ca50cceb2a071f86e5e9af5076b1d62fd2531 | [
"Apache-2.0"
] | 1 | 2022-03-12T16:50:13.000Z | 2022-03-12T16:50:13.000Z | keras/utils/tf_utils.py | sairamadithya/keras | 42bf9972492f47c3d3c249de9c20942ba217937d | [
"Apache-2.0"
] | null | null | null | keras/utils/tf_utils.py | sairamadithya/keras | 42bf9972492f47c3d3c249de9c20942ba217937d | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow-related utilities."""
import collections
import copy
import random
from keras import backend
from keras.engine import keras_tensor
from keras.utils import object_identity
from keras.utils import tf_contextlib
import numpy as np
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import keras_export
# pylint: enable=g-direct-tensorflow-import
@keras_export('keras.utils.set_random_seed', v1=[])
def set_random_seed(seed):
  """Sets all random seeds for the program (Python, NumPy, and TensorFlow).
  You can use this utility to make almost any Keras program fully deterministic.
  Some limitations apply in cases where network communications are involved
  (e.g. parameter server distribution), which creates additional sources of
  randomness, or when certain non-deterministic cuDNN ops are involved.
  Calling this utility is equivalent to the following:
  ```python
  import random
  import numpy as np
  import tensorflow as tf
  random.seed(seed)
  np.random.seed(seed)
  tf.random.set_seed(seed)
  ```
  Arguments:
    seed: Integer, the random seed to use.
  """
  if not isinstance(seed, int):
    raise ValueError(
        'Expected `seed` argument to be an integer. '
        f'Received: seed={seed} (of type {type(seed)})')
  random.seed(seed)
  np.random.seed(seed)
  tf.random.set_seed(seed)
  # Also reset Keras's internal seed generator so code drawing from
  # backend._SEED_GENERATOR becomes deterministic as well.
  backend._SEED_GENERATOR.generator = random.Random(seed)  # pylint:disable=protected-access
def is_tensor_or_tensor_list(v):
  """Returns True if `v` flattens to a non-empty structure whose first leaf
  is a `tf.Tensor`.

  NOTE(review): only the *first* flattened element is inspected, so a mixed
  structure whose first leaf happens to be a Tensor also returns True —
  confirm callers rely on this heuristic before tightening it.
  """
  flat = tf.nest.flatten(v)
  # `bool(flat)` guards the empty-structure case; the original
  # `if ...: return True else: return False` is folded into one expression.
  return bool(flat) and isinstance(flat[0], tf.Tensor)
def get_reachable_from_inputs(inputs, targets=None):
  """Returns the set of tensors/ops reachable from `inputs`.
  Stops if all targets have been found (target is optional).
  Only valid in Symbolic mode, not Eager mode.
  Args:
    inputs: List of tensors.
    targets: List of tensors.
  Returns:
    A set of tensors reachable from the inputs (includes the inputs themselves).
  """
  inputs = tf.nest.flatten(inputs, expand_composites=True)
  # Identity-based set: tensors are compared by object identity, not __eq__.
  reachable = object_identity.ObjectIdentitySet(inputs)
  if targets:
    remaining_targets = object_identity.ObjectIdentitySet(tf.nest.flatten(targets))
  queue = collections.deque(inputs)
  # Breadth-first traversal over consumer edges, starting from the inputs.
  while queue:
    x = queue.pop()
    if isinstance(x, tuple(_user_convertible_tensor_types)):
      # Can't find consumers of user-specific types.
      continue
    if isinstance(x, tf.Operation):
      # Include both data outputs and control-dependency outputs.
      outputs = x.outputs[:] or []
      outputs += x._control_outputs  # pylint: disable=protected-access
    elif isinstance(x, tf.Variable):
      try:
        outputs = [x.op]
      except AttributeError:
        # Variables can be created in an Eager context.
        outputs = []
    elif tf.is_tensor(x):
      outputs = x.consumers()
    else:
      raise TypeError(
          f'Expected tf.Operation, tf.Variable, or tf.Tensor. Received: {x}')
    for y in outputs:
      if y not in reachable:
        reachable.add(y)
        if targets:
          remaining_targets.discard(y)
        queue.appendleft(y)
    # Early exit once every requested target has been reached.
    if targets and not remaining_targets:
      return reachable
  return reachable
# This function needs access to private functions of `nest`.
# pylint: disable=protected-access
def map_structure_with_atomic(is_atomic_fn, map_fn, nested):
  """Maps the atomic elements of a nested structure.
  Args:
    is_atomic_fn: A function that determines if an element of `nested` is
      atomic.
    map_fn: The function to apply to atomic elements of `nested`.
    nested: A nested structure.
  Returns:
    The nested structure, with atomic elements mapped according to `map_fn`.
  Raises:
    ValueError: If an element that is neither atomic nor a sequence is
      encountered.
  """
  if is_atomic_fn(nested):
    return map_fn(nested)
  # Recursively convert.
  if not tf.nest.is_nested(nested):
    raise ValueError(
        f'Received non-atomic and non-sequence element: {nested}')
  if tf.__internal__.nest.is_mapping(nested):
    # Deterministic traversal: visit dict values in sorted-key order.
    values = [nested[k] for k in sorted(nested.keys())]
  elif tf.__internal__.nest.is_attrs(nested):
    # attrs-decorated classes are flattened via their declared fields.
    values = _astuple(nested)
  else:
    values = nested
  mapped_values = [
      map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values
  ]
  # Rebuild the same container type around the mapped values.
  return tf.__internal__.nest.sequence_like(nested, mapped_values)
def get_shapes(tensors):
  """Gets shapes from tensors.

  Leaves that do not expose a `.shape` attribute are mapped to None.
  """
  return tf.nest.map_structure(
      lambda x: x.shape if hasattr(x, 'shape') else None, tensors)
# pylint: enable=protected-access
def convert_shapes(input_shape, to_tuples=True):
  """Converts nested shape representations to desired format.
  Performs:
  TensorShapes -> tuples if `to_tuples=True`.
  tuples of int or None -> TensorShapes if `to_tuples=False`.
  Valid objects to be converted are:
  - TensorShapes
  - tuples with elements of type int or None.
  - ints
  - None
  Args:
    input_shape: A nested structure of objects to be converted to TensorShapes.
    to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts
      all tuples representing shapes to TensorShapes.
  Returns:
    Nested structure of shapes in desired format.
  Raises:
    ValueError: when the input tensor shape can't be converted to tuples, eg
      unknown tensor shape.
  """
  def _is_shape_component(value):
    # A single dimension: None (unknown), an int, or a TF1 Dimension object.
    return value is None or isinstance(value, (int, tf.compat.v1.Dimension))
  def _is_atomic_shape(input_shape):
    # Ex: TensorShape or (None, 10, 32) or 5 or `None`
    if _is_shape_component(input_shape):
      return True
    if isinstance(input_shape, tf.TensorShape):
      return True
    if (isinstance(input_shape, (tuple, list)) and
        all(_is_shape_component(ele) for ele in input_shape)):
      return True
    return False
  def _convert_shape(input_shape):
    # Normalize through TensorShape first, then optionally back to a tuple.
    input_shape = tf.TensorShape(input_shape)
    if to_tuples:
      input_shape = tuple(input_shape.as_list())
    return input_shape
  return map_structure_with_atomic(_is_atomic_shape, _convert_shape,
                                   input_shape)
class ListWrapper:
  """Opaque wrapper so `nest` treats a whole list as a single leaf element."""

  def __init__(self, list_to_wrap):
    # Hold a reference to the caller's list; no copy is made.
    self._list = list_to_wrap

  def as_list(self):
    """Return the wrapped list object itself (not a copy)."""
    return self._list
def convert_inner_node_data(nested, wrap=False):
  """Either wraps or unwraps innermost node data lists in `ListWrapper` objects.
  Args:
    nested: A nested data structure.
    wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`,
      unwraps `ListWrapper` objects into lists.
  Returns:
    Structure of same type as nested, with lists wrapped/unwrapped.
  """
  def _is_serialized_node_data(nested):
    # Node data can be of form `[layer_name, node_id, tensor_id]` or
    # `[layer_name, node_id, tensor_id, kwargs]`.
    if (isinstance(nested, list) and (len(nested) in [3, 4]) and
        isinstance(nested[0], str)):
      return True
    return False
  def _is_atomic_nested(nested):
    """Returns `True` if `nested` is a list representing node data."""
    if isinstance(nested, ListWrapper):
      return True
    if _is_serialized_node_data(nested):
      return True
    return not tf.nest.is_nested(nested)
  def _convert_object_or_list(nested):
    """Convert b/t `ListWrapper` object and list representations."""
    if wrap:
      if isinstance(nested, ListWrapper):
        # Already wrapped; wrapping is idempotent.
        return nested
      if _is_serialized_node_data(nested):
        return ListWrapper(nested)
      return nested
    else:
      if isinstance(nested, ListWrapper):
        return nested.as_list()
      return nested
  return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list,
                                   nested)
def shape_type_conversion(fn):
  """Decorator that handles tuple/TensorShape conversion.
  Used in `compute_output_shape` and `build`.
  Args:
    fn: function to wrap.
  Returns:
    Wrapped function.
  """
  def wrapper(instance, input_shape):
    # Pass shapes as tuples to `fn`
    # This preserves compatibility with external Keras.
    if input_shape is not None:
      input_shape = convert_shapes(input_shape, to_tuples=True)
    output_shape = fn(instance, input_shape)
    # Return shapes from `fn` as TensorShapes.
    # A None output passes through unconverted in either direction.
    if output_shape is not None:
      output_shape = convert_shapes(output_shape, to_tuples=False)
    return output_shape
  return wrapper
def are_all_symbolic_tensors(tensors):
  """Returns True iff every element of `tensors` is a symbolic tensor."""
  return all(is_symbolic_tensor(tensor) for tensor in tensors)
_user_convertible_tensor_types = set()
def is_extension_type(tensor):
  """Returns whether a tensor is of an ExtensionType.
  github.com/tensorflow/community/pull/269
  Currently it works by checking if `tensor` is a `CompositeTensor` instance,
  but this will be changed to use an appropriate extensiontype protocol
  check once ExtensionType is made public.
  Args:
    tensor: An object to test
  Returns:
    True if the tensor is an extension type object, false if not.
  """
  # CompositeTensor is the interim stand-in check described in the docstring.
  return isinstance(tensor, tf.__internal__.CompositeTensor)
def is_symbolic_tensor(tensor):
  """Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.
  A Variable can be seen as either: it is considered symbolic
  when we are in a graph scope, and eager when we are in an eager scope.
  Args:
    tensor: A tensor instance to test.
  Returns:
    True for symbolic tensors, False for eager tensors.
  """
  if isinstance(tensor, tf.Tensor):
    # Graph tensors carry a `.graph` attribute; eager tensors do not.
    return hasattr(tensor, 'graph')
  elif is_extension_type(tensor):
    # A composite is symbolic if any of its component tensors is symbolic.
    component_tensors = tf.nest.flatten(tensor, expand_composites=True)
    return any(hasattr(t, 'graph') for t in component_tensors)
  elif isinstance(tensor, tf.Variable):
    # Variables that are output of a Keras Layer in Functional API mode
    # should be considered symbolic.
    # TODO(omalleyt): We need a better way to check this in order to
    # enable `run_eagerly=True` for Models containing Layers that
    # return Variables as outputs.
    return (getattr(tensor, '_keras_history', False) or
            not tf.executing_eagerly())
  elif isinstance(tensor, tuple(_user_convertible_tensor_types)):
    # User-registered types are converted first, then re-tested.
    tensor = ops.convert_to_tensor_or_composite(tensor)
    return is_symbolic_tensor(tensor)
  else:
    return False
@keras_export('keras.__internal__.utils.register_symbolic_tensor_type', v1=[])
def register_symbolic_tensor_type(cls):
  """Allows users to specify types regarded as symbolic `Tensor`s.
  Used in conjunction with `tf.register_tensor_conversion_function`, calling
  `tf.keras.__internal__.utils.register_symbolic_tensor_type(cls)`
  allows non-`Tensor` objects to be plumbed through Keras layers.
  Example:
  ```python
  # One-time setup.
  class Foo:
    def __init__(self, input_):
      self._input = input_
    def value(self):
      return tf.constant(42.)
  tf.register_tensor_conversion_function(
      Foo, lambda x, *args, **kwargs: x.value())
  tf.keras.__internal__.utils.register_symbolic_tensor_type(Foo)
  # User-land.
  layer = tf.keras.layers.Lambda(lambda input_: Foo(input_))
  ```
  Args:
    cls: A `class` type which shall be regarded as a symbolic `Tensor`.
  """
  # NOTE(review): `global` is not strictly required here (the set is mutated
  # in place, never rebound), but it documents the module-level state.
  global _user_convertible_tensor_types
  if cls not in _user_convertible_tensor_types:
    # Registration is idempotent: already-known types are skipped entirely.
    keras_tensor.register_keras_tensor_specialization(
        cls, keras_tensor.UserRegisteredTypeKerasTensor)
    _user_convertible_tensor_types.add(cls)
def type_spec_from_value(value):
  """Grab type_spec without converting array-likes to tensors.

  Falls back to `tf.type_spec_from_value` for non-array-like inputs.
  """
  if is_extension_type(value):
    return value._type_spec  # pylint: disable=protected-access
  # Get a TensorSpec for array-like data without
  # converting the data to a Tensor
  if hasattr(value, 'shape') and hasattr(value, 'dtype'):
    return tf.TensorSpec(value.shape, value.dtype)
  else:
    return tf.type_spec_from_value(value)
def is_ragged(tensor):
  """Returns true if `tensor` is a ragged tensor or ragged tensor value."""
  # Accepts both the TF2 RaggedTensor and the TF1 RaggedTensorValue form.
  return isinstance(
      tensor,
      (tf.RaggedTensor, tf.compat.v1.ragged.RaggedTensorValue))
def is_sparse(tensor):
  """Returns true if `tensor` is a sparse tensor or sparse tensor value."""
  # Accepts both the TF2 SparseTensor and the TF1 SparseTensorValue form.
  return isinstance(
      tensor,
      (tf.SparseTensor, tf.compat.v1.SparseTensorValue))
def is_tensor_or_variable(x):
  """Returns True if `x` is a tensor (per `tf.is_tensor`) or a `tf.Variable`."""
  return tf.is_tensor(x) or isinstance(x, tf.Variable)
def assert_no_legacy_layers(layers):
  """Prevent tf.layers.Layers from being used with Keras.
  Certain legacy layers inherit from their keras analogs; however they are
  not supported with keras and can lead to subtle and hard to diagnose bugs.
  Args:
    layers: A list of layers to check
  Raises:
    TypeError: If any elements of layers are tf.layers.Layers
  """
  # isinstance check for tf.layers.Layer introduces a circular dependency.
  # Detect legacy layers via their `_is_legacy_layer` marker attribute instead.
  legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]
  if legacy_layers:
    layer_str = '\n'.join('  ' + str(l) for l in legacy_layers)
    raise TypeError(
        f'The following are legacy tf.layers.Layers:\n{layer_str}\n'
        'To use keras as a '
        'framework (for instance using the Network, Model, or Sequential '
        'classes), please use the tf.keras.layers implementation instead. '
        '(Or, if writing custom layers, subclass from tf.keras.layers rather '
        'than tf.layers)')
@tf_contextlib.contextmanager
def maybe_init_scope(layer):
  """Open an `init_scope` if in V2 mode and using the keras graph.
  Args:
    layer: The Layer/Model that is currently active.
  Yields:
    None
  """
  # Don't open an init_scope in V1 mode or when using legacy tf.layers.
  # Both branches yield exactly once, as required by @contextmanager.
  if (tf.compat.v1.executing_eagerly_outside_functions() and
      getattr(layer, '_keras_style', True)):
    with tf.init_scope():
      yield
  else:
    yield
@tf_contextlib.contextmanager
def graph_context_for_symbolic_tensors(*args, **kwargs):
  """Returns graph context manager if any of the inputs is a symbolic tensor."""
  if any(is_symbolic_tensor(v) for v in list(args) + list(kwargs.values())):
    # Symbolic tensors must be manipulated within the Keras backend graph.
    with backend.get_graph().as_default():
      yield
  else:
    yield
def dataset_is_infinite(dataset):
  """True if the passed dataset is infinite."""
  if tf.compat.v1.executing_eagerly_outside_functions():
    # TF2 path: the result is itself a boolean tensor.
    return tf.equal(
        tf.data.experimental.cardinality(dataset), tf.data.experimental.INFINITE_CARDINALITY)
  else:
    # TF1 graph mode: evaluate the cardinality in the active session.
    dataset_size = backend.get_session().run(
        tf.data.experimental.cardinality(dataset))
    return dataset_size == tf.data.experimental.INFINITE_CARDINALITY
def get_tensor_spec(t, dynamic_batch=False, name=None):
  """Returns a `TensorSpec` given a single `Tensor` or `TensorSpec`.

  Args:
    t: A `Tensor`, `TypeSpec`, composite tensor, or Keras symbolic tensor.
    dynamic_batch: If True, relax the leading (batch) dimension to `None`.
    name: Optional name for a newly constructed `TensorSpec`.

  Returns:
    A `TypeSpec`, or None when `t` is not tensor-like.
  """
  # pylint: disable=protected-access
  if isinstance(t, tf.TypeSpec):
    spec = t
  elif is_extension_type(t):
    # TODO(b/148821952): Should these specs have a name attr?
    spec = t._type_spec
  elif (hasattr(t, '_keras_history') and
        hasattr(t._keras_history[0], '_type_spec')):
    return t._keras_history[0]._type_spec
  elif hasattr(t, 'shape') and hasattr(t, 'dtype'):
    spec = tf.TensorSpec(shape=t.shape, dtype=t.dtype, name=name)
  else:
    return None  # Allow non-Tensors to pass through.
  # pylint: enable=protected-access
  if not dynamic_batch:
    return spec
  shape = spec.shape
  if shape.rank is None or shape.rank == 0:
    # Unknown-rank or scalar specs have no batch dimension to relax.
    return spec
  shape_list = shape.as_list()
  shape_list[0] = None
  # TODO(b/203201161) Remove this deepcopy one type_spec_with_shape has been
  # updated to not mutate spec.
  spec = copy.deepcopy(spec)
  return keras_tensor.type_spec_with_shape(spec, tf.TensorShape(shape_list))
def sync_to_numpy_or_python_type(tensors):
  """Syncs and converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
  For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,
  it converts it to a Python type, such as a float or int, by calling
  `result.item()`.
  Numpy scalars are converted, as Python types are often more convenient to deal
  with. This is especially useful for bfloat16 Numpy scalars, which don't
  support as many operations as other Numpy values.
  Async strategies (such as `TPUStrategy` and `ParameterServerStrategy`) are
  forced to
  sync during this process.
  Args:
    tensors: A structure of tensors.
  Returns:
    `tensors`, but scalar tensors are converted to Python types and non-scalar
    tensors are converted to Numpy arrays.
  """
  if isinstance(tensors, tf.distribute.experimental.coordinator.RemoteValue):
    # Fetching a RemoteValue blocks until the async computation completes.
    return tensors.fetch()
  def _to_single_numpy_or_python_type(t):
    if isinstance(t, tf.Tensor):
      x = t.numpy()
      # 0-d arrays become plain Python scalars via .item().
      return x.item() if np.ndim(x) == 0 else x
    return t  # Don't turn ragged or sparse tensors to NumPy.
  return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
def _astuple(attrs):
"""Converts the given attrs to tuple non-recursively."""
cls = type(attrs)
fields = getattr(cls, '__attrs_attrs__', None)
if fields is None:
raise ValueError(f'{cls} is not an attrs-decorated class.')
values = []
for field in fields:
values.append(getattr(attrs, field.name))
return tuple(values)
| 31.184991 | 93 | 0.715205 |
b7dfc03aa8df2ede7e0d65481e17cd9961294899 | 3,367 | py | Python | vi/cmd_base.py | my-personal-forks/Vintageous | 797ec8f1b0ef12d045949dc19850ba34a4f1c557 | [
"MIT"
] | 1,146 | 2015-01-05T02:23:09.000Z | 2022-03-23T07:22:50.000Z | vi/cmd_base.py | my-personal-forks/Vintageous | 797ec8f1b0ef12d045949dc19850ba34a4f1c557 | [
"MIT"
] | 315 | 2015-01-03T12:42:12.000Z | 2020-09-24T20:01:47.000Z | vi/cmd_base.py | my-personal-forks/Vintageous | 797ec8f1b0ef12d045949dc19850ba34a4f1c557 | [
"MIT"
] | 138 | 2015-01-04T17:54:46.000Z | 2021-02-26T14:56:09.000Z | class cmd_types:
"""
Types of command.
"""
MOTION = 1
ACTION = 2
ANY = 3
OTHER = 4
USER = 5
OPEN_NAME_SPACE = 6
class ViCommandDefBase(object):
    """
    Base class for all Vim commands.
    """

    # Instance attributes included when the command is serialized to Json.
    _serializable = ['_inp',]

    def __init__(self):
        # Name of the Sublime Text command wrapped by this class.
        self.command = '<unset>'
        self.input_parser = None
        self._inp = ''

    def __getitem__(self, key):
        # XXX: For compatibility. Should be removed eventually.
        return self.__dict__[key]

    def __str__(self):
        return '<{0} ({1})>'.format(self.__class__.__qualname__,
                                    self.command)

    @property
    def accept_input(self):
        """Whether this command is still waiting for more input."""
        return False

    @property
    def inp(self):
        """
        Current input for this command.
        """
        return self._inp

    def accept(self, key):
        """
        Processes input for this command.
        """
        class_name = self.__class__.__name__
        assert self.input_parser, '{0} does not provide an input parser'.format(class_name)
        raise NotImplementedError(
            '{0} must implement .accept()'.format(class_name))

    def reset(self):
        """Discard any input accumulated so far."""
        self._inp = ''

    def translate(self, state):
        """
        Returns the command as a valid Json object containing all necessary
        data to be run by Vintageous. This is usually the last step before
        handing the command off to ST.

        Every motion and operator must override this method.

        @state
          The current state.
        """
        raise NotImplementedError('command {0} must implement .translate()'
                                  .format(self.__class__.__name__)
                                  )

    @classmethod
    def from_json(cls, data):
        """
        Instantiates a command from a valid Json object representing one.

        @data
          Serialized command data as provided by .serialize().
        """
        command = cls()
        command.__dict__.update(data)
        return command

    def serialize(self):
        """
        Returns a valid Json object representing this command in a format
        Vintageous uses internally.
        """
        payload = {key: value for key, value in self.__dict__.items()
                   if key in self._serializable}
        return {'name': self.__class__.__name__, 'data': payload}
class ViMissingCommandDef(ViCommandDefBase):
    """Placeholder for an unmapped/unknown command; it must never be run."""

    def translate(self, state=None):
        # Accept the same argument as the base class's .translate(state) so
        # that callers invoking .translate(state) reach this deliberate
        # TypeError instead of an argument-count TypeError from Python.
        raise TypeError(
            'ViMissingCommandDef should not be used as a runnable command'
        )
class ViMotionDef(ViCommandDefBase):
    """
    Base class for all motions.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): flag names suggest caret x-position tracking and
        # view scrolling respectively; confirm semantics at call sites.
        self.updates_xpos = False
        self.scroll_into_view = False
        self.type = cmd_types.MOTION
class ViOperatorDef(ViCommandDefBase):
    """
    Base class for all operators.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.updates_xpos = False
        self.scroll_into_view = False
        # presumably: whether a motion operand must follow -- confirm
        self.motion_required = False
        self.type = cmd_types.ACTION
        # presumably: whether the command can be repeated -- confirm
        self.repeatable = False
| 26.511811 | 86 | 0.555985 |
c1a1067f71031f9c9b0e3c1f4b571e8978a0f17a | 2,629 | py | Python | rllib/examples/coin_game_env.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | rllib/examples/coin_game_env.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | 41 | 2021-09-21T01:13:48.000Z | 2022-03-19T07:12:22.000Z | rllib/examples/coin_game_env.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | ##########
# Contribution by the Center on Long-Term Risk:
# https://github.com/longtermrisk/marltoolbox
##########
import argparse
import os
import ray
from ray import tune
from ray.rllib.algorithms.ppo import PPO
from ray.rllib.examples.env.coin_game_non_vectorized_env import CoinGame, AsymCoinGame
parser = argparse.ArgumentParser()
parser.add_argument("--tf", action="store_true")
parser.add_argument("--stop-iters", type=int, default=2000)
def main(debug, stop_iters=2000, tf=False, asymmetric_env=False):
    """Train PPO on the two-player (Asym)CoinGame env via Ray Tune.

    Args:
        debug: If True, run a tiny smoke test (2 training iterations).
        stop_iters: Training iterations when not in debug mode.
        tf: Use the TensorFlow framework instead of PyTorch.
        asymmetric_env: Use AsymCoinGame instead of the symmetric CoinGame.

    Returns:
        The analysis object produced by `tune.run`.
    """
    # NOTE(review): both branches evaluate to 1, so this conditional is a
    # no-op; presumably intended to be >1 outside debug mode -- confirm.
    train_n_replicates = 1 if debug else 1
    seeds = list(range(train_n_replicates))
    ray.init()
    stop = {
        "training_iteration": 2 if debug else stop_iters,
    }
    env_config = {
        "players_ids": ["player_red", "player_blue"],
        "max_steps": 20,
        "grid_size": 3,
        "get_additional_info": True,
    }
    rllib_config = {
        "env": AsymCoinGame if asymmetric_env else CoinGame,
        "env_config": env_config,
        "multiagent": {
            "policies": {
                # NOTE(review): observation/action spaces are always taken
                # from AsymCoinGame, even when the symmetric env is selected;
                # confirm both envs expose identical spaces.
                env_config["players_ids"][0]: (
                    None,
                    AsymCoinGame(env_config).OBSERVATION_SPACE,
                    AsymCoinGame.ACTION_SPACE,
                    {},
                ),
                env_config["players_ids"][1]: (
                    None,
                    AsymCoinGame(env_config).OBSERVATION_SPACE,
                    AsymCoinGame.ACTION_SPACE,
                    {},
                ),
            },
            # One policy per agent: the agent id doubles as the policy id.
            "policy_mapping_fn": lambda agent_id, **kwargs: agent_id,
        },
        # Size of batches collected from each worker.
        "rollout_fragment_length": 20,
        # Number of timesteps collected for each SGD round.
        # This defines the size of each SGD epoch.
        "train_batch_size": 512,
        "model": {
            "dim": env_config["grid_size"],
            "conv_filters": [
                [16, [3, 3], 1],
                [32, [3, 3], 1],
            ],  # [Channel, [Kernel, Kernel], Stride]]
        },
        "lr": 5e-3,
        "seed": tune.grid_search(seeds),
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
        "framework": "tf" if tf else "torch",
    }
    tune_analysis = tune.run(
        PPO,
        config=rllib_config,
        stop=stop,
        checkpoint_freq=0,
        checkpoint_at_end=True,
        name="PPO_AsymCG",
    )
    ray.shutdown()
    return tune_analysis
if __name__ == "__main__":
    args = parser.parse_args()
    # NOTE(review): debug_mode is hard-coded to True, which caps training at
    # 2 iterations and makes --stop-iters effectively unused; confirm intended.
    debug_mode = True
    use_asymmetric_env = False
    main(debug_mode, args.stop_iters, args.tf, use_asymmetric_env)
| 29.211111 | 86 | 0.564854 |
1a03b2bea8426f7e81b17aa529b4ab975e9c1bb9 | 1,275 | py | Python | third_party/logilab/astroid/brain/brain_qt.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 35 | 2016-09-22T22:53:14.000Z | 2020-02-13T15:12:21.000Z | third_party/logilab/astroid/brain/brain_qt.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 28 | 2020-03-04T22:01:48.000Z | 2022-03-12T00:59:47.000Z | third_party/logilab/astroid/brain/brain_qt.py | stdft112/depot_tools | 52c7211807930272424213ff6127c209de790eca | [
"BSD-3-Clause"
] | 88 | 2016-11-27T02:16:11.000Z | 2020-02-28T05:10:26.000Z | """Astroid hooks for the PyQT library."""
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
from astroid import nodes
from astroid import parse
def _looks_like_signal(node, signal_name='pyqtSignal'):
if '__class__' in node._instance_attrs:
cls = node._instance_attrs['__class__'][0]
return cls.name == signal_name
return False
def transform_pyqt_signal(node):
    """Attach connect/disconnect/emit members to a pyqtSignal class node."""
    module = parse('''
    class pyqtSignal(object):
        def connect(self, slot, type=None, no_receiver_check=False):
            pass
        def disconnect(self, slot):
            pass
        def emit(self, *args):
            pass
    ''')
    signal_cls = module['pyqtSignal']
    # Copy each stub method onto the node being transformed.
    for method_name in ('emit', 'disconnect', 'connect'):
        node._instance_attrs[method_name] = signal_cls[method_name]
def pyqt4_qtcore_transform():
    """Build a stub PyQt4.QtCore module exposing SIGNAL and QObject.emit."""
    stub_source = '''
    def SIGNAL(signal_name): pass
    class QObject(object):
        def emit(self, signal): pass
    '''
    return AstroidBuilder(MANAGER).string_build(stub_source)
register_module_extender(MANAGER, 'PyQt4.QtCore', pyqt4_qtcore_transform)
MANAGER.register_transform(nodes.FunctionDef, transform_pyqt_signal,
_looks_like_signal) | 28.977273 | 73 | 0.694902 |
836ea89c11f708a16ec8979de2bfe01c0b3ae7b7 | 10,071 | py | Python | scripts/process_schemas.py | david4096/ga4gh-schemas | 774db498cc047cc64cc070325472c7dba60e6d42 | [
"Apache-2.0"
] | 114 | 2015-01-05T22:19:34.000Z | 2017-02-18T18:51:22.000Z | scripts/process_schemas.py | david4096/ga4gh-schemas | 774db498cc047cc64cc070325472c7dba60e6d42 | [
"Apache-2.0"
] | 608 | 2015-01-06T00:24:39.000Z | 2017-03-09T05:29:16.000Z | scripts/process_schemas.py | david4096/ga4gh-schemas | 774db498cc047cc64cc070325472c7dba60e6d42 | [
"Apache-2.0"
] | 98 | 2015-01-12T18:09:52.000Z | 2017-02-15T15:49:17.000Z | """
A script to generate the schemas for the GA4GH protocol. These are generated
from a copy of the Protocol Buffers schema and use it to generate
the Python class definitions. These are also stored in revision
control to aid Travis building.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path
import subprocess
import fnmatch
import re
import argparse
import shlex
# IMPORTANT!
# Do not import any ga4gh or otherwise non-standard packages in this file.
# process_schemas is included in ga4gh-schema's install path in setup.py.
# Importing, for instance, ga4gh-common here will break an install if
# the environment does not have that package installed previously.
# We really want to avoid this scenario!
# (This does result in some code duplication in this file.)
# Below code duplicated from ga4gh-common
def runCommandSplits(splits, silent=False, shell=False):
    """
    Run a shell command given the command's parsed command line
    """
    try:
        if not silent:
            subprocess.check_call(splits, shell=shell)
        else:
            # Suppress all child output by pointing both streams at devnull.
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(
                    splits, stdout=devnull, stderr=devnull, shell=shell)
    except OSError as exception:
        if exception.errno != 2:  # anything other than "cmd not found"
            raise
        raise Exception(
            "Can't find command while trying to run {}".format(splits))
def runCommand(command, silent=False, shell=False):
    """
    Run a shell command
    """
    # Tokenize with shlex and delegate to the splits-based runner.
    runCommandSplits(shlex.split(command), silent=silent, shell=shell)
# Above code duplicated from ga4gh-common
class ProtobufGenerator(object):
    """Rewrites the GA4GH ``.proto`` schemas into a ``ga4gh.schemas``
    namespace hierarchy and compiles them into Python ``*_pb2`` modules
    using ``protoc``.

    Fixes applied relative to the original:
    - ``_getProtoc`` used an unparenthesized tuple inside a list
      comprehension, which is a SyntaxError on Python 3;
    - ``_version_compare`` relied on the Python-2-only builtin ``cmp``;
    - the diagnostic in ``_getProtoc`` called ``.format`` on the return
      value of ``print()`` (always ``None``) and referenced a ``{version}``
      placeholder that was never supplied;
    - ``subprocess.check_output`` returns bytes on Python 3, so its output
      is now decoded before parsing.
    """

    def __init__(self, version):
        # Protocol version string, later written to _protocol_version.py.
        self.version = version

    @staticmethod
    def _ensureDir(path):
        # Create a single directory level if it does not already exist.
        if not os.path.exists(path):
            os.mkdir(path)

    def _createSchemaFiles(self, destPath, schemasPath):
        """
        Create a hierarchy of proto files in a destination directory, copied
        from the schemasPath hierarchy
        """
        # Create the target directory hierarchy, if necessary.
        ga4ghPath = os.path.join(destPath, 'ga4gh')
        self._ensureDir(ga4ghPath)
        ga4ghSchemasPath = os.path.join(ga4ghPath, 'schemas')
        self._ensureDir(ga4ghSchemasPath)
        ga4ghSchemasGa4ghPath = os.path.join(ga4ghSchemasPath, 'ga4gh')
        self._ensureDir(ga4ghSchemasGa4ghPath)
        ga4ghSchemasGooglePath = os.path.join(ga4ghSchemasPath, 'google')
        self._ensureDir(ga4ghSchemasGooglePath)
        ga4ghSchemasGoogleApiPath = os.path.join(
            ga4ghSchemasGooglePath, 'api')
        self._ensureDir(ga4ghSchemasGoogleApiPath)
        # Rewrite every proto file into the destination hierarchy.
        for root, dirs, files in os.walk(schemasPath):
            for protoFilePath in fnmatch.filter(files, '*.proto'):
                src = os.path.join(root, protoFilePath)
                dst = os.path.join(
                    ga4ghSchemasPath,
                    os.path.relpath(root, schemasPath), protoFilePath)
                self._copySchemaFile(src, dst)

    def _doLineReplacements(self, line):
        """
        Given a line of a proto file, replace the line with one that is
        appropriate for the hierarchy that we want to compile
        """
        # ga4gh packages
        packageString = 'package ga4gh;'
        if packageString in line:
            return line.replace(
                packageString,
                'package ga4gh.schemas.ga4gh;')
        importString = 'import "ga4gh/'
        if importString in line:
            return line.replace(
                importString,
                'import "ga4gh/schemas/ga4gh/')
        # google packages
        googlePackageString = 'package google.api;'
        if googlePackageString in line:
            return line.replace(
                googlePackageString,
                'package ga4gh.schemas.google.api;')
        googleImportString = 'import "google/api/'
        if googleImportString in line:
            return line.replace(
                googleImportString,
                'import "ga4gh/schemas/google/api/')
        optionString = 'option (google.api.http)'
        if optionString in line:
            return line.replace(
                optionString,
                'option (.ga4gh.schemas.google.api.http)')
        return line

    def _copySchemaFile(self, src, dst):
        """
        Copy a proto file to the temporary directory, with appropriate
        line replacements
        """
        with open(src) as srcFile, open(dst, 'w') as dstFile:
            for srcLine in srcFile:
                dstFile.write(self._doLineReplacements(srcLine))

    def _find_in_path(self, cmd):
        """Return the first path on $PATH containing *cmd*, or None."""
        PATH = os.environ.get("PATH", os.defpath).split(os.pathsep)
        for x in PATH:
            possible = os.path.join(x, cmd)
            if os.path.exists(possible):
                return possible
        return None

    def _assertSchemasExist(self, schemas_path):
        """Raise if the schemas folder is missing."""
        if not os.path.exists(schemas_path):
            raise Exception(
                "Can't find schemas folder. " +
                "Thought it would be at {}".format(
                    os.path.realpath(schemas_path)))

    def _assertProtoDirectoryExists(self, source_path):
        """Raise if the source proto directory is missing."""
        if not os.path.exists(source_path):
            msg = "Can't find source proto directory {}".format(
                os.path.realpath(source_path))
            raise Exception(msg)

    # From http://stackoverflow.com/a/1714190/320546
    def _version_compare(self, version1, version2):
        """Return <0, 0 or >0 as version1 is older, equal or newer.

        Trailing ``.0`` components are ignored ("3.0" == "3.0.0").
        Replaces the removed Python 2 builtin ``cmp``.
        """
        def normalize(v):
            return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
        n1, n2 = normalize(version1), normalize(version2)
        return (n1 > n2) - (n1 < n2)

    def _getProtoc(self, destination_path):
        """Locate a protoc binary of version >= 3.0.0 or raise."""
        # Candidate locations: a bundled build, then whatever is on $PATH.
        # (The original comprehension iterated an unparenthesized tuple,
        # which Python 3 rejects; the list is now built explicitly.)
        candidates = [
            "{}/protobuf/src/protoc".format(destination_path),
            self._find_in_path("protoc"),
        ]
        protocs = [os.path.realpath(x) for x in candidates if x is not None]
        protoc = None
        for c in protocs:
            if not os.path.exists(c):
                continue
            # check_output returns bytes on Python 3; decode before parsing.
            output = subprocess.check_output(
                [c, "--version"]).decode().strip()
            try:
                (lib, version) = output.split(" ")
                if lib != "libprotoc":
                    raise Exception("lib didn't match 'libprotoc'")
                if self._version_compare("3.0.0", version) > 0:
                    raise Exception("version < 3.0.0")
                protoc = c
                break
            except Exception:
                # Format the message first, then print it (the original
                # called .format on print()'s None return value and never
                # supplied the {version} placeholder).
                print(
                    "Not using {path} because it returned "
                    "'{version}' rather than \"libprotoc <version>\", where "
                    "<version> >= 3.0.0".format(path=c, version=output))
        if protoc is None:
            raise Exception("Can't find a good protoc. Tried {}".format(
                protocs))
        print("Using protoc: '{}'".format(protoc))
        return protoc

    def _writePythonFiles(self, source_path, protoc, destination_path):
        """Compile every .proto below source_path into pb2 modules."""
        protos = []
        for root, dirs, files in os.walk(source_path):
            protos.extend([
                os.path.join(root, f)
                for f in fnmatch.filter(files, "*.proto")])
        if len(protos) == 0:
            raise Exception(
                "Didn't find any proto files in '{}'".format(source_path))
        print("pb2 files destination: '{}'".format(destination_path))
        cmdString = (
            "{protoc} -I {source_path} -I ./src/main "
            "--python_out={destination_path} {proto_files}")
        cmd = cmdString.format(
            protoc=protoc, source_path=source_path,
            destination_path=destination_path,
            proto_files=" ".join(protos))
        runCommand(cmd)
        print("{} pb2 files written".format(len(protos)))

    def _writeVersionFile(self):
        """Record the protocol version inside the generated package."""
        versionFilePath = "python/ga4gh/schemas/_protocol_version.py"
        with open(versionFilePath, "w") as version_file:
            version_file.write(
                "# File generated by scripts/process_schemas.py; "
                "do not edit\n")
            version_file.write("version = '{}'\n".format(self.version))

    def run(self, args):
        """Entry point: rewrite the schemas and compile them to pb2 modules.

        ``args`` must expose ``destpath`` and ``schemapath`` attributes
        (see the argparse definition in ``main``).
        """
        script_path = os.path.dirname(os.path.realpath(__file__))
        destination_path = os.path.realpath(
            os.path.join(script_path, args.destpath))
        schemas_path = os.path.realpath(args.schemapath)
        protoc = self._getProtoc(destination_path)
        print("Writing protocol version '{}'".format(args.version))
        print("Proto files source: '{}'".format(schemas_path))
        print("Rewritten proto files source: '{}'".format(destination_path))
        self._createSchemaFiles(destination_path, schemas_path)
        self._writePythonFiles(destination_path, protoc, destination_path)
        self._writeVersionFile()
def main(args=None):
    """Parse command-line arguments and run the protobuf generator."""
    default_dest = "../python/"
    default_schemas = '../src/main/proto/'
    parser = argparse.ArgumentParser(
        description="Script to process GA4GH Protocol buffer schemas")
    parser.add_argument(
        "version", help="Version number of the schema we're compiling")
    parser.add_argument(
        "-s", "--schemapath", default=default_schemas,
        help="Path to schemas (defaults to {})".format(default_schemas))
    parser.add_argument(
        "-d", "--destpath", default=default_dest,
        help=(
            "the directory in which to write the compiled schema files "
            "(defaults to {})".format(default_dest)))
    namespace = parser.parse_args(args)
    ProtobufGenerator(namespace.version).run(namespace)


if __name__ == "__main__":
    main()
| 38.292776 | 79 | 0.609373 |
7fc91148bc1e9b29d531a8be91a14ad260e5efa1 | 1,240 | py | Python | asreview/models/query/__init__.py | qubixes/automated-systematic-review | 742811a2d300a0cb5a9bd4d1fbada2be8ee86e71 | [
"MIT"
] | null | null | null | asreview/models/query/__init__.py | qubixes/automated-systematic-review | 742811a2d300a0cb5a9bd4d1fbada2be8ee86e71 | [
"MIT"
] | null | null | null | asreview/models/query/__init__.py | qubixes/automated-systematic-review | 742811a2d300a0cb5a9bd4d1fbada2be8ee86e71 | [
"MIT"
] | null | null | null | # Copyright 2019 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asreview.models.query.max import MaxQuery
from asreview.models.query.mixed import MixedQuery
from asreview.models.query.uncertainty import UncertaintyQuery
from asreview.models.query.random import RandomQuery
from asreview.models.query.cluster import ClusterQuery
from asreview.models.query.utils import get_query_model
from asreview.models.query.utils import get_query_class
from asreview.models.query.utils import list_query_strategies
"""Query strategies query records to label by the user.
There are several query strategies available. In configuration files,
parameters are found under the section ``[query_param]``.
"""
| 42.758621 | 74 | 0.804032 |
fbad493f616aa97655e75625af18a7e4befb4fc7 | 5,091 | py | Python | Full Stack Web Developer Nanodegree v2/P1 - Fyyur Artist Booking Site/forms.py | vkbhandare/Udacity-1 | da3e5df92857421609f9ed101e029c6d533cbb72 | [
"MIT"
] | 149 | 2017-01-14T09:54:05.000Z | 2022-03-06T20:55:08.000Z | Full Stack Web Developer Nanodegree v2/P1 - Fyyur Artist Booking Site/forms.py | vkbhandare/Udacity-1 | da3e5df92857421609f9ed101e029c6d533cbb72 | [
"MIT"
] | 10 | 2021-03-11T03:48:47.000Z | 2022-02-27T09:24:33.000Z | Full Stack Web Developer Nanodegree v2/P1 - Fyyur Artist Booking Site/forms.py | vkbhandare/Udacity-1 | da3e5df92857421609f9ed101e029c6d533cbb72 | [
"MIT"
] | 130 | 2016-12-30T07:14:29.000Z | 2022-01-03T13:05:46.000Z | from datetime import datetime
from flask_wtf import Form
from wtforms import StringField, SelectField, SelectMultipleField, DateTimeField, BooleanField, ValidationError
from wtforms.validators import DataRequired, AnyOf, URL, Length
import re
# Two-letter US state codes (plus DC), in the original display order.
# Value and label are identical for each SelectField choice, so the pairs
# are generated from a single list of codes.
_STATE_CODES = (
    'AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FL', 'GA',
    'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MT', 'NE',
    'NV', 'NH', 'NJ', 'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'MD',
    'MA', 'MI', 'MN', 'MS', 'MO', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX',
    'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY',
)
state_choices = [(code, code) for code in _STATE_CODES]
# Music genres offered by the venue/artist forms, in the original order.
# As with the state choices, value == label for every entry.
_GENRE_NAMES = (
    'Alternative', 'Blues', 'Classical', 'Country', 'Electronic', 'Folk',
    'Funk', 'Hip-Hop', 'Heavy Metal', 'Instrumental', 'Jazz',
    'Musical Theatre', 'Pop', 'Punk', 'R&B', 'Reggae', 'Rock n Roll',
    'Soul', 'Other',
)
genres_choices = [(genre, genre) for genre in _GENRE_NAMES]
class VenueForm(Form):
    """WTForms form for creating/editing a Venue."""
    def validate_phone(form, field):
        # Inline field validator (WTForms calls validate_<fieldname>).
        # Phone numbers must match the ###-###-#### pattern exactly.
        if not re.search(r"^[0-9]{3}-[0-9]{3}-[0-9]{4}$", field.data):
            raise ValidationError("Invalid phone number.")
    def validate_genres(form, field):
        # Accept only values that appear as labels in genres_choices.
        genres_values = [choice[1] for choice in genres_choices]
        for value in field.data:
            if value not in genres_values:
                raise ValidationError('Invalid genres value.')
    name = StringField(
        'name', validators=[DataRequired()]
    )
    genres = SelectMultipleField(
        # TODO implement enum restriction
        'genres', validators=[DataRequired()],
        choices=genres_choices
    )
    address = StringField(
        'address', validators=[DataRequired(), Length(max=120)]
    )
    city = StringField(
        'city', validators=[DataRequired(), Length(max=120)]
    )
    state = SelectField(
        'state', validators=[DataRequired(), Length(max=120)],
        choices=state_choices
    )
    phone = StringField(
        'phone', validators=[DataRequired()]
    )
    website = StringField(
        'website', validators=[DataRequired(), URL(), Length(max=120)]
    )
    facebook_link = StringField(
        'facebook_link', validators=[DataRequired(), URL()]
    )
    # seeking_talent is optional; when set, seeking_description explains it.
    seeking_talent = BooleanField(
        'seeking_talent'
    )
    seeking_description = StringField(
        'seeking_description', validators=[Length(max=500)]
    )
    image_link = StringField(
        'image_link', validators=[DataRequired(), URL(), Length(max=500)]
    )
# TODO IMPLEMENT NEW ARTIST FORM AND NEW SHOW FORM
class ArtistForm(Form):
    """WTForms form for creating/editing an Artist."""
    def validate_phone(form, field):
        # Inline field validator (WTForms calls validate_<fieldname>).
        # Phone numbers must match the ###-###-#### pattern exactly.
        if not re.search(r"^[0-9]{3}-[0-9]{3}-[0-9]{4}$", field.data):
            raise ValidationError("Invalid phone number.")
    def validate_genres(form, field):
        # Accept only values that appear as labels in genres_choices.
        genres_values = [choice[1] for choice in genres_choices]
        for value in field.data:
            if value not in genres_values:
                raise ValidationError('Invalid genres value.')
    name = StringField(
        'name', validators=[DataRequired(), Length(max=120)]
    )
    city = StringField(
        'city', validators=[DataRequired(), Length(max=120)]
    )
    state = SelectField(
        # TODO implement validation logic for state
        'state', validators=[DataRequired(), Length(max=120)],
        choices=state_choices
    )
    phone = StringField(
        'phone', validators=[DataRequired()]
    )
    genres = SelectMultipleField(
        # TODO implement enum restriction
        'genres', validators=[DataRequired()],
        choices=genres_choices
    )
    # seeking_venue is optional; when set, seeking_description explains it.
    seeking_venue = BooleanField(
        'seeking_venue'
    )
    seeking_description = StringField(
        'seeking_description', validators=[Length(max=500)]
    )
    website = StringField(
        'website', validators=[DataRequired(), URL(), Length(max=120)]
    )
    image_link = StringField(
        'image_link', validators=[DataRequired(), URL(), Length(max=500)]
    )
    facebook_link = StringField(
        'facebook_link', validators=[URL()]
    )
class ShowForm(Form):
    """WTForms form for scheduling a Show linking an artist to a venue."""
    artist_id = StringField(
        'artist_id'
    )
    venue_id = StringField(
        'venue_id'
    )
    start_time = DateTimeField(
        'start_time',
        validators=[DataRequired()],
        # Pass the callable, not its result: the original
        # `default=datetime.today()` evaluated once at import time, so every
        # form rendered by a long-running server shared the same stale
        # timestamp. WTForms invokes a callable default per form instance.
        default=datetime.today
    )
| 26.515625 | 111 | 0.539973 |
f74e4fb729c94104b2a30b43aa41b1e48fab2f7d | 771 | py | Python | cea/interfaces/dashboard/api/utils.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | null | null | null | cea/interfaces/dashboard/api/utils.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | null | null | null | cea/interfaces/dashboard/api/utils.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | null | null | null |
from flask import current_app
import cea.config
import cea.inputlocator
def deconstruct_parameters(p: cea.config.Parameter):
    """Flatten a cea config Parameter into a plain dict for the dashboard API.

    The dict always carries ``name``, ``type`` (the parameter's typename),
    ``help`` and ``value``; choice-like parameters additionally get a
    ``choices`` entry.
    """
    params = {'name': p.name, 'type': p.typename, 'help': p.help}
    try:
        params["value"] = p.get()
    except cea.ConfigError as e:
        # A misconfigured parameter must not break the whole listing:
        # log it and report an empty value instead.
        print(e)
        params["value"] = ""
    if isinstance(p, cea.config.ChoiceParameter):
        # NOTE(review): reads the private _choices attribute — presumably the
        # set of selectable values; confirm against cea.config.
        params['choices'] = p._choices
    if p.typename == 'WeatherPathParameter':
        # Weather parameters get a name -> weather-file-path mapping instead.
        config = current_app.cea_config
        locator = cea.inputlocator.InputLocator(config.scenario)
        params['choices'] = {wn: locator.get_weather(
            wn) for wn in locator.get_weather_names()}
    elif p.typename == 'DatabasePathParameter':
        params['choices'] = p._choices
    return params
c80360962ea40a4a981201563801b38e13d38353 | 2,549 | py | Python | quant_eval/scripts/iris/crop_images.py | nudro/favtgan | 51a0fd7b358b1fc019d46efba3153526d5f79828 | [
"Apache-2.0"
] | null | null | null | quant_eval/scripts/iris/crop_images.py | nudro/favtgan | 51a0fd7b358b1fc019d46efba3153526d5f79828 | [
"Apache-2.0"
] | null | null | null | quant_eval/scripts/iris/crop_images.py | nudro/favtgan | 51a0fd7b358b1fc019d46efba3153526d5f79828 | [
"Apache-2.0"
] | null | null | null | import PIL
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import sys
import os
import argparse
"""
Crops a 256 x 768 images of stacked images from the test phase:
real_A, fake_B, real_B and puts them into respective directories. Run
this before evaluation.py
"""
def crop_it(infile_path, RA_out, RB_out, FB_out):
    """Split every 256x768 stacked test image in *infile_path* into its
    three 256x256 tiles (real_A on top, then fake_B, then real_B) and save
    each tile as <name>_<tile>.png in the matching output directory.
    Prints the number of files processed.
    """
    processed = 0
    for entry in os.listdir(infile_path):
        src = os.path.join(infile_path, entry)
        if not os.path.isfile(src):
            continue
        processed += 1
        image = Image.open(src)
        stem = os.path.basename(os.path.splitext(src)[0])
        # Tiles are stacked vertically in the source image.
        tiles = (
            (image.crop((0, 0, 256, 256)), RA_out, '_real_A'),
            (image.crop((0, 256, 256, 512)), FB_out, '_fake_B'),
            (image.crop((0, 512, 256, 768)), RB_out, '_real_B'),
        )
        for tile, out_dir, suffix in tiles:
            tile.save(
                os.path.join(out_dir, stem + suffix + '.png'), quality=100)
    print(processed)
def main(inpath, RA_out, RB_out, FB_out):
    """Thin CLI wrapper around crop_it."""
    crop_it(inpath, RA_out, RB_out, FB_out)
### MAIN ###
if __name__ == '__main__':
    # Command-line interface: all paths default to "none" and are passed
    # straight through to crop_it, which will fail on a missing directory.
    parser = argparse.ArgumentParser()
    parser.add_argument("--inpath", type=str, default="none", help="path to test results original images")
    parser.add_argument("--RA_out", type=str, default="none", help="path to real_A visible dir")
    parser.add_argument("--RB_out", type=str, default="none", help="path real_B thermal dir")
    parser.add_argument("--FB_out", type=str, default="none", help="path to fake_B dir")
    parser.add_argument("--experiment", type=str, default="none", help="experiment_name")
    opt = parser.parse_args()
    print(opt)
    # Please note, I updated the dir to "Iris" to evaluate images from Iris test results
    # Change back in case you're in the "Eurecom" directory
    # Ensure the per-experiment output directories exist before cropping.
    os.makedirs("quant_eval/Iris/%s/fake_B" % opt.experiment, exist_ok=True)
    os.makedirs("quant_eval/Iris/%s/real_B" % opt.experiment, exist_ok=True)
    os.makedirs("quant_eval/Iris/%s/real_A" % opt.experiment, exist_ok=True)
    main(opt.inpath, opt.RA_out, opt.RB_out, opt.FB_out)
| 40.460317 | 106 | 0.656336 |
071b48920d03481a481f260edd64dcdafd46c7d9 | 23,303 | py | Python | pytorch_trainer/iterators/multiprocess_iterator.py | Hiroshiba/pytorch-trainer | b4b3d648868e4cec33c69e18fc3877c103a8d438 | [
"MIT"
] | 45 | 2019-12-15T04:30:15.000Z | 2021-04-28T14:32:17.000Z | pytorch_trainer/iterators/multiprocess_iterator.py | Hiroshiba/pytorch-trainer | b4b3d648868e4cec33c69e18fc3877c103a8d438 | [
"MIT"
] | null | null | null | pytorch_trainer/iterators/multiprocess_iterator.py | Hiroshiba/pytorch-trainer | b4b3d648868e4cec33c69e18fc3877c103a8d438 | [
"MIT"
] | 2 | 2020-03-27T13:55:27.000Z | 2021-03-03T17:52:56.000Z | from __future__ import division
import datetime
import multiprocessing
from multiprocessing import sharedctypes # type: ignore
import signal
import sys
import threading
import warnings
import numpy
import six
import torch
from pytorch_trainer.dataset import iterator
from pytorch_trainer.iterators import _statemachine
from pytorch_trainer.iterators.order_samplers import ShuffleOrderSampler
_response_time = 0.1
def _raise_timeout_warning():
    """Emit a TimeoutWarning pointing users at the stalled-dataset docs."""
    message = (
        'Stalled dataset is detected. '
        'See the documentation of MultiprocessIterator for common causes and '
        'workarounds:\n'
        'https://docs.chainer.org/en/stable/reference/generated/'
        'pytorch_trainer.iterators.MultiprocessIterator.html'
    )
    warnings.warn(message, MultiprocessIterator.TimeoutWarning)
class MultiprocessIterator(iterator.Iterator):
    """Dataset iterator that loads examples in parallel.
    This is an implementation of :class:`~pytorch_trainer.dataset.Iterator` that loads
    examples with worker processes. It uses the standard :mod:`multiprocessing`
    module to parallelize the loading. The dataset is sent to the worker
    processes in the standard way using pickle.
    Note that this iterator effectively prefetches the examples for the next
    batch asynchronously after the current batch is returned.
    This iterator saves ``-1`` instead of ``None`` in snapshots since some
    serializers do not support ``None``.
    .. note::
        When you are using OpenCV somewhere in your code and the
        ``MultiprocessIterator`` is used in the training code, the
        training loop may get stuck at some point. In such situation,
        there are several workarounds to prevent the process got stuck.
        1. Set the environment variable as follows: ``OMP_NUM_THREADS=1``
        2. Add ``cv2.setNumThreads(0)`` right after ``import cv2`` in your
        training script.
        3. Use :class:`~pytorch_trainer.iterators.MultithreadIterator` instead of
        ``MultiprocessIterator``.
    Args:
        dataset (~pytorch_trainer.dataset.Dataset): Dataset to iterate.
        batch_size (int): Number of examples within each batch.
        repeat (bool): If ``True``, it infinitely loops over the dataset.
            Otherwise, it stops iteration at the end of the first epoch.
        shuffle (bool): If ``True``, the order of examples is shuffled at the
            beginning of each epoch. Otherwise, examples are extracted in the
            order of indexes. If ``None`` and no ``order_sampler`` is given,
            the behavior is the same as the case with ``shuffle=True``.
        n_processes (int): Number of worker processes. The number of CPUs is
            used by default.
        n_prefetch (int): Number of prefetch batches.
        shared_mem (int): The size of using shared memory per data.
            If ``None``, size is adjusted automatically.
        dataset_timeout (float): :class:`MultiprocessIterator.TimeoutWarning`
            will be issued after this time in seconds elapsed in each dataset
            realization. ``None`` to disable the warning. You can turn this
            warning into an error by using :func:`warnings.simplefilter`::
                warnings.simplefilter(
                    'error',
                    pytorch_trainer.iterators.MultiprocessIterator.TimeoutWarning)
        order_sampler (callable): A callable that generates the order
            of the indices to sample in the next epoch when a epoch finishes.
            This function should take two arguments: the current order
            and the current position of the iterator.
            This should return the next order. The size of the order
            should remain constant.
            This option cannot be used when ``shuffle`` is not ``None``.
        maxtasksperchild (int): Number of tasks a worker of prefetch process
            can complete before it will exit and be replaced with a fresh
            worker process, to enable unused resources to be freed. If
            ``None``, worker processes will live as long as the pool.
    """
    class TimeoutWarning(RuntimeWarning):
        pass
    _interruption_testing = False # for testing
    # Class-level defaults; overwritten per instance in _initialize_loop().
    _finalized = False
    _prefetch_loop = None
    _comm = None
    def __init__(self, dataset, batch_size, repeat=True, shuffle=None,
                 n_processes=None, n_prefetch=1, shared_mem=None,
                 order_sampler=None, dataset_timeout=30.0,
                 maxtasksperchild=None):
        self.dataset = dataset
        self.batch_size = batch_size
        self.repeat = repeat
        self.shuffle = shuffle
        self.n_processes = n_processes or multiprocessing.cpu_count()
        self.n_prefetch = max(n_prefetch, 1)
        self.shared_mem = shared_mem
        self.dataset_timeout = dataset_timeout
        self._maxtasksperchild = maxtasksperchild
        # `shuffle` and a custom `order_sampler` are mutually exclusive;
        # when neither disables it, fall back to ShuffleOrderSampler.
        if self.shuffle is not None:
            if order_sampler is not None:
                raise ValueError('`shuffle` is not `None` and a custom '
                                 '`order_sampler` is set. Please set '
                                 '`shuffle` to `None` to use the custom '
                                 'order sampler.')
            else:
                if self.shuffle:
                    order_sampler = ShuffleOrderSampler()
        else:
            if order_sampler is None:
                order_sampler = ShuffleOrderSampler()
        self.order_sampler = order_sampler
        self._initialize_loop()
    def _initialize_loop(self):
        # (Re)build the communicator and prefetch loop; also used when
        # restoring from a pickle in __setstate__.
        self._comm = _Communicator(self.n_prefetch, self.dataset_timeout)
        self.reset()
        self._prefetch_loop = _PrefetchLoop(
            self.dataset, self.batch_size, self.repeat,
            self.n_processes, self.n_prefetch, self.shared_mem,
            self._comm, self.order_sampler,
            self._interruption_testing, self._maxtasksperchild)
        # defer launching prefetch thread until creating the worker pool,
        # not to leave a background thread in forked processes.
    def __next__(self):
        # On the very first batch the loop may need to "measure" one batch
        # synchronously to size the shared memory before launching workers.
        measure_mode = False
        if self._prefetch_loop.thread is None:
            if self._prefetch_loop.measure_required():
                measure_mode = True
                batch, state = self._prefetch_loop.measure(
                    self.dataset_timeout)
            self._prefetch_loop.launch_thread()
        if not measure_mode:
            batch, state = self._comm.get()
        self._previous_epoch_detail = self.epoch_detail
        self._state = state
        # A None batch signals the end of a non-repeating iteration.
        if batch is None:
            raise StopIteration
        else:
            return batch
    next = __next__
    def finalize(self):
        # Idempotent shutdown of the communicator and prefetch machinery.
        if self._finalized:
            return
        if self._comm is not None:
            self._comm.terminate()
        if self._prefetch_loop is not None:
            self._prefetch_loop.terminate()
        self._comm = None
        self._prefetch_loop = None
        self._finalized = True
    def __copy__(self):
        # This function is implemented for backward compatibility.
        # Please use `reset` normally.
        other = MultiprocessIterator(
            self.dataset, self.batch_size, self.repeat, shuffle=None,
            n_processes=self.n_processes, n_prefetch=self.n_prefetch,
            shared_mem=self.shared_mem, order_sampler=self.order_sampler)
        other._reset_state(self.current_position, self.epoch,
                           self.is_new_epoch, self._state.order)
        other._previous_epoch_detail = self._previous_epoch_detail
        return other
    @property
    def current_position(self):
        return self._state.current_position
    @property
    def epoch(self):
        return self._state.epoch
    @property
    def is_new_epoch(self):
        return self._state.is_new_epoch
    @property
    def epoch_detail(self):
        # Fractional epoch count, e.g. 1.5 means halfway through epoch 2.
        return self.epoch + self.current_position / self._epoch_size
    @property
    def previous_epoch_detail(self):
        # -1 is the sentinel for "no previous batch yet" (see class docstring
        # on why None is avoided in snapshots).
        if self._previous_epoch_detail < 0:
            return None
        return self._previous_epoch_detail
    def state_dict(self):
        """Return a serializable snapshot of the iterator position."""
        state_dict = {
            'current_position': self.current_position,
            'epoch': self.epoch,
            'is_new_epoch': self.is_new_epoch,
        }
        order = self._state.order.copy()
        state_dict['order'] = order
        try:
            state_dict['previous_epoch_detail'] = self._previous_epoch_detail
        except KeyError:
            pass
        return state_dict
    def load_state_dict(self, state_dict):
        """Restore the iterator position from a state_dict snapshot."""
        current_position = state_dict['current_position']
        epoch = state_dict['epoch']
        is_new_epoch = state_dict['is_new_epoch']
        order = self._state.order
        if order is not None:
            order = state_dict['order']
        self._reset_state(
            current_position, epoch, is_new_epoch, order)
        try:
            self._previous_epoch_detail = state_dict['previous_epoch_detail']
        except KeyError:
            # guess previous_epoch_detail for older version
            self._previous_epoch_detail = self.epoch + \
                (self.current_position - self.batch_size) / self._epoch_size
            if self.epoch_detail > 0:
                self._previous_epoch_detail = max(
                    self._previous_epoch_detail, 0.)
            else:
                self._previous_epoch_detail = -1.
    def reset(self):
        # Start a fresh epoch 0; the order sampler (if any) produces the
        # initial index order.
        if self.order_sampler is None:
            order = None
        else:
            order = self.order_sampler(numpy.arange(len(self.dataset)), 0)
        self._reset_state(0, 0, False, order)
        self._previous_epoch_detail = -1.
    def _reset_state(self, current_position, epoch, is_new_epoch, order):
        if self._finalized:
            raise NotImplementedError(
                'Reset of finalized MultiProcessIterator is currently not '
                'supported.')
        self._state = _statemachine.IteratorState(
            current_position, epoch, is_new_epoch, order)
        # Tell the prefetch side to restart from this state.
        self._comm.reset(self._state)
    @property
    def _epoch_size(self):
        # Epoch length: the sampled order if one exists, else the dataset.
        order = self._state.order
        if order is None:
            epoch_size = len(self.dataset)
        else:
            epoch_size = len(order)
        return epoch_size
    def __getstate__(self):
        # We trick the serializer to fill a dict for us
        # this allows us to use the same code for both
        # pytorch_trainer and pickle serializers
        state = self.state_dict()
        self._reset_state(self.current_position, self.epoch,
                          self.is_new_epoch, state['order'])
        # Unpickling resets the instance without calling __init__
        # Chainer serializers dumps the state in an existing
        # object hence we need to save the initial parameters too
        init = self.__dict__.copy()
        # These three hold threads/locks/pools and cannot be pickled;
        # __setstate__ rebuilds them via _initialize_loop().
        del init['_comm']
        del init['_state']
        del init['_prefetch_loop']
        # TODO(ecastill): When pickling this object there is the risk to copy
        # the entire dataset. If the dataset is entirely in memory
        # it can be duplicated when spawning new processes.
        state['init'] = init
        return state
    def __setstate__(self, state):
        self.__dict__.update(state['init'])
        self._initialize_loop()
        # Iterator state is restored after initialization
        self._reset_state(state['current_position'], state['epoch'],
                          state['is_new_epoch'], state['order'])
        self._previous_epoch_detail = state['previous_epoch_detail']
class _Communicator(object):
    """Bounded, lock-protected queue between the prefetch thread (producer)
    and the iterator (consumer), with reset/terminate signalling.

    A single lock backs both condition variables; ``_reset_count`` lets the
    producer detect that a reset happened while it was building a batch.
    """
    STATUS_CONTINUE = 0
    STATUS_RESET = 1
    STATUS_TERMINATE = 2
    def __init__(self, n_prefetch, dataset_timeout):
        self.n_prefetch = n_prefetch
        self.dataset_timeout = dataset_timeout
        self._lock = threading.Lock()
        # Both conditions share one lock so queue state stays consistent.
        self._not_empty_cond = threading.Condition(self._lock)
        self._not_full_cond = threading.Condition(self._lock)
        self._batch_queue = []
        self._status = _Communicator.STATUS_CONTINUE
        self._reset_count = 0
    @property
    def is_terminated(self):
        with self._lock:
            return self._status == _Communicator.STATUS_TERMINATE
    # called from iterator
    def get(self):
        with self._lock:
            start = datetime.datetime.now()
            # Poll with a short wait so a stalled dataset can be reported
            # via a TimeoutWarning instead of blocking silently forever.
            while not self._batch_queue:
                self._not_empty_cond.wait(_response_time)
                dt = datetime.datetime.now() - start
                if (self.dataset_timeout is not None
                        and dt > datetime.timedelta(
                            seconds=self.dataset_timeout)):
                    _raise_timeout_warning()
            batch, prefetch_state = self._batch_queue.pop(0)
            self._not_full_cond.notify()
            return batch, prefetch_state
    # called from iterator
    def reset(self, prefetch_state):
        with self._lock:
            self._status = _Communicator.STATUS_RESET
            self._prefetch_state = prefetch_state
            # Drop already-prefetched batches; they belong to the old state.
            self._batch_queue = []
            self._not_full_cond.notify()
            self._reset_count += 1
    # called from iterator
    def terminate(self):
        with self._lock:
            self._status = _Communicator.STATUS_TERMINATE
            self._batch_queue = []
            self._not_full_cond.notify()
            self._reset_count += 1
    # called from thread
    def check(self):
        # Consume the pending status (one-shot) and return it together with
        # the reset state (if any) and the current reset counter.
        with self._lock:
            status = self._status
            self._status = _Communicator.STATUS_CONTINUE
            prefetch_state = None
            if status == _Communicator.STATUS_RESET:
                prefetch_state = self._prefetch_state
            return status, prefetch_state, self._reset_count
    # called from thread
    def put(self, batch, prefetch_state, reset_count):
        with self._lock:
            if len(self._batch_queue) == self.n_prefetch:
                self._not_full_cond.wait()
            # Discard the batch if a reset/terminate happened meanwhile.
            if reset_count == self._reset_count:
                self._batch_queue.append((batch, prefetch_state))
                self._not_empty_cond.notify()
class _PrefetchLoop(object):
    """Body of MultiprocessIterator's prefetch thread.

    Repeatedly advances the iteration state machine, fetches the next batch
    through a worker-process pool (optionally via a shared-memory buffer),
    and hands finished batches to the main thread through a _Communicator.
    """

    _thread = None
    _pool = None
    _terminating = False

    def __init__(self, dataset, batch_size, repeat,
                 n_processes, n_prefetch, mem_size, comm,
                 order_sampler,
                 _interruption_testing, maxtasksperchild):
        self.dataset = dataset
        self.batch_size = batch_size
        self.repeat = repeat
        self.n_processes = n_processes
        self.mem_size = mem_size
        self._comm = comm
        self.order_sampler = order_sampler
        self.maxtasksperchild = maxtasksperchild

        self._allocate_shared_memory()
        self._interruption_testing = _interruption_testing

    def terminate(self):
        """Stop the prefetch thread, then tear down the worker pool."""
        self._terminating = True

        # Terminate the thread first because it depends on the pool.
        if self._thread is not None:
            while self._thread.is_alive():
                self._thread.join(_response_time)

        if self._pool is not None:
            self._pool.terminate()

        self._thread = None
        self._pool = None

    @property
    def thread(self):
        return self._thread

    def measure_required(self):
        """True iff the shared-memory slot size is not yet determined."""
        return self.mem_size is None

    def measure(self, dataset_timeout):
        """Fetch one batch synchronously to size the shared-memory slots.

        ``dataset_timeout`` is a timeout in seconds, or None to fetch
        without a watchdog.  Returns ``(batch, prefetch_state)``; ``batch``
        is None when the underlying iteration is already exhausted.
        """
        status, prefetch_state, _ = self._comm.check()
        if status == _Communicator.STATUS_RESET:
            self.prefetch_state = prefetch_state

        self.prefetch_state, indices = _statemachine.iterator_statemachine(
            self.prefetch_state, self.batch_size, self.repeat,
            self.order_sampler, len(self.dataset))

        if indices is None:  # stop iteration
            batch = None
        else:
            batch_ret = [None]

            def fetch_batch():
                batch_ret[0] = [self.dataset[idx] for idx in indices]

            if dataset_timeout is None:
                # Timeout is not set: fetch synchronously
                fetch_batch()
            else:
                # Timeout is set: fetch asynchronously and watch for timeout
                thr = threading.Thread(target=fetch_batch)
                thr.daemon = True
                thr.start()
                thr.join(dataset_timeout)
                if thr.is_alive():
                    _raise_timeout_warning()
                thr.join()
            batch = batch_ret[0]
            # Bug fix: this sizing used to run even on the stop-iteration
            # path above, where ``batch`` is None and ``max()`` raises a
            # TypeError.  Only measure/allocate when a batch was fetched.
            self.mem_size = max(map(_measure, batch))
            self._allocate_shared_memory()
        return batch, self.prefetch_state

    def _allocate_shared_memory(self):
        """(Re)allocate the shared byte buffer once the slot size is known."""
        if self.measure_required():
            self.mem_bulk = None
        else:
            self.mem_bulk = \
                sharedctypes.RawArray('b', self.batch_size * self.mem_size)

    def launch_thread(self):
        """Create the worker pool and start the prefetch thread."""
        self._pool = multiprocessing.Pool(
            processes=self.n_processes,
            initializer=_fetch_setup,
            initargs=(self.dataset, self.mem_size, self.mem_bulk),
            maxtasksperchild=self.maxtasksperchild)
        if self._interruption_testing:
            pids = self._pool.map(_report_pid, range(self.n_processes))
            print(' '.join(map(str, pids)))
            sys.stdout.flush()

        thread = threading.Thread(target=self._run, name='prefetch_loop')
        # Thread.setDaemon() is deprecated; assign the attribute instead.
        thread.daemon = True
        thread.start()

        self._thread = thread
        return thread

    def _run(self):
        """The entry routine of the prefetch thread."""
        alive = True
        try:
            while alive:
                if self._terminating:
                    break
                alive = self._task()
        finally:
            self._pool.close()
            self._pool.join()

    def _task(self):
        """Do a single task in the prefetch thread.

        Returns a bool indicating whether the loop should continue running.
        """
        status, prefetch_state, reset_count = self._comm.check()
        if status == _Communicator.STATUS_RESET:
            self.prefetch_state = prefetch_state
        elif status == _Communicator.STATUS_TERMINATE:
            return False  # stop loop

        self.prefetch_state, indices = _statemachine.iterator_statemachine(
            self.prefetch_state, self.batch_size, self.repeat,
            self.order_sampler, len(self.dataset))

        if indices is None:  # stop iteration
            batch = None
        else:
            future = self._pool.map_async(_fetch_run, enumerate(indices))
            while True:
                try:
                    data_all = future.get(_response_time)
                except multiprocessing.TimeoutError:
                    # Keep polling so a terminate request is noticed even
                    # while the pool is still working.
                    if self._comm.is_terminated:
                        return False
                else:
                    break
            batch = [_unpack(data, self.mem_bulk) for data in data_all]

        self._comm.put(batch, self.prefetch_state, reset_count)
        return True
# Using `parameterized` function (e.g. bound method) with Pool is tricky due to
# restrictions imposed by Pickle. Picklable types differ across versions.
# Just using top-level function with globals seems to be safest.
# This does not break thread safety or make these globals shared between
# workers: each worker process has its own address space and its own copy.
# To make static linter happy, we first initialize global variables.
_fetch_dataset = None   # dataset the worker reads examples from
_fetch_mem_size = None  # per-example shared-memory slot size, in bytes
_fetch_mem_bulk = None  # shared-memory buffer, or None when not used
def _fetch_setup(dataset, mem_size, mem_bulk):
    """Pool-worker initializer: stash the fetch parameters in process globals.

    Runs once in each worker process.  SIGINT is ignored so a Ctrl-C in the
    parent does not kill workers mid-task, and both RNGs are re-seeded by
    PID so sibling workers draw different random streams.
    """
    global _fetch_dataset, _fetch_mem_size, _fetch_mem_bulk
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    _fetch_dataset = dataset
    _fetch_mem_size = mem_size
    _fetch_mem_bulk = mem_bulk
    numpy.random.seed(multiprocessing.current_process().pid)
    torch.manual_seed(multiprocessing.current_process().pid)
def _fetch_run(inputs):
    """Pool-worker task: fetch one example and optionally pack it.

    *inputs* is ``(slot, index)``: the batch slot this example occupies and
    its index in the dataset.  When a shared-memory buffer is configured,
    the example's arrays are packed into that slot's byte range.
    """
    slot, index = inputs
    example = _fetch_dataset[index]
    if _fetch_mem_bulk is None:
        return example
    start = slot * _fetch_mem_size
    return _pack(example, _fetch_mem_bulk, start, start + _fetch_mem_size)
def _report_pid(_):  # for testing
    """Return this worker process's PID (mapped over the pool in tests)."""
    return multiprocessing.current_process().pid
class _PackedNdarray(object):
    """A numpy array serialized into a flat shared-memory buffer.

    Only the metadata needed to rebuild the array (shape, dtype, size,
    offset) is stored on the instance; the raw bytes live in the shared
    buffer ``mem`` starting at ``offset``.
    """

    def __init__(self, array, mem, offset):
        self.shape = array.shape
        self.dtype = array.dtype
        self.nbytes = array.nbytes
        self.size = array.size
        self.offset = offset
        end = self.offset + self.nbytes
        if end > len(mem):
            raise ValueError(
                'Shared memory size is too small. expect:{}, actual:{}'.format(
                    end, len(mem)))
        # Copy the array's bytes into the shared buffer in place.
        view = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
        view[...] = array.ravel()

    def unpack(self, mem):
        """Reconstruct and return an owning copy of the array from ``mem``."""
        flat = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
        return flat.reshape(self.shape).copy()
def _measure(data):
    """Return the number of bytes of shared memory needed to pack *data*.

    Sums the ``nbytes`` of every numpy array in *data*, which may be a bare
    ndarray or a tuple/list/dict possibly containing ndarrays -- the same
    containers handled by _pack()/_unpack().  Non-array values contribute
    nothing, since they are pickled rather than packed.
    """
    expect = 0
    t = type(data)
    if t is tuple or t is list:
        for v in data:
            if isinstance(v, numpy.ndarray):
                expect += v.nbytes
    elif t is dict:
        # Bug fix: iterating the dict directly only visited its *keys*, so
        # dict examples were measured as 0 bytes and never used shared
        # memory.  Measure the values, matching _pack()/_unpack().
        for v in data.values():
            if isinstance(v, numpy.ndarray):
                expect += v.nbytes
    elif t is numpy.ndarray:
        # A bare array (no container) must be measured too; _pack() handles
        # this case but the old code returned 0 for it.
        expect = data.nbytes
    return expect
def _pack(data, mem, offset, limit):
    """Pack the numpy arrays inside *data* into the shared buffer *mem*.

    *data* may be a bare ndarray or a tuple/list/dict whose values may be
    ndarrays.  Each array that fits in ``mem[offset:limit]`` is replaced by
    a _PackedNdarray stub; anything that does not fit (or is not an
    ndarray) is left as-is, to be pickled normally.  Returns the (possibly
    rebuilt) container; a UserWarning is emitted when something overflowed.
    """
    if len(mem) == 0:
        return data
    t = type(data)
    over = False
    if t is tuple or t is list:
        ret = []
        for v in data:
            if isinstance(v, numpy.ndarray):
                if v.nbytes + offset > limit:
                    over = True  # does not fit; fall back to pickling it
                else:
                    v = _PackedNdarray(v, mem, offset)
                    offset += v.nbytes
            ret.append(v)
        data = t(ret)
    elif t is dict:
        ret = {}
        for k, v in six.iteritems(data):
            if isinstance(v, numpy.ndarray):
                if v.nbytes + offset > limit:
                    over = True
                else:
                    v = _PackedNdarray(v, mem, offset)
                    offset += v.nbytes
            ret[k] = v
        data = ret
    elif t is numpy.ndarray:
        if data.nbytes + offset > limit:
            over = True
        else:
            data = _PackedNdarray(data, mem, offset)
            offset += data.nbytes
    if over:
        expect = _measure(data)
        warnings.warn(
            'Shared memory size is too small.\n' +
            'Please set shared_mem option for MultiprocessIterator.\n' +
            'Expect shared memory size: {} bytes.\n'.format(expect) +
            'Actual shared memory size: {} bytes.'.format(limit - offset),
            UserWarning)
    return data
def _unpack(data, mem):
    """Inverse of _pack(): replace every _PackedNdarray in *data* with the
    reconstructed numpy array, reading bytes from the shared buffer *mem*.
    Containers (tuple/list/dict) are rebuilt; other values pass through.
    """
    if len(mem) == 0:
        return data
    t = type(data)
    if t is tuple or t is list:
        restored = [v.unpack(mem) if isinstance(v, _PackedNdarray) else v
                    for v in data]
        data = t(restored)
    elif t is dict:
        data = {k: (v.unpack(mem) if isinstance(v, _PackedNdarray) else v)
                for k, v in six.iteritems(data)}
    elif t is _PackedNdarray:
        data = data.unpack(mem)
    return data
| 34.625557 | 86 | 0.612582 |
91da0b0920a9aa4f88ec968209965abb11df129f | 1,795 | py | Python | configuration/config.py | nefedov-games/deadly-duck | 30128e1f76acd3a81a1e6a16e07f6e6db0ce6a0d | [
"BSD-4-Clause"
] | 1 | 2016-01-12T22:06:50.000Z | 2016-01-12T22:06:50.000Z | configuration/config.py | nefedov-games/deadly-duck | 30128e1f76acd3a81a1e6a16e07f6e6db0ce6a0d | [
"BSD-4-Clause"
] | 14 | 2015-12-21T17:35:43.000Z | 2019-11-18T06:47:57.000Z | configuration/config.py | montreal91/deadly-duck | 30128e1f76acd3a81a1e6a16e07f6e6db0ce6a0d | [
"BSD-4-Clause"
] | null | null | null |
import json
import os
from typing import Dict
basedir = os.path.abspath(os.path.dirname(__file__))
class DdConfig:
    """Base application configuration shared by every environment."""

    FLASKY_MAIL_SUBJECT_PREFIX = "[Flasky]"
    FLASKY_MAIL_SENDER = "Flasky Admin <admin@flasky.com>"
    FLASKY_ADMIN = os.environ.get("FLASKY_ADMIN")

    MAIL_SERVER = "smtp.gmail.com"
    MAIL_PORT = 587
    # Port 587 is the STARTTLS submission port: TLS on, SSL off.
    # (Bug fix: MAIL_USE_TLS and MAIL_USE_SSL were both True, which is
    # contradictory -- an SMTPS connection to port 587 fails.  SSL would
    # only be correct with MAIL_PORT = 465.)
    MAIL_USE_TLS = True
    MAIL_USE_SSL = False
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")

    MEMCACHED_DEFAULT_TIMEOUT = 10 * 60  # seconds
    MEMCACHED_SERVERS = ["127.0.0.1:11211"]

    OAUTH_CREDENTIALS_FILE = "configuration/oauth_credentials_dev.json"

    SECRET_KEY = os.environ.get("SECRET_KEY") or "go fork yourself"
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    @classmethod
    def GetOauthCredentials(cls) -> Dict[str, str]:
        """Load and return the OAuth credentials mapping (a JSON object of
        str -> str) from ``cls.OAUTH_CREDENTIALS_FILE``."""
        with open(cls.OAUTH_CREDENTIALS_FILE) as credentials_file:
            return json.load(credentials_file)

    @staticmethod
    def InitApp(app) -> None:
        """Hook for environment-specific app initialization; no-op here."""
        pass
class DdDevelopmentConfig(DdConfig):
    # Local development: debug mode on, dedicated "duck_dev" database.
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = "postgresql://duck:duck18@localhost/duck_dev"
    SQLALCHEMY_ECHO = False
class DdTestingConfig(DdConfig):
    # Test runs: TESTING flag on, isolated "duck_test" database.
    TESTING = True
    # NOTE(review): an empty credentials path makes GetOauthCredentials()
    # fail with an open() error if it is ever called during tests --
    # confirm that is intentional.
    OAUTH_CREDENTIALS_FILE = ""
    SQLALCHEMY_DATABASE_URI = "postgresql://duck:duck18@localhost/duck_test"
    SQLALCHEMY_ECHO = False
class DdProductionConfig(DdConfig):
    # Production: use DATABASE_URL when set, otherwise fall back to a local
    # SQLite file next to this module.
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL") or (
        "sqlite:///" + os.path.join(basedir, "data.sqlite")
    )
# Registry of configuration classes keyed by environment name; "default"
# points at the development configuration.
config = {
    "development": DdDevelopmentConfig,
    "testing": DdTestingConfig,
    "production": DdProductionConfig,
    "default": DdDevelopmentConfig,
}
| 26.397059 | 76 | 0.707521 |
219220542ea00abf2a362b079a16ff42c172e51f | 23 | py | Python | djangoutils/__init__.py | rensg001/django-utils | cbf32374f00446ac56092ccad83c7a75290e9914 | [
"MIT"
] | 96 | 2018-11-30T21:35:20.000Z | 2022-03-23T04:08:59.000Z | version.py | mdgreenwald/audit-policy-kubernetes-daemonset | 7b6a976cf74fbfe4e553c5f79b02606e80b82922 | [
"Apache-2.0"
] | 22 | 2018-12-01T02:37:28.000Z | 2022-03-12T08:50:58.000Z | version.py | mdgreenwald/audit-policy-kubernetes-daemonset | 7b6a976cf74fbfe4e553c5f79b02606e80b82922 | [
"Apache-2.0"
] | 43 | 2018-12-03T18:03:12.000Z | 2022-03-26T00:11:12.000Z |
__version__ = '0.0.1'  # package version string
| 7.666667 | 21 | 0.608696 |
5a881f3326161bf3da928476965c18c9f971eb1a | 26,026 | py | Python | sublimeText3/Packages/SublimeCodeIntel/libs/codeintel2/common.py | MoAnsir/dot_file_2017 | 5f67ef8f430416c82322ab7e7e001548936454ff | [
"MIT"
] | 2 | 2018-04-24T10:02:26.000Z | 2019-06-02T13:53:31.000Z | Data/Packages/SublimeCodeIntel/libs/codeintel2/common.py | Maxize/Sublime_Text_3 | be620476b49f9a6ce2ca2cfe825c4e142e7e82b9 | [
"Apache-2.0"
] | 1 | 2016-02-10T09:50:09.000Z | 2016-02-10T09:50:09.000Z | Packages/SublimeCodeIntel/libs/codeintel2/common.py | prisis/sublime-text-packages | 99ae8a5496613e27a75e5bd91723549b21476e60 | [
"MIT"
] | 2 | 2019-04-11T04:13:02.000Z | 2019-06-02T13:53:33.000Z | #!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""Code Intelligence: common definitions"""
# Dev Notes:
# - XXX Need some good top-level logging control functions for this package.
# - XXX Rationalize exceptions.
# - XXX Coding style name changes.
# Public names exported by ``import *`` from this module.
__all__ = [
    "Trigger", "Definition", "CILEDriver", "Evaluator",
    "EvalController", "LogEvalController",
    "canonicalizePath", "parseAttributes", "isUnsavedPath",
    "TRG_FORM_CPLN", "TRG_FORM_CALLTIP", "TRG_FORM_DEFN",
    "PRIORITY_CONTROL", "PRIORITY_IMMEDIATE", "PRIORITY_CURRENT",
    "PRIORITY_OPEN", "PRIORITY_BACKGROUND",
    "CodeIntelDeprecationWarning",
    "CodeIntelError", "NotATriggerError", "EvalError", "EvalTimeout",
    "VirtualMethodError", "CitadelError", "NoBufferAccessorError",
    "CILEError", "CIXError", "CIDBError", "DatabaseError",
    "CorruptDatabase", "NotFoundInDatabase", "CITDLError",
    "NoModuleEntry", "NoCIDBModuleEntry",
    "LazyClassAttribute",
    "ENABLE_HEURISTICS",
    "_xpcom_",
]
import os
from os.path import dirname, join, normpath, exists, basename
import sys
import re
import stat
import time
import threading
import logging
import warnings
try:
from zope.cachedescriptors.property import Lazy as LazyClassAttribute
except ImportError:
import warnings
warnings.warn("Unable to import zope.cachedescriptors.property")
# Fallback to regular properties.
LazyClassAttribute = property
import SilverCity
from SilverCity.Lexer import Lexer
from SilverCity import ScintillaConstants
if "CODEINTEL_NO_PYXPCOM" in os.environ:
_xpcom_ = False
else:
try:
from xpcom import components
from xpcom.server import UnwrapObject
_xpcom_ = True
except ImportError:
_xpcom_ = False
# XXX Should only do this hack for non-Komodo local codeintel usage.
# XXX We need to have a better mechanism for rationalizing and sharing
# common lexer style classes. For now we'll just HACKily grab from
# Komodo's styles.py. Some of this is duplicating logic in
# KoLanguageServiceBase.py.
_ko_src_dir = normpath(join(dirname(__file__), *([os.pardir]*3)))
sys.path.insert(0, join(_ko_src_dir, "schemes"))
try:
import styles
finally:
del sys.path[0]
del _ko_src_dir
#---- general codeintel pragmas
# Allow the CILEs to generate type guesses based on type names (e.g.
# "event" is an Event in JS).
ENABLE_HEURISTICS = True
#---- warnings
class CodeIntelDeprecationWarning(DeprecationWarning):
    """Warning category for deprecated codeintel APIs, so they can be
    filtered independently of other DeprecationWarnings."""
    pass

# Here is how to disable these warnings in your code:
#   import warnings
#   from codeintel2.common import CodeIntelDeprecationWarning
#   warnings.simplefilter("ignore", CodeIntelDeprecationWarning)
warnings.simplefilter(
    "ignore", CodeIntelDeprecationWarning)  # turn off for now
#---- exceptions
class CodeIntelError(Exception):
    """Base Code Intelligence system error."""
    pass

Error = CodeIntelError  # XXX Remove uses of this in favour of CodeIntelError.

class NotATriggerError(CodeIntelError):
    """The given position is not a valid trigger point."""
    pass

class EvalError(CodeIntelError):
    """A completion/calltip evaluation failed."""
    pass

class EvalTimeout(EvalError):
    """A completion/calltip evaluation did not finish in time."""
    pass

class VirtualMethodError(CodeIntelError):
    """A required ("virtual") method was not implemented by a subclass."""
    # TODO: pull out the method and class name from the stack for errmsg
    #       to tell the user what needs to be implemented
    pass

class CitadelError(CodeIntelError):
    """Base error for the Citadel (CIDB-backed) subsystem."""
    pass

class NoBufferAccessorError(CodeIntelError):
    """The accessor has no buffer/content to access."""
    pass

class CILEError(CitadelError):
    """CILE processing error."""
    # XXX Should add some relevant data to the exception. Perhaps
    #     the request should be passed in and this c'tor can extract
    #     data it wants to keep. This could be used to facilitate
    #     submitting bug reports on our Language Engines.
    pass

class CIXError(CitadelError):
    """Code Intelligence XML error."""
    pass

class CIDBError(CitadelError):
    """Code Intelligence Database error."""
    # TODO: Transition to DatabaseError and ensure that the change in
    #       base class doesn't cause problems.
    pass

class DatabaseError(CodeIntelError):
    """Code intelligence database error."""
    pass

class CorruptDatabase(DatabaseError):
    """Corruption in some part of the database was found."""
    # XXX Should add attributes that indicate which part
    #     was corrupt and/or one of a known set of possible corrupts.
    #     Then add a Database.recover() function that could attempt
    #     to recover with that argument.
    pass

class NotFoundInDatabase(DatabaseError):
    """No data for the buffer was found in the database."""
    pass

class CITDLError(CitadelError):  # XXX Just drop in favour of CitadelError?
    """CITDL syntax error."""
    pass
class NoModuleEntry(CIDBError):
    """Raised when there is no entry for the named module in the CIDB.

    The "module_path" second constructor argument (possibly None) is kept
    on the exception so that completion handling -- which traps these
    errors -- can use that path to kick off a scan of the file.  This is no
    extra burden: the import handlers that raise this error have just
    looked up that path.
    """

    def __init__(self, module_name, module_path):
        CIDBError.__init__(self)
        self.module_name = module_name
        self.module_path = module_path

    def __str__(self):
        # Show just the basename of the path, when one is known.
        path_info = (" (%s)" % os.path.basename(self.module_path)
                     if self.module_path else "")
        return "no module entry for '%s'%s in CIDB" % (self.module_name,
                                                       path_info)
class NoCIDBModuleEntry(CIDBError):  # XXX change name to NoModuleEntryForPath
    """Raised when the CIDB has no module entry for the given path."""

    def __init__(self, path):
        CIDBError.__init__(self)
        self.path = path

    def __str__(self):
        return "no module entry for '%s' in CIDB" % os.path.basename(self.path)
#---- globals

# Trigger forms.
TRG_FORM_CPLN, TRG_FORM_CALLTIP, TRG_FORM_DEFN = list(range(3))

# Priorities at which scanning requests can be scheduled.
PRIORITY_CONTROL = 0  # Special sentinel priority to control scheduler
PRIORITY_IMMEDIATE = 1  # UI is requesting info on this file now
PRIORITY_CURRENT = 2  # UI requires info on this file soon
PRIORITY_OPEN = 3  # UI will likely require info on this file soon
PRIORITY_BACKGROUND = 4  # info may be needed sometime

# TODO: these are unused, drop them
# CIDB base type constants
BT_CLASSREF, BT_INTERFACEREF = list(range(2))

# TODO: These are unused, drop them, the symbolType2Name below and its dead
# usage in cb.py.
# CIDB symbol type constants
(ST_FUNCTION, ST_CLASS, ST_INTERFACE, ST_VARIABLE, ST_ARGUMENT) = list(range(5))
_symbolType2Name = {
    ST_FUNCTION: "function",
    ST_CLASS: "class",
    ST_INTERFACE: "interface",
    ST_VARIABLE: "variable",
    ST_ARGUMENT: "argument"
}
#---- common codeintel base classes
class Trigger(object):
    """A trigger: a point in a buffer at which completion, calltip or
    go-to-definition evaluation should happen, plus which kind of
    evaluation that is (the lang/form/type triple).
    """
    if _xpcom_:
        _com_interfaces_ = [components.interfaces.koICodeIntelTrigger]
    lang = None  # e.g. "Python", "CSS"
    form = None  # TRG_FORM_CPLN or TRG_FORM_CALLTIP
    type = None  # e.g. "object-members"
    pos = None  # Trigger position, in bytes (of UTF 8)
    implicit = None  # false iff the user explicitly requested evaluation
    # The number characters of the trigger. For most (but not all) triggers
    # there is a clear distinction between a trigger token and a preceding
    # context token. For example:
    #   foo.<|>         # trigger token is '.', length = 1
    #   Foo::Bar-><|>   # trigger token is '->', length = 2
    # This default to 1.
    length = None
    # The number of characters after pos that should be replaced. Most of the
    # time this will be zero. For example
    #   foo.<|>prop     # extentLength is 4, for "prop"
    # Note that this goes in the opposite direction of .length
    extentLength = None
    retriggerOnCompletion = False

    def __init__(
            self, lang, form, type, pos, implicit, length=1, extentLength=0,
            **extra):
        self.lang = lang
        self.form = form
        self.type = type
        self.pos = pos
        self.implicit = implicit
        self.length = length
        self.extentLength = extentLength
        self.extra = extra  # Trigger-specific extra data, if any

    @property
    def id(self):
        # (lang, form, type) identifies a trigger *kind* (not an instance).
        return (self.lang, self.form, self.type)

    __name = None  # cached value of the `name` property

    @property
    def name(self):
        """A more user-friendly name for this trigger, e.g.
        'python-complete-object-members'
        """
        if self.__name is None:
            form_str = {TRG_FORM_CPLN: "complete",
                        TRG_FORM_DEFN: "defn",
                        TRG_FORM_CALLTIP: "calltip"}[self.form]
            self.__name = "%s-%s-%s" % (self.lang.lower(), form_str,
                                        self.type)
        return self.__name

    def __repr__(self):
        explicit_str = (not self.implicit) and " (explicit)" or ""
        return "<Trigger '%s' at %d%s>" % (self.name, self.pos, explicit_str)

    def is_same(self, trg):
        """Return True iff the given trigger is (effectively) the same
        as this one.

        Dev Note: "Effective" is currently left a little fuzzy. Just
        comparing enough to fix Komodo Bug 55378.
        """
        if _xpcom_:
            trg = UnwrapObject(trg)
        if (self.pos == trg.pos
            and self.type == trg.type
            and self.form == trg.form
            and self.lang == trg.lang):
            return True
        else:
            return False

    def to_dict(self):
        """Serialize this trigger as a dictionary.

        This is used for out-of-process codeintel.
        """
        return dict(lang=self.lang, form=self.form, type=self.type,
                    pos=self.pos, implicit=self.implicit, length=self.length,
                    extentLength=self.extentLength,
                    retriggerOnCompletion=self.retriggerOnCompletion,
                    **self.extra)
class Definition(object):
    """The result of a go-to-definition evaluation: where a symbol is
    defined (path/blobname/lpath/line) and what it is (ilk, citdl,
    signature, doc, ...).
    """
    if _xpcom_:
        _com_interfaces_ = [components.interfaces.koICodeIntelDefinition]
    lang = None  # e.g. "Python", "CSS"
    path = None  # e.g. "/usr/local/..."
    blobname = None  # e.g. "sys"
    lpath = None  # lookup tuple in blob, e.g. ["MyClass", "afunc"]
    name = None  # e.g. "path"
    line = None  # e.g. 345 (1-based)
    ilk = None  # e.g. "function"
    citdl = None  # e.g. "int"
    signature = None  # e.g. "function xyz(...)"
    doc = None  # e.g. "Xyz is just nasty stuff..."
    attributes = None  # e.g. "local private"
    returns = None  # e.g. "int"
    scopestart = None  # e.g. 320 (1-based)
    scopeend = None  # e.g. 355 (1-based)

    def __init__(self, lang, path, blobname, lpath, name, line, ilk,
                 citdl, doc, signature=None, attributes=None,
                 returns=None, scopestart=None, scopeend=None):
        self.lang = lang
        self.path = path
        self.blobname = blobname
        self.lpath = lpath
        self.name = name
        self.line = line
        self.ilk = ilk
        self.citdl = citdl
        self.doc = doc
        self.signature = signature
        self.attributes = attributes
        self.returns = returns
        self.scopestart = scopestart
        self.scopeend = scopeend

    def __repr__(self):
        if self.path is None:
            return "<Definition: %s '%s' at %s#%s lpath=%s>"\
                   % (self.ilk, self.name, self.blobname, self.line, self.lpath)
        else:
            return "<Definition: %s '%s' at %s#%s in %s lpath=%s>"\
                   % (self.ilk, self.name, self.blobname, self.line,
                      basename(self.path), self.lpath)

    def equals(self, other):
        """ Equality comparision for XPCOM """
        if _xpcom_:
            # NOTE(review): bare except deliberately swallows unwrap
            # failures for non-XPCOM objects -- confirm that is intended.
            try:
                other = UnwrapObject(other)
            except:
                pass
        for attr in (
            "lang", "path", "blobname", "lpath", "name", "line", "ilk",
                "citdl", "doc", "signature", "attributes", "returns"):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True

    def toString(self):
        """ toString implementation for XPCOM """
        return repr(self)

    @classmethod
    def unique_definitions(cls, defns):
        """Takes a collection of defns and returns the unique list of defns."""
        # NOTE(review): Definition defines no __eq__, so ``unique_defn ==
        # defn`` is an *identity* test; equal-but-distinct definitions are
        # not deduplicated.  Confirm whether .equals() was intended here.
        unique_defns = []
        for defn in defns:
            for unique_defn in unique_defns:
                if unique_defn.path == defn.path and unique_defn == defn:
                    # defn is already in the unique_defn list.
                    break
            else:
                unique_defns.append(defn)
        return unique_defns
class CILEDriver(object):
    """Abstract driver interface over a CILE ("CodeIntel Language Engine").

    A CILE converts source code of one language into CIX -- the XML form
    that is loaded into the CIDB and then used for completion, code
    browsing, etc.  A driver adapts a particular CILE (a Python module, an
    external executable, whatever) to this interface.
    """

    def __init__(self, mgr):
        self.mgr = mgr

    # DEPRECATED
    def scan(self, request):
        """Scan the given ScanRequest and return the result as a CIX doc.

        Must be re-entrant: the scheduler typically runs a pool of scans
        simultaneously, so a driver can be entered from multiple threads.
        On success, returns the CIX document as a *unicode* string (i.e.
        NOT an encoded byte string -- UTF-8 encoding happens elsewhere).
        Raises CILEError on failure; drivers must shield callers from CILE
        hangs and crashes.
        """
        raise VirtualMethodError("CILEDriver.scan")

    def scan_purelang(self, buf):
        """Scan *buf* (an instance of this language's Buffer class) and
        return a CIX element tree."""
        raise VirtualMethodError("CILEDriver.scan_purelang")

    def scan_binary(self, buf):
        """Scan *buf* (an instance of this language's BinaryBuffer class)
        and return a CIX element tree."""
        raise VirtualMethodError("CILEDriver.scan_binary")

    def scan_multilang(self, buf, csl_cile_driver=None):
        """Scan a multi-language (UDL-based) buffer; return a CIX element
        tree.

        *buf* is the multi-lang UDLBuffer instance (e.g.
        lang_rhtml.RHTMLBuffer for RHTML).  *csl_cile_driver* (optional) is
        the CSL (client-side language) CILE driver: any CSL tokens gathered
        while scanning should be handed to it via::

            csl_cile_driver.scan_csl_tokens(file_elem, blob_name, csl_tokens)

        which appends a CIX <scope ilk="blob"> element to the <file>
        element.  Languages that can be part of a multi-lang document must
        implement this method.
        """
        raise VirtualMethodError("CILEDriver.scan_multilang")

    def scan_csl_tokens(self, file_elem, blob_name, csl_tokens):
        """Build a CIX <scope ilk="blob"> tree from the given CSL
        (client-side language) tokens and append it to *file_elem*.

        Implemented by languages usable as a client-side language inside a
        multi-lang document -- realistically just JavaScript for now.
        """
        raise VirtualMethodError("CILEDriver.scan_csl_tokens")
class EvalController(object):
    """Tracks and steers one asynchronous completion/calltip evaluation.

    Hand an instance (typically of a subclass, for richer interaction) to
    Buffer.async_eval_at_trg().  The evaluation engine calls start(), the
    set_*() result setters and the debug/info/warn/error logging hooks, and
    finally done(); a client may call abort() to request cancellation and
    wait() to block until the evaluation finishes.
    """

    def __init__(self):
        # Signalled once, when the evaluation completes.
        self.complete_event = threading.Event()
        self._done = False
        self._aborted = False
        # Evaluation context: filled in by start(), cleared by done().
        self.buf = None
        self.trg = None
        # Evaluation results: filled in by the set_*() methods.
        self.cplns = None
        self.calltips = None
        self.defns = None
        self.desc = None
        self.keep_existing = False

    def close(self):
        """Hook called when this controller is no longer needed."""
        pass

    def start(self, buf, trg):
        """Record the buffer and trigger at the start of evaluation."""
        self.buf = buf
        self.trg = trg

    def set_desc(self, desc):
        self.desc = desc

    def done(self, reason):
        """Mark the evaluation finished and wake any wait()ers."""
        self.info("done eval: %s", reason)
        self._done = True
        self.buf = None
        self.trg = None
        self.complete_event.set()

    def is_done(self):
        return self._done

    def abort(self):
        """Ask the completion system to abandon the current session."""
        self._aborted = True

    def is_aborted(self):
        return self._aborted

    def wait(self, timeout=None):
        """Block until the session is done, or *timeout* seconds elapse."""
        self.complete_event.wait(timeout)

    # Logging hooks: no-ops here; subclasses may forward them somewhere.
    def debug(self, msg, *args):
        pass

    def info(self, msg, *args):
        pass

    def warn(self, msg, *args):
        pass

    def error(self, msg, *args):
        pass

    # XXX Perhaps this capturing should be in a sub-class used only for
    # testing. Normal IDE behaviour is to fwd the data in set_*().
    def set_cplns(self, cplns):
        self.cplns = cplns

    def set_calltips(self, calltips):
        self.calltips = calltips

    def set_defns(self, defns):
        self.defns = defns
class LogEvalController(EvalController):
    """An EvalController that forwards status messages to a logger.

    *logger_or_log_name* may be a logging.Logger instance, a logger name,
    or None (root logger).
    """

    def __init__(self, logger_or_log_name=None):
        if isinstance(logger_or_log_name, logging.getLoggerClass()):
            self.logger = logger_or_log_name
        else:
            self.logger = logging.getLogger(logger_or_log_name)
        EvalController.__init__(self)

    def debug(self, msg, *args):
        self.logger.debug(msg, *args)

    def info(self, msg, *args):
        self.logger.info(msg, *args)

    def warn(self, msg, *args):
        # Bug fix: Logger.warn() is a deprecated alias; use warning().
        self.logger.warning(msg, *args)

    def error(self, msg, *args):
        self.logger.error(msg, *args)
class Evaluator(object):
    """One asynchronous completion/calltip evaluation job.

    Create an instance (generally of a specialized subclass) and hand it to
    Manager.request_eval() and/or Manager.request_reeval().

    At a minimum a subclass must implement eval(), following the rules
    described for Buffer.async_eval_at_trg() (see buffer.py).  Typically
    this just means: ensure ctlr.done() is called, react to
    ctlr.is_aborted(), and optionally call the other EvalController methods
    as appropriate.  A subclass should also implement readable __str__
    output.

    The manager co-ordinates a queue of evaluation requests, only ever runs
    one evaluation at a time (in an IDE only one makes sense anyway), calls
    eval() in a subthread, and calls ctlr.done(<reason>) if eval()
    terminates with an exception.

    One important base class is CitadelEvaluator (see citadel.py), which
    knows how to do CITDL evaluation using the CIDB; Citadel languages
    (Perl, Python, ...) generally use CitadelEvaluators for most triggers.
    """

    def __init__(self, ctlr, buf, trg):
        assert isinstance(ctlr, EvalController)
        # assert isinstance(buf, Buffer)  # skipped: would create a circular dep
        assert isinstance(trg, Trigger)
        self.ctlr = ctlr
        self.buf = buf
        self.trg = trg

    def eval(self):
        self.ctlr.done("eval not implemented")
        raise VirtualMethodError("Evaluator.eval")

    def close(self):
        """Release the controller once this evaluator is finished with."""
        if self.ctlr is not None:
            self.ctlr.close()
#---- helper methods
# TODO: drop this (see note above)
def symbolType2Name(st):
    """Map an ST_* CIDB symbol-type constant to its name, e.g. "function"."""
    return _symbolType2Name[st]
# TODO: drop this, see similar func in parseutil.py
def xmlattrstr(attrs):
    """Construct an XML-safe attribute string from the given attributes.

        "attrs" is a dictionary of attributes

    The returned attribute string includes a leading space, if necessary,
    so it is safe to use the string right after a tag name.  Attributes are
    emitted sorted by name, which keeps the output stable.
    """
    from xml.sax.saxutils import quoteattr
    return "".join(
        " %s=%s" % (name, quoteattr(str(attrs[name])))
        for name in sorted(attrs))
def isUnsavedPath(path):
    """Return true iff *path* is a special ``<Unsaved>\\sub\\path`` file."""
    tag = "<Unsaved>"
    if not path.startswith(tag):
        return False
    # Either exactly "<Unsaved>", or "<Unsaved>" followed by a separator.
    return len(path) == len(tag) or path[len(tag)] in "\\/"
# TODO: move this utils.py
# Matches URI-style paths with a scheme prefix, e.g. "ftp://...".
# (Bug fix: raw string -- "\w" in a plain string literal is an invalid
# escape sequence and raises a DeprecationWarning on modern Pythons.)
_uriMatch = re.compile(r"^\w+://")
def canonicalizePath(path, normcase=True):
    r"""Return what CodeIntel considers a canonical version of the given path.

        "path" is the path to canonicalize.
        "normcase" (optional, default True) is a boolean indicating if the
            case should be normalized.

    "Special" paths are ones of the form "<Tag>\sub\path". Supported special
    path tags:
        <Unsaved>       Used when the given path isn't a real file: e.g.
                        unsaved document buffers.

    Raises a ValueError if it cannot be converted to a canonical path.

    >>> canonicalizePath(r"C:\Python22\Lib\os.py") # normcase on Windows
    'c:\\python22\\lib\\os.py'
    >>> canonicalizePath(r"<Unsaved>\Python-1.py")
    '<Unsaved>\\python-1.py'
    >>> canonicalizePath("<Unsaved>")
    '<Unsaved>'
    >>> canonicalizePath("<Unsaved>\\")
    '<Unsaved>'
    >>> canonicalizePath("ftp://ftp.ActiveState.com/pub")
    'ftp://ftp.ActiveState.com/pub'
    """
    if path is None:
        raise ValueError("cannot canonicalize path, path is None")
    if path.startswith('<'):  # might be a special path
        # Split "<Tag>\rest" at the first path separator; the for/else
        # handles a bare "<Tag>" with no separator at all.
        first, rest = None, None
        for i in range(1, len(path)):
            if path[i] in "\\/":
                first, rest = path[:i], path[i+1:]
                break
        else:
            first, rest = path, None
        if first.endswith('>'):
            tag = first
            subpath = rest
            if tag == "<Unsaved>":
                pass  # leave tag unchanged
            else:
                raise ValueError("unknown special path tag: %s" % tag)
            cpath = tag
            if subpath:
                # Normalize only the sub-path; the tag keeps its case.
                subpath = os.path.normpath(subpath)
                if normcase:
                    subpath = os.path.normcase(subpath)
                cpath = os.path.join(cpath, subpath)
            return cpath
    if _uriMatch.match(path):  # ftp://, koremote://
        # XXX Should we normcase() a UR[LI]
        return path
    else:
        cpath = os.path.normpath(os.path.abspath(path))
        if normcase:
            cpath = os.path.normcase(cpath)
        return cpath
# TODO: move this utils.py
def parseAttributes(attrStr=None):
    """Parse the given attributes string (from CIX) into an attribute dict.

    Whitespace-separated tokens of the form "key=value" map key to the
    string value (split at the first '='); bare tokens map to the int 1.
    A None *attrStr* yields an empty dict.
    """
    if attrStr is None:
        return {}
    attrs = {}
    for token in attrStr.split():
        key, sep, value = token.partition('=')
        attrs[key] = value if sep else 1
    return attrs
#---- self-test code

if __name__ == '__main__':
    # Run this module's doctests (e.g. those on canonicalizePath()).
    # Bug fix: the old body did ``from . import common``, which raises
    # "attempted relative import with no known parent package" when this
    # file is executed directly as a script.
    import doctest
    doctest.testmod()
| 33.238825 | 80 | 0.63963 |
6ae7e26c44d27ed4cdb0094e407fa4e296af5b20 | 17,256 | py | Python | sdk/python/pulumi_azure_native/web/v20210201/web_app_premier_add_on.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20210201/web_app_premier_add_on.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20210201/web_app_premier_add_on.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['WebAppPremierAddOnArgs', 'WebAppPremierAddOn']
@pulumi.input_type
class WebAppPremierAddOnArgs:
    # Generated input-argument class (see the generator warning in the file
    # header); every constructor keyword below is also exposed as a
    # property getter/setter pair further down.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 kind: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 marketplace_offer: Optional[pulumi.Input[str]] = None,
                 marketplace_publisher: Optional[pulumi.Input[str]] = None,
                 premier_add_on_name: Optional[pulumi.Input[str]] = None,
                 product: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vendor: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a WebAppPremierAddOn resource.
        :param pulumi.Input[str] name: Name of the app.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] location: Resource Location.
        :param pulumi.Input[str] marketplace_offer: Premier add on Marketplace offer.
        :param pulumi.Input[str] marketplace_publisher: Premier add on Marketplace publisher.
        :param pulumi.Input[str] premier_add_on_name: Add-on name.
        :param pulumi.Input[str] product: Premier add on Product.
        :param pulumi.Input[str] sku: Premier add on SKU.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] vendor: Premier add on Vendor.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only registered when explicitly provided.
        if kind is not None:
            pulumi.set(__self__, "kind", kind)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if marketplace_offer is not None:
            pulumi.set(__self__, "marketplace_offer", marketplace_offer)
        if marketplace_publisher is not None:
            pulumi.set(__self__, "marketplace_publisher", marketplace_publisher)
        if premier_add_on_name is not None:
            pulumi.set(__self__, "premier_add_on_name", premier_add_on_name)
        if product is not None:
            pulumi.set(__self__, "product", product)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if vendor is not None:
            pulumi.set(__self__, "vendor", vendor)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the app.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Name of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Location.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter(name="marketplaceOffer")
    def marketplace_offer(self) -> Optional[pulumi.Input[str]]:
        """
        Premier add on Marketplace offer.
        """
        return pulumi.get(self, "marketplace_offer")
    @marketplace_offer.setter
    def marketplace_offer(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "marketplace_offer", value)
    @property
    @pulumi.getter(name="marketplacePublisher")
    def marketplace_publisher(self) -> Optional[pulumi.Input[str]]:
        """
        Premier add on Marketplace publisher.
        """
        return pulumi.get(self, "marketplace_publisher")
    @marketplace_publisher.setter
    def marketplace_publisher(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "marketplace_publisher", value)
    @property
    @pulumi.getter(name="premierAddOnName")
    def premier_add_on_name(self) -> Optional[pulumi.Input[str]]:
        """
        Add-on name.
        """
        return pulumi.get(self, "premier_add_on_name")
    @premier_add_on_name.setter
    def premier_add_on_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "premier_add_on_name", value)
    @property
    @pulumi.getter
    def product(self) -> Optional[pulumi.Input[str]]:
        """
        Premier add on Product.
        """
        return pulumi.get(self, "product")
    @product.setter
    def product(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "product", value)
    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input[str]]:
        """
        Premier add on SKU.
        """
        return pulumi.get(self, "sku")
    @sku.setter
    def sku(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def vendor(self) -> Optional[pulumi.Input[str]]:
        """
        Premier add on Vendor.
        """
        return pulumi.get(self, "vendor")
    @vendor.setter
    def vendor(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vendor", value)
class WebAppPremierAddOn(pulumi.CustomResource):
    # Generated Pulumi resource class (see the generator warning in the file
    # header). The two @overload __init__ signatures are for type checkers;
    # the real dispatch happens in __init__/_internal_init below.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 marketplace_offer: Optional[pulumi.Input[str]] = None,
                 marketplace_publisher: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 premier_add_on_name: Optional[pulumi.Input[str]] = None,
                 product: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vendor: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Premier add-on.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] location: Resource Location.
        :param pulumi.Input[str] marketplace_offer: Premier add on Marketplace offer.
        :param pulumi.Input[str] marketplace_publisher: Premier add on Marketplace publisher.
        :param pulumi.Input[str] name: Name of the app.
        :param pulumi.Input[str] premier_add_on_name: Add-on name.
        :param pulumi.Input[str] product: Premier add on Product.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[str] sku: Premier add on SKU.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] vendor: Premier add on Vendor.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: WebAppPremierAddOnArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Premier add-on.
        :param str resource_name: The name of the resource.
        :param WebAppPremierAddOnArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-args) overloads.
        resource_args, opts = _utilities.get_resource_args_opts(WebAppPremierAddOnArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 marketplace_offer: Optional[pulumi.Input[str]] = None,
                 marketplace_publisher: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 premier_add_on_name: Optional[pulumi.Input[str]] = None,
                 product: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 vendor: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WebAppPremierAddOnArgs.__new__(WebAppPremierAddOnArgs)
            __props__.__dict__["kind"] = kind
            __props__.__dict__["location"] = location
            __props__.__dict__["marketplace_offer"] = marketplace_offer
            __props__.__dict__["marketplace_publisher"] = marketplace_publisher
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            __props__.__dict__["premier_add_on_name"] = premier_add_on_name
            __props__.__dict__["product"] = product
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["sku"] = sku
            __props__.__dict__["tags"] = tags
            __props__.__dict__["vendor"] = vendor
            __props__.__dict__["type"] = None
        # Alias every other API version of this resource type so state from
        # older/newer versions maps onto this one.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20210201:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppPremierAddOn"), pulumi.Alias(type_="azure-native:web/v20210115:WebAppPremierAddOn"), pulumi.Alias(type_="azure-nextgen:web/v20210115:WebAppPremierAddOn")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(WebAppPremierAddOn, __self__).__init__(
            'azure-native:web/v20210201:WebAppPremierAddOn',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppPremierAddOn':
        """
        Get an existing WebAppPremierAddOn resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Output properties start as None; the engine fills them from state.
        __props__ = WebAppPremierAddOnArgs.__new__(WebAppPremierAddOnArgs)
        __props__.__dict__["kind"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["marketplace_offer"] = None
        __props__.__dict__["marketplace_publisher"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["product"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["vendor"] = None
        return WebAppPremierAddOn(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource Location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="marketplaceOffer")
    def marketplace_offer(self) -> pulumi.Output[Optional[str]]:
        """
        Premier add on Marketplace offer.
        """
        return pulumi.get(self, "marketplace_offer")
    @property
    @pulumi.getter(name="marketplacePublisher")
    def marketplace_publisher(self) -> pulumi.Output[Optional[str]]:
        """
        Premier add on Marketplace publisher.
        """
        return pulumi.get(self, "marketplace_publisher")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def product(self) -> pulumi.Output[Optional[str]]:
        """
        Premier add on Product.
        """
        return pulumi.get(self, "product")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional[str]]:
        """
        Premier add on SKU.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def vendor(self) -> pulumi.Output[Optional[str]]:
        """
        Premier add on Vendor.
        """
        return pulumi.get(self, "vendor")
| 42.294118 | 1,771 | 0.638908 |
37f1ec7c9adcb66a49ff530c70d15443ee6b804b | 231 | py | Python | replay_game.py | HayatoDoi/lb_auto | a5d4a9d4de520d5dac2ad9b823d70faaeb12d519 | [
"CC0-1.0"
] | null | null | null | replay_game.py | HayatoDoi/lb_auto | a5d4a9d4de520d5dac2ad9b823d70faaeb12d519 | [
"CC0-1.0"
] | null | null | null | replay_game.py | HayatoDoi/lb_auto | a5d4a9d4de520d5dac2ad9b823d70faaeb12d519 | [
"CC0-1.0"
] | null | null | null | #!python39
from lib.adb import Adb
from lib import last_bullet
from time import sleep
def scenario(adb):
    """Drive the game forever: trigger a replay, then pause three seconds."""
    while True:
        last_bullet.replay(adb)
        sleep(3)
if __name__ == '__main__':
with Adb('5b09ee92') as adb:
scenario(adb)
| 15.4 | 29 | 0.727273 |
99a767261e47ad1fefa10f80bc26b1088fbf65b0 | 2,190 | py | Python | JPS/python/caresjpsadmsinputs/legacy-delete_after_testing/admsMainNew.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 21 | 2021-03-08T01:58:25.000Z | 2022-03-09T15:46:16.000Z | JPS/python/caresjpsadmsinputs/legacy-delete_after_testing/admsMainNew.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 63 | 2021-05-04T15:05:30.000Z | 2022-03-23T14:32:29.000Z | JPS/python/caresjpsadmsinputs/legacy-delete_after_testing/admsMainNew.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 15 | 2021-03-08T07:52:03.000Z | 2022-03-29T04:46:20.000Z | from admsInputDataRetrieverNew import admsInputDataRetriever
from admsAplWriter import admsAplWriter
from collections import namedtuple
from caresjpsutil import PythonLogger
import json
import sys
import config
import cobbling
if __name__ == "__main__":
    # Entry point invoked by the JPS layer.
    # argv: [1] plant identifier, [2] coordinates (quoted, '#'-separated JSON),
    #       [3] ADMS working directory, [4] building data as quoted JSON.
    pythonLogger = PythonLogger('admsMainNew.py')
    try:
        pythonLogger.postInfoToLogServer('start with ' + sys.argv[0] + ' plant = ' + sys.argv[1] + ' coordinates = ' + sys.argv[2]
                                         + ' ADMS working dir = ' + sys.argv[3] + ' top = ' + config.bldTopnode)
        cobbling.run()
        # Normalise the coordinate argument into valid JSON: single quotes
        # become double quotes and '#' separators become commas.
        coordinates = str(sys.argv[2]).replace("'", "\"").replace('#',',');
        coordinates = json.loads(coordinates)
        buildingdata = sys.argv[4].replace("\'","\"")
        buildingdata = json.loads(buildingdata)
        # NOTE(review): the building fields are assigned onto the namedtuple
        # *class* rather than an instance, so BDN is used as a plain record
        # holder here — confirm downstream consumers expect class attributes.
        BDN = namedtuple('BDN', ['BldNumBuildings','BldName','BldType','BldX','BldY','BldHeight', 'BldLength', 'BldWidth', 'BldAngle'])
        BDN.BldName = buildingdata['BldName']
        BDN.BldNumBuildings = len(BDN.BldName)
        BDN.BldType = buildingdata['BldType']
        BDN.BldX = buildingdata['BldX']
        BDN.BldY = buildingdata['BldY']
        BDN.BldHeight = buildingdata['BldHeight']
        BDN.BldLength = buildingdata['BldLength']
        BDN.BldWidth = buildingdata['BldWidth']
        BDN.BldAngle = buildingdata['BldAngle']
        pythonLogger.postInfoToLogServer('coordinates=' + str(coordinates))
        plant = str(sys.argv[1])
        # workingDir = str(sys.argv[4]).replace('/','//')
        workingDir = str(sys.argv[3])
        pythonLogger.postInfoToLogServer('workingDir=' + workingDir)
        # Retrieve the ADMS input data for the plant, then write the .apl file
        # into the working directory.
        test = admsInputDataRetriever(plant,config.bldTopnode, coordinates, ["CO2" ,"CO" , "NO2" , "HC" , "NOx"], 2, config.bdnLimit,False, BDN)
        result = test.get()
        pythonLogger.postInfoToLogServer('calling admsAplWriter ...')
        result['Bdn'] = BDN
        writer = admsAplWriter(result, workingDir + '/test.apl')
        writer.write()
        pythonLogger.postInfoToLogServer('end')
    except Exception as e:
        # NOTE(review): the exception is only logged, not re-raised, so the
        # process exits 0 even on failure — confirm callers inspect the log.
        pythonLogger.postInfoToLogServer(e)
6078ea7e91f08718f77ffc293154d8a64e7cdd30 | 1,505 | py | Python | maze.py | meznak/maze-generator-py | 47156a9c4ec786739a60d46ccb20661416c5a076 | [
"MIT"
] | null | null | null | maze.py | meznak/maze-generator-py | 47156a9c4ec786739a60d46ccb20661416c5a076 | [
"MIT"
] | 2 | 2020-04-17T07:00:47.000Z | 2020-04-17T07:01:55.000Z | maze.py | meznak/maze-generator-py | 47156a9c4ec786739a60d46ccb20661416c5a076 | [
"MIT"
] | null | null | null | import pygame as pg
from cell import Cell
class Maze:
    """A grid of cells explored incrementally via a DFS-style stack."""

    def __init__(self, size):
        self.width, self.height = size
        self.make_grid()
        self.stack = [self.grid[0][0]]
        self.current = self.stack[0]
        self.finished = False

    def make_grid(self):
        """Build the 2D cell grid, wire up neighbours and mark start/end."""
        self.grid = [
            [Cell((col, row)) for row in range(self.height)]
            for col in range(self.width)
        ]
        for column in self.grid:
            for cell in column:
                cell.get_neighbors(self.grid)
        self.start = self.grid[0][0]
        self.end = self.grid[-1][-1]
        self.start.is_start = True
        self.end.is_end = True

    def update(self):
        """Advance one step; mark the maze finished once the stack empties."""
        if not self.stack:
            self.current = None
            self.finished = True
            return
        self.current.changed = True
        self.current = self.stack.pop()
        self.current.update(self.stack)

    def show(self, surface):
        """Draw every cell onto *surface*; return the list of changed cells."""
        surf_w, surf_h = surface.get_size()
        cell_size = (surf_w / self.width, surf_h / self.height)
        changed_cells = [None]
        for column in self.grid:
            for cell in column:
                cell.show(surface, cell_size, self.current, changed_cells)
        return changed_cells

    def reset(self):
        """Re-initialise the maze with the same dimensions."""
        self.__init__((self.width, self.height))
82a52c2deb7dff029f827324a1de9bf6659f6960 | 2,964 | py | Python | utils/pose_utils.py | akashsengupta1997/GraphCMR | 0b8b05be4f711995ba50e414effbde98b6b11c5b | [
"BSD-3-Clause"
] | null | null | null | utils/pose_utils.py | akashsengupta1997/GraphCMR | 0b8b05be4f711995ba50e414effbde98b6b11c5b | [
"BSD-3-Clause"
] | null | null | null | utils/pose_utils.py | akashsengupta1997/GraphCMR | 0b8b05be4f711995ba50e414effbde98b6b11c5b | [
"BSD-3-Clause"
] | null | null | null | """
Parts of the code are adapted from https://github.com/akanazawa/hmr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def compute_similarity_transform(S1, S2):
    """Align S1 to S2 with the best similarity transform (scale s, rotation
    R, translation t), i.e. solve the orthogonal Procrustes problem, and
    return the transformed copy of S1.

    Inputs are 3xN or 2xN point sets (Nx3 / Nx2 layouts are transposed
    internally and the result is returned in the original layout).
    """
    transposed = False
    if S1.shape[0] not in (2, 3):
        S1, S2 = S1.T, S2.T
        transposed = True
    assert S2.shape[1] == S1.shape[1]

    # Center both point sets.
    mu1 = S1.mean(axis=1, keepdims=True)
    mu2 = S2.mean(axis=1, keepdims=True)
    X1 = S1 - mu1
    X2 = S2 - mu2

    # Variance of the centered source, used later to recover scale.
    var1 = np.sum(X1 ** 2)

    # Rotation maximising trace(R'K), where K is the cross-covariance.
    K = X1.dot(X2.T)
    U, _, Vh = np.linalg.svd(K)
    V = Vh.T
    # Force det(R) = +1 (proper rotation, no reflection).
    Z = np.eye(U.shape[0])
    Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
    R = V.dot(Z).dot(U.T)

    scale = np.trace(R.dot(K)) / var1
    t = mu2 - scale * R.dot(mu1)

    S1_hat = scale * R.dot(S1) + t
    return S1_hat.T if transposed else S1_hat
def compute_similarity_transform_batch(S1, S2):
    """Batched version of compute_similarity_transform (leading batch dim)."""
    aligned = np.zeros_like(S1)
    for idx in range(S1.shape[0]):
        aligned[idx] = compute_similarity_transform(S1[idx], S2[idx])
    return aligned
def reconstruction_error(S1, S2, reduction='mean'):
    """Procrustes-align S1 to S2, then return the mean per-joint distance.

    reduction: 'mean' or 'sum' over the batch; any other value returns the
    per-sample errors unreduced.
    """
    S1_hat = compute_similarity_transform_batch(S1, S2)
    per_sample = np.sqrt(((S1_hat - S2) ** 2).sum(axis=-1)).mean(axis=-1)
    if reduction == 'mean':
        return per_sample.mean()
    if reduction == 'sum':
        return per_sample.sum()
    return per_sample
def scale_and_translation_transform_batch(P, T):
    """
    First normalises the batch of input 3D meshes P so each mesh has mean
    (0, 0, 0) and RMS distance 1 from the mean, then rescales/translates so
    each mesh matches the mean and RMSD of the corresponding mesh in T.
    :param P: (batch_size, N, 3) batch of N-vertex meshes to transform.
    :param T: (batch_size, N, 3) batch of reference meshes.
    :return: P transformed
    """
    num_verts = P.shape[1]
    P_centered = P - np.mean(P, axis=1, keepdims=True)
    P_rms = np.sqrt(np.sum(P_centered ** 2, axis=(1, 2), keepdims=True) / num_verts)
    T_mean = np.mean(T, axis=1, keepdims=True)
    T_rms = np.sqrt(np.sum((T - T_mean) ** 2, axis=(1, 2), keepdims=True) / num_verts)
    return (P_centered / P_rms) * T_rms + T_mean
ab0699548e7e7211536ed0ab9ef96698ce7d7b51 | 2,803 | py | Python | signing/sawtooth_signing/__init__.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | 18 | 2019-03-01T16:50:27.000Z | 2022-02-12T19:47:25.000Z | signing/sawtooth_signing/__init__.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | 10 | 2020-05-12T06:58:15.000Z | 2022-02-26T23:59:35.000Z | signing/sawtooth_signing/__init__.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | 40 | 2019-01-02T18:02:37.000Z | 2022-03-05T06:09:14.000Z | # Copyright 2016, 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from sawtooth_signing.core import NoSuchAlgorithmError
from sawtooth_signing.core import ParseError
from sawtooth_signing.core import SigningError
from sawtooth_signing.secp256k1 import Secp256k1Context
class Signer:
    """Convenience wrapper binding a signing Context to a PrivateKey."""

    def __init__(self, context, private_key):
        self._context = context
        self._private_key = private_key
        self._public_key = None  # derived lazily on first access

    def sign(self, message):
        """Sign *message* (bytes) and return the hex-encoded signature.

        Raises:
            SigningError: if any error occurs during the signing process
        """
        return self._context.sign(message, self._private_key)

    def get_public_key(self):
        """Return (and cache) the public key for this Signer instance."""
        if self._public_key is None:
            self._public_key = self._context.get_public_key(self._private_key)
        return self._public_key
class CryptoFactory:
    """Factory producing Signer instances bound to a single Context."""

    def __init__(self, context):
        self._context = context

    @property
    def context(self):
        """The signing context that backs this factory instance."""
        return self._context

    def new_signer(self, private_key):
        """Return a Signer that signs with *private_key* via this context.

        Args:
            private_key (:obj:`PrivateKey`): a private key

        Returns:
            (:obj:`Signer`): a signer instance
        """
        return Signer(self._context, private_key)
def create_context(algorithm_name):
    """Return a signing Context instance for *algorithm_name*.

    Raises:
        NoSuchAlgorithmError: if the algorithm is unknown
    """
    if algorithm_name != 'secp256k1':
        raise NoSuchAlgorithmError(
            "no such algorithm: {}".format(algorithm_name))
    return Secp256k1Context()
| 28.896907 | 80 | 0.649304 |
8056ab863338091e72b2827e203926eb5c44c45a | 2,717 | py | Python | python/paddle/fluid/tests/unittests/test_dist_mnist_gradient_merge.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 17,085 | 2016-11-18T06:40:52.000Z | 2022-03-31T22:52:32.000Z | python/paddle/fluid/tests/unittests/test_dist_mnist_gradient_merge.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 29,769 | 2016-11-18T06:35:22.000Z | 2022-03-31T16:46:15.000Z | python/paddle/fluid/tests/unittests/test_dist_mnist_gradient_merge.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 4,641 | 2016-11-18T07:43:33.000Z | 2022-03-31T15:15:02.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
from test_dist_base import TestDistBase
import paddle.fluid as fluid
# Base name of this test file, used to label the distributed-training logs.
flag_name = os.path.splitext(__file__)[0]
class TestDistMnistGradMerge(TestDistBase):
    def _setup_config(self):
        # Synchronous NCCL2 training without the reduce strategy.
        self._nccl2_mode = True
        self._sync_mode = True
        self._use_reduce = False

    def test_dist_train(self):
        # Gradient merge requires CUDA; skip silently on CPU-only builds.
        if not fluid.core.is_compiled_with_cuda():
            return
        self.check_with_place(
            "dist_mnist_gradient_merge.py",
            delta=1e-5,
            check_error_log=True,
            log_name=flag_name)
class TestDistMnistGradMergeNoFuse(TestDistBase):
    def _setup_config(self):
        # Same as TestDistMnistGradMerge but with all-reduce fusion disabled.
        self._nccl2_mode = True
        self._sync_mode = True
        self._use_reduce = False
        self._fuse_all_reduce = False

    def test_dist_train(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        self.check_with_place(
            "dist_mnist_gradient_merge.py",
            delta=1e-5,
            check_error_log=True,
            log_name=flag_name + "_no_fuse")
class TestDistMnistGradMergeRawOptimizerBase(TestDistBase):
    def _setup_config(self):
        # Fleet 2.0 API over NCCL2, without reader pre-allocation.
        self._use_fleet_api = True
        self._use_fleet_api_20 = True
        self._nccl2_mode = True
        self._use_reader_alloc = False

    def enable_avg(self):
        # Subclasses override this to turn on gradient-merge averaging.
        return False

    def test_dist_train(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        avg = str(self.enable_avg())
        self.check_with_place(
            "dist_mnist_gradient_merge_raw_optimizer.py",
            delta=1e-5,
            check_error_log=True,
            log_name=flag_name + "_raw_optimizer_gm_avg_" + avg,
            need_envs={
                'FLAGS_apply_pass_to_program': '1',
                'enable_gm_avg': avg,
            })
class TestDistMnistGradMergeRawOptimizerAvg(
        TestDistMnistGradMergeRawOptimizerBase):
    def enable_avg(self):
        """Run the raw-optimizer variant with gradient-merge averaging on."""
        return True
if __name__ == "__main__":
    # Discover and run all the distributed-training test cases defined above.
    unittest.main()
| 30.875 | 74 | 0.655134 |
82e6fafd8e752cf9d26c7faadcc35baf3ec5ecb9 | 777 | py | Python | document_worker/exceptions.py | ds-wizard/document-worker | 09b1f1c29b47d8bd504aea86d0b5cff1a3bbb01b | [
"Apache-2.0"
] | 2 | 2021-04-23T19:28:10.000Z | 2021-04-27T05:30:07.000Z | document_worker/exceptions.py | ds-wizard/document-worker | 09b1f1c29b47d8bd504aea86d0b5cff1a3bbb01b | [
"Apache-2.0"
] | 9 | 2021-04-07T16:48:34.000Z | 2022-02-21T15:48:31.000Z | document_worker/exceptions.py | ds-wizard/document-worker | 09b1f1c29b47d8bd504aea86d0b5cff1a3bbb01b | [
"Apache-2.0"
] | 6 | 2020-03-09T07:44:01.000Z | 2020-12-02T18:26:37.000Z | class JobException(Exception):
def __init__(self, job_id: str, msg: str, exc=None):
self.job_id = job_id
self.msg = msg
self.exc = exc
def log_message(self):
if self.exc is None:
return self.msg
else:
return f'{self.msg}: [{type(self.exc).__name__}] {str(self.exc)}'
def db_message(self):
if self.exc is None:
return self.msg
return f'{self.msg}\n\n' \
f'Exception: {type(self.exc).__name__}\n' \
f'Message: {str(self.exc)}'
def create_job_exception(job_id: str, message: str, exc=None):
    """Wrap *exc* in a JobException unless it already is one."""
    if isinstance(exc, JobException):
        return exc
    return JobException(job_id=job_id, msg=message, exc=exc)
4faad0d19f5dd267c705c93e4c45ce05b6d445d1 | 15,378 | py | Python | picomc/version.py | Zajozor/picomc | 3df03c1191544053ea1e237b9860ed9f4782b029 | [
"MIT"
] | null | null | null | picomc/version.py | Zajozor/picomc | 3df03c1191544053ea1e237b9860ed9f4782b029 | [
"MIT"
] | null | null | null | picomc/version.py | Zajozor/picomc | 3df03c1191544053ea1e237b9860ed9f4782b029 | [
"MIT"
] | null | null | null | import enum
import json
import operator
import os
import posixpath
import shutil
import urllib.parse
import urllib.request
from functools import reduce
from pathlib import PurePath
import requests
from picomc.downloader import DownloadQueue
from picomc.java import get_java_info
from picomc.library import Library
from picomc.logging import logger
from picomc.rules import match_ruleset
from picomc.utils import Directory, die, file_sha1, recur_files
class VersionType(enum.Flag):
    """Bitmask of Minecraft version categories, keyed off the launcher
    manifest's ``type`` field."""

    NONE = 0
    RELEASE = enum.auto()
    SNAPSHOT = enum.auto()
    ALPHA = enum.auto()
    BETA = enum.auto()
    ANY = RELEASE | SNAPSHOT | ALPHA | BETA

    def match(self, s):
        """Return True if manifest type string *s* falls within this mask."""
        by_name = {
            "release": VersionType.RELEASE,
            "snapshot": VersionType.SNAPSHOT,
            "old_alpha": VersionType.ALPHA,
            "old_beta": VersionType.BETA,
        }
        return bool(by_name[s] & self)

    @staticmethod
    def create(release, snapshot, alpha, beta):
        """Build a mask from four booleans, one per category."""
        selected = [
            flag
            for flag, enabled in (
                (VersionType.RELEASE, release),
                (VersionType.SNAPSHOT, snapshot),
                (VersionType.ALPHA, alpha),
                (VersionType.BETA, beta),
            )
            if enabled
        ]
        return reduce(operator.or_, selected, VersionType.NONE)
def argumentadd(d1, d2):
    """Merge two version-spec ``arguments`` dicts.

    For keys present in both dicts, the list values are concatenated with
    d1's entries first. Neither input dict nor its value lists are modified.
    """
    d = dict(d1)
    for k, v in d2.items():
        if k in d:
            # Use `+` (builds a new list) rather than `+=`: the shallow copy
            # above still shares the value lists with d1, and `+=` would
            # mutate them in place.
            d[k] = d[k] + v
        else:
            d[k] = v
    return d
# Unique marker distinguishing "no default supplied" from an explicit None.
_sentinel = object()
# Asset-index descriptor used for "legacy" versions, whose vspecs carry an
# "assets" id but no "assetIndex" entry of their own (see VersionSpec).
LEGACY_ASSETS = {
    "id": "legacy",
    "sha1": "770572e819335b6c0a053f8378ad88eda189fc14",
    "size": 109634,
    "totalSize": 153475165,
    "url": (
        "https://launchermeta.mojang.com/v1/packages/"
        "770572e819335b6c0a053f8378ad88eda189fc14/legacy.json"
    ),
}
class VersionSpec:
    """Version description resolved across the ``inheritsFrom`` chain."""

    def __init__(self, vobj, version_manager):
        self.vobj = vobj
        self.chain = self.resolve_chain(version_manager)
        self.initialize_fields()

    def resolve_chain(self, version_manager):
        """Return [vobj, parent, grandparent, ...] following inheritsFrom."""
        chain = [self.vobj]
        current = self.vobj
        while "inheritsFrom" in current.raw_vspec:
            current = version_manager.get_version(
                current.raw_vspec["inheritsFrom"])
            chain.append(current)
        return chain

    def attr_override(self, attr, default=_sentinel):
        """First occurrence of *attr* walking from the child to the oldest
        ancestor; *default* if absent everywhere (AttributeError if no
        default was supplied)."""
        for version in self.chain:
            if attr in version.raw_vspec:
                return version.raw_vspec[attr]
        if default is _sentinel:
            raise AttributeError(attr)
        return default

    def attr_reduce(self, attr, reduce_func):
        """Fold *attr* values from the oldest ancestor down to the child."""
        values = [v.raw_vspec[attr]
                  for v in reversed(self.chain) if attr in v.raw_vspec]
        if not values:
            raise AttributeError(attr)
        return reduce(reduce_func, values)

    def initialize_fields(self):
        """Populate the resolved spec fields the launcher relies on."""
        try:
            self.minecraftArguments = self.attr_override("minecraftArguments")
        except AttributeError:
            pass  # modern vspecs use "arguments" instead
        try:
            self.arguments = self.attr_reduce("arguments", argumentadd)
        except AttributeError:
            pass  # legacy vspecs use "minecraftArguments" instead
        self.mainClass = self.attr_override("mainClass")
        self.assetIndex = self.attr_override("assetIndex", default=None)
        self.assets = self.attr_override("assets", default="legacy")
        if self.assetIndex is None and self.assets == "legacy":
            self.assetIndex = LEGACY_ASSETS
        # Child libraries take precedence, so prepend them to the ancestors'.
        self.libraries = self.attr_reduce("libraries", lambda acc, nxt: nxt + acc)
        self.jar = self.attr_override("jar", default=self.vobj.version_name)
        self.downloads = self.attr_override("downloads", default={})
class Version:
    """One installable game version: spec, jar, libraries and assets.

    Wraps the raw version JSON (resolved through VersionSpec) and knows how
    to locate, verify and download the client jar, libraries and assets.
    """
    # base URL for individual asset objects, addressed by content hash
    ASSETS_URL = "http://resources.download.minecraft.net/"
    def __init__(self, version_name, launcher, version_manifest):
        # version_manifest is the matching entry from the global version
        # manifest, or None for purely local/custom versions.
        self.version_name = version_name
        self.launcher = launcher
        self.vm = launcher.version_manager
        self.version_manifest = version_manifest
        # cache for get_libraries(), keyed by java.home (see get_libraries)
        self._libraries = dict()
        self.versions_root = self.vm.versions_root
        self.assets_root = self.launcher.get_path(Directory.ASSETS)
        self.raw_vspec = self.get_raw_vspec()
        self.vspec = VersionSpec(self, self.vm)
        if self.vspec.assetIndex is not None:
            self.raw_asset_index = self.get_raw_asset_index(self.vspec.assetIndex)
        self.jarname = self.vspec.jar
        self.jarfile = self.versions_root / self.jarname / "{}.jar".format(self.jarname)
    def get_raw_vspec(self):
        """Return the raw version-spec JSON, downloading it if needed.

        Local/custom versions (no manifest entry) are read from disk only;
        otherwise a cached copy is used when its sha1 matches the manifest.
        """
        vspec_path = (
            self.versions_root / self.version_name / "{}.json".format(self.version_name)
        )
        if not self.version_manifest:
            # no manifest entry: this must be a locally provided version
            if vspec_path.exists():
                logger.debug("Found custom vspec ({})".format(self.version_name))
                with open(vspec_path) as fp:
                    return json.load(fp)
            else:
                die("Specified version ({}) not available".format(self.version_name))
        url = self.version_manifest["url"]
        sha1 = self.version_manifest["sha1"]
        if vspec_path.exists() and file_sha1(vspec_path) == sha1:
            logger.debug(
                "Using cached vspec files, hash matches manifest ({})".format(
                    self.version_name
                )
            )
            with open(vspec_path) as fp:
                return json.load(fp)
        try:
            logger.debug("Downloading vspec file")
            raw = requests.get(url).content
            # cache the spec on disk before parsing it
            vspec_path.parent.mkdir(parents=True, exist_ok=True)
            with open(vspec_path, "wb") as fp:
                fp.write(raw)
            j = json.loads(raw)
            return j
        except requests.ConnectionError:
            die("Failed to retrieve version json file. Check your internet connection.")
    def get_raw_asset_index(self, asset_index_spec):
        """Return the asset-index JSON described by *asset_index_spec*.

        Uses the cached file when its sha1 matches the spec, otherwise
        downloads and caches a fresh copy.
        """
        iid = asset_index_spec["id"]
        url = asset_index_spec["url"]
        sha1 = asset_index_spec["sha1"]
        fpath = self.launcher.get_path(Directory.ASSET_INDEXES, "{}.json".format(iid))
        if fpath.exists() and file_sha1(fpath) == sha1:
            logger.debug("Using cached asset index, hash matches vspec")
            with open(fpath) as fp:
                return json.load(fp)
        try:
            logger.debug("Downloading new asset index")
            raw = requests.get(url).content
            with open(fpath, "wb") as fp:
                fp.write(raw)
            return json.loads(raw)
        except requests.ConnectionError:
            die("Failed to retrieve asset index.")
    def get_raw_asset_index_nodl(self, id_):
        """Return a cached asset index by id without ever downloading."""
        fpath = self.launcher.get_path(Directory.ASSET_INDEXES, "{}.json".format(id_))
        if fpath.exists():
            with open(fpath) as fp:
                return json.load(fp)
        else:
            die("Asset index specified in 'assets' not available.")
    def get_libraries(self, java_info):
        """Return Library objects applicable to the given java installation.

        Libraries whose rules reject *java_info* or that are unavailable
        are filtered out.  Results are cached per java.home.
        """
        if java_info is not None:
            key = java_info.get("java.home", None)
        else:
            key = None
        if key and key in self._libraries:
            return self._libraries[key]
        else:
            libs = []
            for lib in self.vspec.libraries:
                # platform/java rules may exclude this library entirely
                if "rules" in lib and not match_ruleset(lib["rules"], java_info):
                    continue
                lib_obj = Library(lib)
                if not lib_obj.available:
                    continue
                libs.append(lib_obj)
            if key:
                self._libraries[key] = libs
            return libs
    def get_jarfile_dl(self, verify_hashes=False, force=False):
        """Checks existence and hash of cached jar. Returns None if ok, otherwise
        returns download (url, size)."""
        logger.debug("Attempting to use jarfile: {}".format(self.jarfile))
        dlspec = self.vspec.downloads.get("client", None)
        if dlspec is None:
            # no download info: we can only hope a local jar exists
            logger.debug("jarfile dlspec not availble, skipping hash check.")
            if not self.jarfile.exists():
                die("jarfile does not exist and can not be downloaded.")
            return
        logger.debug("Checking jarfile.")
        if (
            force
            or not self.jarfile.exists()
            # The fabric-installer places an empty jarfile here, due to some
            # quirk of an old (git blame 2 years) version of the vanilla launcher.
            # https://github.com/FabricMC/fabric-installer/blob/master/src/main/java/net/fabricmc/installer/client/ClientInstaller.java#L49
            or os.path.getsize(self.jarfile) == 0
            or (verify_hashes and file_sha1(self.jarfile) != dlspec["sha1"])
        ):
            logger.info(
                "Jar file ({}) will be downloaded with libraries.".format(self.jarname)
            )
            return dlspec["url"], dlspec.get("size", None)
    def download_libraries(self, java_info, verify_hashes=False, force=False):
        """Downloads missing (or, with *force*, all) libraries and the jar."""
        logger.info("Checking libraries.")
        q = DownloadQueue()
        for library in self.get_libraries(java_info):
            if not library.available:
                continue
            basedir = self.launcher.get_path(Directory.LIBRARIES)
            abspath = library.get_abspath(basedir)
            # a zero-byte file counts as missing (see fabric note above)
            ok = abspath.is_file() and os.path.getsize(abspath) > 0
            if verify_hashes and library.sha1 is not None:
                ok = ok and file_sha1(abspath) == library.sha1
            if not ok and not library.url:
                logger.error(
                    f"Library {library.filename} is missing or corrupt "
                    "and has no download url."
                )
                continue
            if force or not ok:
                q.add(library.url, library.get_abspath(basedir), library.size)
        # the client jar is downloaded through the same queue
        jardl = self.get_jarfile_dl(verify_hashes, force)
        if jardl is not None:
            url, size = jardl
            q.add(url, self.jarfile, size=size)
        if len(q) > 0:
            logger.info("Downloading {} libraries.".format(len(q)))
            if not q.download():
                logger.error(
                    "Some libraries failed to download. If they are part of a non-vanilla "
                    "profile, the original installer may need to be used."
                )
    def _populate_virtual_assets(self, asset_index, where):
        """Copy hashed asset objects into a plain name-based tree at *where*."""
        for name, obj in asset_index["objects"].items():
            sha = obj["hash"]
            objpath = self.launcher.get_path(Directory.ASSET_OBJECTS, sha[0:2], sha)
            path = where / PurePath(*name.split("/"))
            # Maybe check file hash first? Would that be faster?
            path.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy(objpath, path)
    def get_virtual_asset_path(self):
        """Return the directory for this version's 'virtual' asset layout."""
        return self.launcher.get_path(
            Directory.ASSET_VIRTUAL, self.vspec.assetIndex["id"]
        )
    def prepare_assets_launch(self, gamedir):
        """Map assets into <gamedir>/resources for very old versions."""
        launch_asset_index = self.get_raw_asset_index_nodl(self.vspec.assets)
        is_map_resources = launch_asset_index.get("map_to_resources", False)
        if is_map_resources:
            logger.info("Mapping resources")
            where = gamedir / "resources"
            logger.debug("Resources path: {}".format(where))
            self._populate_virtual_assets(launch_asset_index, where)
    def download_assets(self, verify_hashes=False, force=False):
        """Downloads missing assets and populates the virtual tree if needed."""
        # map of content hash -> size, deduplicating identical objects
        hashes = dict()
        for obj in self.raw_asset_index["objects"].values():
            hashes[obj["hash"]] = obj["size"]
        logger.info("Checking {} assets.".format(len(hashes)))
        is_virtual = self.raw_asset_index.get("virtual", False)
        # snapshot of existing files; cheaper than stat-ing each object
        fileset = set(recur_files(self.assets_root))
        q = DownloadQueue()
        objpath = self.launcher.get_path(Directory.ASSET_OBJECTS)
        for sha in hashes:
            abspath = objpath / sha[0:2] / sha
            ok = abspath in fileset  # file exists
            if verify_hashes:
                ok = ok and file_sha1(abspath) == sha
            if force or not ok:
                url = urllib.parse.urljoin(
                    self.ASSETS_URL, posixpath.join(sha[0:2], sha)
                )
                q.add(url, abspath, size=hashes[sha])
        if len(q) > 0:
            logger.info("Downloading {} assets.".format(len(q)))
            if not q.download():
                logger.warning("Some assets failed to download.")
        if is_virtual:
            logger.info("Copying virtual assets")
            where = self.get_virtual_asset_path()
            logger.debug("Virtual asset path: {}".format(where))
            self._populate_virtual_assets(self.raw_asset_index, where)
    def prepare(self, java_info=None, verify_hashes=False):
        """Download everything needed to run this version."""
        if not java_info:
            java_info = get_java_info(self.launcher.global_config.get("java.path"))
        self.download_libraries(java_info, verify_hashes)
        # raw_asset_index is only set when the vspec provides an asset index
        if hasattr(self, "raw_asset_index"):
            self.download_assets(verify_hashes)
    def prepare_launch(self, gamedir, java_info, verify_hahes=False):
        """Prepare this version and its launch-time assets.

        NOTE(review): parameter name 'verify_hahes' is a typo for
        'verify_hashes'; kept as-is for keyword-argument compatibility.
        """
        self.prepare(java_info, verify_hahes)
        self.prepare_assets_launch(gamedir)
class VersionManager:
    """Fetches and queries the global version manifest.

    Resolves meta-versions ('latest', 'snapshot') to concrete version
    names and constructs Version objects on demand.
    """
    MANIFEST_URL = "https://launchermeta.mojang.com/mc/game/version_manifest_v2.json"
    def __init__(self, launcher):
        self.launcher = launcher
        self.versions_root = launcher.get_path(Directory.VERSIONS)
        # fetched once at construction; may come from the on-disk cache
        self.manifest = self.get_manifest()
    def resolve_version_name(self, v):
        """Takes a metaversion and resolves to a version."""
        if v == "latest":
            v = self.manifest["latest"]["release"]
            logger.debug("Resolved latest -> {}".format(v))
        elif v == "snapshot":
            v = self.manifest["latest"]["snapshot"]
            logger.debug("Resolved snapshot -> {}".format(v))
        return v
    def get_manifest(self):
        """Return the version manifest, preferring a fresh download.

        Falls back to the cached manifest.json when offline; raises
        RuntimeError when neither source is available.
        """
        manifest_filepath = self.launcher.get_path(Directory.VERSIONS, "manifest.json")
        try:
            m = requests.get(self.MANIFEST_URL).json()
            # refresh the on-disk cache for later offline use
            with open(manifest_filepath, "w") as mfile:
                json.dump(m, mfile, indent=4, sort_keys=True)
            return m
        except requests.ConnectionError:
            logger.warning(
                "Failed to retrieve version_manifest. "
                "Check your internet connection."
            )
            try:
                with open(manifest_filepath) as mfile:
                    logger.warning("Using cached version_manifest.")
                    return json.load(mfile)
            except FileNotFoundError:
                logger.warning("Cached version manifest not available.")
                raise RuntimeError("Failed to retrieve version manifest.")
    def version_list(self, vtype=VersionType.RELEASE, local=False):
        """List known version ids of *vtype*, optionally plus local dirs."""
        r = [v["id"] for v in self.manifest["versions"] if vtype.match(v["type"])]
        if local:
            # locally installed versions are marked with a '[local]' suffix
            r += sorted(
                "{} [local]".format(path.name)
                for path in self.versions_root.iterdir()
                if not path.name.startswith(".") and path.is_dir()
            )
        return r
    def get_version(self, version_name):
        """Return a Version for *version_name* (meta-versions allowed)."""
        name = self.resolve_version_name(version_name)
        # manifest entry stays None for local/custom versions
        version_manifest = None
        for ver in self.manifest["versions"]:
            if ver["id"] == name:
                version_manifest = ver
                break
        return Version(name, self.launcher, version_manifest)
| 37.599022 | 139 | 0.597737 |
3fccd7b5013d78cdb03fe6bfb434ab91d7754f9c | 925 | py | Python | HLTrigger/Configuration/python/HLT_75e33/modules/hltEle32WPTightBestGsfNLayerITL1SeededFilter_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltEle32WPTightBestGsfNLayerITL1SeededFilter_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/modules/hltEle32WPTightBestGsfNLayerITL1SeededFilter_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
# HLT configuration fragment: generic egamma cut filter applied after the
# Ele32 WPTight GSF dphi step of the L1-seeded electron path.  It cuts on the
# "NLayerIT" variable of the best GSF track (varTag below), with a threshold
# of 3 in both |eta| regions and no rho/pileup correction.
# NOTE(review): with lessThan=False this presumably requires the variable to
# be at/above the threshold -- confirm against HLTEgammaGenericFilter's
# implementation.
hltEle32WPTightBestGsfNLayerITL1SeededFilter = cms.EDFilter("HLTEgammaGenericFilter",
    absEtaLowEdges = cms.vdouble(0.0, 1.479),
    candTag = cms.InputTag("hltEle32WPTightGsfDphiL1SeededFilter"),
    doRhoCorrection = cms.bool(False),
    effectiveAreas = cms.vdouble(0.0, 0.0),
    energyLowEdges = cms.vdouble(0.0),
    l1EGCand = cms.InputTag("hltEgammaCandidatesL1Seeded"),
    lessThan = cms.bool(False),
    ncandcut = cms.int32(1),
    rhoMax = cms.double(99999999.0),
    rhoScale = cms.double(1.0),
    rhoTag = cms.InputTag(""),
    saveTags = cms.bool(True),
    thrOverE2EB = cms.vdouble(0),
    thrOverE2EE = cms.vdouble(0),
    thrOverEEB = cms.vdouble(0),
    thrOverEEE = cms.vdouble(0),
    thrRegularEB = cms.vdouble(3),
    thrRegularEE = cms.vdouble(3),
    useEt = cms.bool(False),
    varTag = cms.InputTag("hltEgammaBestGsfTrackVarsL1Seeded","NLayerIT")
)
| 37 | 85 | 0.698378 |
af11f77ba516f8432a29818bc64b632261bee80e | 15,456 | py | Python | src/radical/saga/adaptors/redis/redis_advert.py | virthead/radical.saga | 265601d5e62013897de0eead89f522049b43cba9 | [
"MIT"
] | null | null | null | src/radical/saga/adaptors/redis/redis_advert.py | virthead/radical.saga | 265601d5e62013897de0eead89f522049b43cba9 | [
"MIT"
] | null | null | null | src/radical/saga/adaptors/redis/redis_advert.py | virthead/radical.saga | 265601d5e62013897de0eead89f522049b43cba9 | [
"MIT"
] | null | null | null |
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" Redis advert adaptor implementation """
from . import redis_namespace as rns
from ...url import Url
from ...task import Task
from ... import exceptions as rse
from ... import advert as api
from ...utils import misc as rsumisc
from ...adaptors import base as a_base
from ...adaptors.cpi import advert as cpi
from ...adaptors.cpi import decorators as cpi_decs
SYNC_CALL = cpi_decs.SYNC_CALL
ASYNC_CALL = cpi_decs.ASYNC_CALL
###############################################################################
# adaptor info
#
_ADAPTOR_NAME = 'radical.saga.adaptors.advert.redis'
_ADAPTOR_SCHEMAS = ['redis']
_ADAPTOR_OPTIONS = []
_ADAPTOR_CAPABILITIES = {}
_ADAPTOR_DOC = {
'name' : _ADAPTOR_NAME,
'cfg_options' : _ADAPTOR_OPTIONS,
'capabilities' : _ADAPTOR_CAPABILITIES,
'description' : 'The redis advert adaptor.',
'details' : """This adaptor interacts with a redis server to
implement the advert API semantics.""",
'schemas' : {'redis' : 'redis nosql backend.'}
}
_ADAPTOR_INFO = {
'name' : _ADAPTOR_NAME,
'version' : 'v0.2.beta',
'schemas' : _ADAPTOR_SCHEMAS,
'cpis' : [
{
'type' : 'radical.saga.advert.Directory',
'class' : 'RedisDirectory'
},
{
'type' : 'radical.saga.advert.Entry',
'class' : 'RedisEntry'
}
]
}
###############################################################################
# The adaptor class
class Adaptor (a_base.Base):
    """
    This is the actual adaptor class, which gets loaded by SAGA (i.e. by the
    SAGA engine), and which registers the CPI implementation classes which
    provide the adaptor's functionality.
    """
    # --------------------------------------------------------------------------
    #
    def __init__ (self) :
        a_base.Base.__init__ (self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)
        # the adaptor *singleton* creates a (single) instance of a bulk handler
        # (BulkDirectory), which implements container_* bulk methods.
        self._bulk = BulkDirectory ()
        # cache of redis connections, keyed by a canonical connection URL
        self._redis = {}
    # --------------------------------------------------------------------------
    #
    def get_redis (self, url) :
        """
        Return a redis namespace-server connection for `url`, reusing a
        cached connection when one already exists for the same
        host/port/credential combination.
        """
        host     = url.host     if url.host     else None
        port     = url.port     if url.port     else 6379
        username = url.username if url.username else None
        password = url.password if url.password else None
        # Build an unambiguous cache key.  Keeping username and password in
        # their fixed URL positions (redis://user:password@host:port) fixes
        # two problems of the previous scheme: the local name `hash`
        # shadowed the builtin, and a username-only URL and a password-only
        # URL with the same credential string produced identical keys, so
        # two different connections could wrongly be shared.
        key = "redis://%s:%s@%s:%d" % (username or '', password or '', host, port)
        if key not in self._redis :
            self._redis[key] = rns.redis_ns_server (url)
        return self._redis[key]
    # --------------------------------------------------------------------------
    #
    def sanity_check (self) :
        # nothing to check for, redis entry system should always be accessible
        pass
###############################################################################
#
class BulkDirectory (cpi.Directory) :
    """
    Well, this implementation can handle bulks, but cannot optimize them.
    We leave that code here anyway, for demonstration -- but those methods
    are also provided as fallback, and are thusly used if the adaptor does
    not implement the bulk container_* methods at all.
    """
    # --------------------------------------------------------------------------
    #
    def __init__ (self) :
        pass
    # --------------------------------------------------------------------------
    #
    def container_wait (self, tasks, mode, timeout) :
        """
        Wait for all tasks in the container.  Only waiting forever
        (timeout < 0) is supported.
        """
        if timeout >= 0 :
            # the check rejects every non-negative timeout (including 0);
            # the message previously claimed '> 0', contradicting the check
            raise rse.BadParameter ("Cannot handle timeouts >= 0")
        for task in tasks :
            task.wait ()
    # --------------------------------------------------------------------------
    #
    def container_cancel (self, tasks, timeout) :
        """Cancel all tasks in the container, one by one."""
        for task in tasks :
            task.cancel (timeout)
    # --------------------------------------------------------------------------
    #
    def container_copy (self, tasks) :
        """
        A *good* implementation would dig the entry copy operations from the
        tasks, and run them in a bulk -- we can't do that, so simply *run* the
        individual tasks, falling back to the default non-bulk asynchronous copy
        operation...
        """
        for task in tasks :
            task.run ()
    # the container methods for the other calls are obviously similar, and left
    # out here.  The :class:`saga.task.Container` implementation will fall back
    # to the non-bulk async calls for all then.
###############################################################################
#
class RedisDirectory (cpi.Directory) :
    """CPI implementation of the advert Directory API on a redis backend."""
    # --------------------------------------------------------------------------
    #
    def __init__ (self, api, adaptor) :
        self._cpi_base = super (RedisDirectory, self)
        self._cpi_base.__init__ (api, adaptor)
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def init_instance (self, adaptor_state, url, flags, session) :
        """Bind this instance to `url` and open the backend directory."""
        self._url = rsumisc.url_normalize (url)
        self._flags = flags
        self._container = self._adaptor._bulk
        self._set_session (session)
        self._init_check ()
        return self.get_api ()
    # --------------------------------------------------------------------------
    #
    @ASYNC_CALL
    def init_instance_async (self, adaptor_state, url, flags, session, ttype) :
        """Asynchronous variant of init_instance, returning a saga Task."""
        self._url = rsumisc.url_normalize (url)
        self._flags = flags
        self._set_session (session)
        c = {'url' : self._url,
             'flags' : self._flags }
        return Task (self, 'init_instance', c, ttype)
    # --------------------------------------------------------------------------
    #
    def _init_check (self) :
        # (re)connect to the backend and open the directory node for our path
        self._r = self._adaptor.get_redis (self._url)
        self._nsdir = rns.redis_ns_entry.opendir(self._r, self._url.path,
                                                 self._flags)
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def attribute_getter (self, key) :
        """Read a single attribute from the backend directory node."""
        try :
            return self._nsdir.get_key (key)
        except Exception as e :
            self._logger.error ("get_key failed: %s" % e)
            # re-raise without rebinding, preserving the original traceback
            raise
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def attribute_setter (self, key, val) :
        """Write a single attribute to the backend directory node."""
        try :
            self._nsdir.set_key (key, val)
        except Exception as e :
            self._logger.error ("set_key failed: %s" % e)
            raise
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def attribute_lister (self) :
        """Mirror all backend attributes into the API-level object."""
        data = self._nsdir.get_data ()
        for key in list(data.keys ()) :
            self._api ()._attributes_i_set (key, data[key], self._api ()._UP)
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def attribute_caller (self, key, id, cb) :
        """Register/unregister a callback for attribute `key`."""
        self._nsdir.manage_callback (key, id, cb, self.get_api ())
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def get_url (self) :
        """Return the URL this directory was opened with."""
        return self._url
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def is_dir (self, name) :
        """Return True when `name` can be opened as an advert directory."""
        try :
            api.Directory (rsumisc.url_make_absolute (self._url, name))
        except Exception:
            return False
        return True
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def list (self, pattern, flags) :
        """
        List directory entries.  Patterns are not supported; the only
        supported flag is RECURSIVE.
        """
        if pattern :
            raise rse.BadParameter ("pattern for list() not supported")
        ret = []
        if not flags :
            ret = self._nsdir.list ()
        elif flags == api.RECURSIVE :
            # ------------------------------------------------------------------
            def get_kids (path) :
                d = api.Directory (path)
                kids = d.list ()
                for kid in kids :
                    # work on a *copy* of our URL: the previous code aliased
                    # self._url and then assigned kid_url.path, corrupting
                    # this directory's own URL on every iteration.
                    kid_url = Url (str(self._url))
                    kid_url.path = kid
                    if d.is_dir (kid_url) :
                        get_kids (kid_url)
                    ret.append (kid)
            # ------------------------------------------------------------------
            get_kids (self._url)
        else :
            raise rse.BadParameter ("list() only supports the RECURSIVE flag")
        return ret
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def change_dir (self, tgt) :
        """
        Point this directory instance at `tgt`.  On any error the previous
        URL is restored, so the instance stays usable.
        """
        orig_url = self._url
        try :
            if not rsumisc.url_is_compatible (tgt, self._url) :
                raise rse.BadParameter("cannot chdir to %s, leaves namespace"
                                       % tgt)
            self._url = rsumisc.url_make_absolute (tgt, self._url)
            self._init_check ()
        except Exception :
            # restore state on error only -- the previous `finally` clause
            # reset self._url unconditionally, which silently undid every
            # *successful* chdir as well.
            self._url = orig_url
            raise
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def open (self, url, flags) :
        """Open an advert Entry relative to this directory."""
        if not url.scheme and not url.host :
            url = Url (str(self._url) + '/' + str(url))
        return api.Entry (url, flags, self._session, _adaptor=self._adaptor)
    # --------------------------------------------------------------------------
    #
    @SYNC_CALL
    def open_dir (self, url, flags) :
        """Open an advert Directory relative to this directory."""
        if not url.scheme and not url.host :
            url = Url (str(self._url) + '/' + str(url))
        return api.Directory (url, flags, self._session, _adaptor=self._adaptor)
    # --------------------------------------------------------------------------
    # TODO: copy() / copy_async() and the task_wait() bulk helper are not
    # implemented for the redis backend yet (previously present here only as
    # commented-out sketches).
######################################################################
#
# entry adaptor class
#
class RedisEntry(cpi.Entry):
    """CPI implementation of the advert Entry API on a redis backend."""

    def __init__(self, api, adaptor):
        self._cpi_base = super(RedisEntry, self)
        self._cpi_base.__init__(api, adaptor)

    def _dump(self):
        """Debug helper: log the entry's URL and open flags."""
        self._logger.debug("url : %s" % self._url)
        self._logger.debug("flags: %s" % self._flags)

    @SYNC_CALL
    def init_instance(self, adaptor_state, url, flags, session):
        """Bind this CPI instance to `url` and open the backend entry."""
        self._url = url
        self._flags = flags
        self._set_session(session)
        self._init_check()
        return self

    def _init_check(self):
        # connect to redis and open the namespace entry for our path
        server = self._adaptor.get_redis(self._url)
        self._r = server
        self._nsentry = rns.redis_ns_entry.open(server, self._url.path,
                                                self._flags)

    @SYNC_CALL
    def attribute_getter(self, key):
        """Read a single attribute from the backend entry."""
        return self._nsentry.get_key(key)

    @SYNC_CALL
    def attribute_setter(self, key, val):
        """Write a single attribute to the backend entry."""
        return self._nsentry.set_key(key, val)

    @SYNC_CALL
    def attribute_lister(self):
        """Mirror all backend attributes into the API-level object."""
        entry_data = self._nsentry.get_data()
        for key in list(entry_data.keys()):
            self._api()._attributes_i_set(key, entry_data[key], self._api()._UP)

    @SYNC_CALL
    def attribute_caller(self, key, id, cb):
        """Register/unregister a callback for attribute `key`."""
        return self._nsentry.manage_callback(key, id, cb, self.get_api())

    @SYNC_CALL
    def get_url(self):
        """Return the URL this entry was opened with."""
        return self._url

    # TODO: copy_self() is not implemented for the redis backend yet
    # (previously present here only as a commented-out sketch).
# ------------------------------------------------------------------------------
| 28.516605 | 80 | 0.415631 |
8fbe5bdb7dec1f46e7f9ee7a781a374470e451d4 | 7,720 | py | Python | openpifpaf/plugins/coco/constants.py | adujardin/openpifpaf | 4fa79162f5529f5b0de72e2312aab54d410bee3f | [
"CC-BY-2.0"
] | null | null | null | openpifpaf/plugins/coco/constants.py | adujardin/openpifpaf | 4fa79162f5529f5b0de72e2312aab54d410bee3f | [
"CC-BY-2.0"
] | null | null | null | openpifpaf/plugins/coco/constants.py | adujardin/openpifpaf | 4fa79162f5529f5b0de72e2312aab54d410bee3f | [
"CC-BY-2.0"
] | null | null | null | import numpy as np
# Standard COCO person skeleton: 19 connections between the 17 keypoints.
# Joint indices are 1-based (see COCO_KEYPOINTS below).
COCO_PERSON_SKELETON = [
    (16, 14), (14, 12), (17, 15), (15, 13), (12, 13), (6, 12), (7, 13),
    (6, 7), (6, 8), (7, 9), (8, 10), (9, 11), (2, 3), (1, 2), (1, 3),
    (2, 4), (3, 5), (4, 6), (5, 7),
]
# Alternative tree-shaped skeleton rooted at the nose (1-based indices).
KINEMATIC_TREE_SKELETON = [
    (1, 2), (2, 4), # left head
    (1, 3), (3, 5),
    (1, 6),
    (6, 8), (8, 10), # left arm
    (1, 7),
    (7, 9), (9, 11), # right arm
    (6, 12), (12, 14), (14, 16), # left side
    (7, 13), (13, 15), (15, 17),
]
# The 17 COCO keypoint names; list position i corresponds to joint i+1 in
# the (1-based) skeleton definitions above.
COCO_KEYPOINTS = [
    'nose', # 1
    'left_eye', # 2
    'right_eye', # 3
    'left_ear', # 4
    'right_ear', # 5
    'left_shoulder', # 6
    'right_shoulder', # 7
    'left_elbow', # 8
    'right_elbow', # 9
    'left_wrist', # 10
    'right_wrist', # 11
    'left_hip', # 12
    'right_hip', # 13
    'left_knee', # 14
    'right_knee', # 15
    'left_ankle', # 16
    'right_ankle', # 17
]
# Reference pose (x, y, visibility) of an upright standing person, used to
# render the skeleton figures in draw_skeletons().
COCO_UPRIGHT_POSE = np.array([
    [0.0, 9.3, 2.0], # 'nose', # 1
    [-0.35, 9.7, 2.0], # 'left_eye', # 2
    [0.35, 9.7, 2.0], # 'right_eye', # 3
    [-0.7, 9.5, 2.0], # 'left_ear', # 4
    [0.7, 9.5, 2.0], # 'right_ear', # 5
    [-1.4, 8.0, 2.0], # 'left_shoulder', # 6
    [1.4, 8.0, 2.0], # 'right_shoulder', # 7
    [-1.75, 6.0, 2.0], # 'left_elbow', # 8
    [1.75, 6.2, 2.0], # 'right_elbow', # 9
    [-1.75, 4.0, 2.0], # 'left_wrist', # 10
    [1.75, 4.2, 2.0], # 'right_wrist', # 11
    [-1.26, 4.0, 2.0], # 'left_hip', # 12
    [1.26, 4.0, 2.0], # 'right_hip', # 13
    [-1.4, 2.0, 2.0], # 'left_knee', # 14
    [1.4, 2.1, 2.0], # 'right_knee', # 15
    [-1.4, 0.0, 2.0], # 'left_ankle', # 16
    [1.4, 0.1, 2.0], # 'right_ankle', # 17
])
# Reference pose with arms spread ("Vitruvian Man" style); same format as
# COCO_UPRIGHT_POSE.
COCO_DAVINCI_POSE = np.array([
    [0.0, 9.3, 2.0], # 'nose', # 1
    [-0.35, 9.7, 2.0], # 'left_eye', # 2
    [0.35, 9.7, 2.0], # 'right_eye', # 3
    [-0.7, 9.5, 2.0], # 'left_ear', # 4
    [0.7, 9.5, 2.0], # 'right_ear', # 5
    [-1.4, 8.0, 2.0], # 'left_shoulder', # 6
    [1.4, 8.0, 2.0], # 'right_shoulder', # 7
    [-3.3, 9.0, 2.0], # 'left_elbow', # 8
    [3.3, 9.2, 2.0], # 'right_elbow', # 9
    [-4.5, 10.5, 2.0], # 'left_wrist', # 10
    [4.5, 10.7, 2.0], # 'right_wrist', # 11
    [-1.26, 4.0, 2.0], # 'left_hip', # 12
    [1.26, 4.0, 2.0], # 'right_hip', # 13
    [-2.0, 2.0, 2.0], # 'left_knee', # 14
    [2.0, 2.1, 2.0], # 'right_knee', # 15
    [-2.4, 0.0, 2.0], # 'left_ankle', # 16
    [2.4, 0.1, 2.0], # 'right_ankle', # 17
])
# Keypoint-name mapping applied under horizontal image flips.
HFLIP = {
    'left_eye': 'right_eye',
    'right_eye': 'left_eye',
    'left_ear': 'right_ear',
    'right_ear': 'left_ear',
    'left_shoulder': 'right_shoulder',
    'right_shoulder': 'left_shoulder',
    'left_elbow': 'right_elbow',
    'right_elbow': 'left_elbow',
    'left_wrist': 'right_wrist',
    'right_wrist': 'left_wrist',
    'left_hip': 'right_hip',
    'right_hip': 'left_hip',
    'left_knee': 'right_knee',
    'right_knee': 'left_knee',
    'left_ankle': 'right_ankle',
    'right_ankle': 'left_ankle',
}
# Denser skeleton variant with additional redundant connections (1-based
# indices); a superset of most COCO_PERSON_SKELETON connections.
DENSER_COCO_PERSON_SKELETON = [
    (1, 2), (1, 3), (2, 3), (1, 4), (1, 5), (4, 5),
    (1, 6), (1, 7), (2, 6), (3, 7),
    (2, 4), (3, 5), (4, 6), (5, 7), (6, 7),
    (6, 12), (7, 13), (6, 13), (7, 12), (12, 13),
    (6, 8), (7, 9), (8, 10), (9, 11), (6, 10), (7, 11),
    (8, 9), (10, 11),
    (10, 12), (11, 13),
    (10, 14), (11, 15),
    (14, 12), (15, 13), (12, 15), (13, 14),
    (12, 16), (13, 17),
    (16, 14), (17, 15), (14, 17), (15, 16),
    (14, 15), (16, 17),
]
# Connections that exist only in the dense skeleton, not in the standard one.
DENSER_COCO_PERSON_CONNECTIONS = [
    c
    for c in DENSER_COCO_PERSON_SKELETON
    if c not in COCO_PERSON_SKELETON
]
# Per-keypoint sigma values (relative localization tolerance), one per entry
# of COCO_KEYPOINTS.
COCO_PERSON_SIGMAS = [
    0.026, # nose
    0.025, # eyes
    0.025, # eyes
    0.035, # ears
    0.035, # ears
    0.079, # shoulders
    0.079, # shoulders
    0.072, # elbows
    0.072, # elbows
    0.062, # wrists
    0.062, # wrists
    0.107, # hips
    0.107, # hips
    0.087, # knees
    0.087, # knees
    0.089, # ankles
    0.089, # ankles
]
# Scoring weights: the first three keypoints (nose and eyes) are weighted 3x.
COCO_PERSON_SCORE_WEIGHTS = [3.0] * 3 + [1.0] * (len(COCO_KEYPOINTS) - 3)
# The COCO object-detection category names.
COCO_CATEGORIES = [
    'person',
    'bicycle',
    'car',
    'motorcycle',
    'airplane',
    'bus',
    'train',
    'truck',
    'boat',
    'traffic light',
    'fire hydrant',
    'street sign',
    'stop sign',
    'parking meter',
    'bench',
    'bird',
    'cat',
    'dog',
    'horse',
    'sheep',
    'cow',
    'elephant',
    'bear',
    'zebra',
    'giraffe',
    'hat',
    'backpack',
    'umbrella',
    'shoe',
    'eye glasses',
    'handbag',
    'tie',
    'suitcase',
    'frisbee',
    'skis',
    'snowboard',
    'sports ball',
    'kite',
    'baseball bat',
    'baseball glove',
    'skateboard',
    'surfboard',
    'tennis racket',
    'bottle',
    'plate',
    'wine glass',
    'cup',
    'fork',
    'knife',
    'spoon',
    'bowl',
    'banana',
    'apple',
    'sandwich',
    'orange',
    'broccoli',
    'carrot',
    'hot dog',
    'pizza',
    'donut',
    'cake',
    'chair',
    'couch',
    'potted plant',
    'bed',
    'mirror',
    'dining table',
    'window',
    'desk',
    'toilet',
    'door',
    'tv',
    'laptop',
    'mouse',
    'remote',
    'keyboard',
    'cell phone',
    'microwave',
    'oven',
    'toaster',
    'sink',
    'refrigerator',
    'blender',
    'book',
    'clock',
    'vase',
    'scissors',
    'teddy bear',
    'hair drier',
    'toothbrush',
    'hair brush',
]
def draw_skeletons(pose):
    """Render the COCO skeleton variants for *pose* into docs/*.png.

    Creates three figures: the standard COCO skeleton, the kinematic-tree
    skeleton, and the dense skeleton (drawn behind the standard one).
    """
    import openpifpaf  # pylint: disable=import-outside-toplevel
    openpifpaf.show.KeypointPainter.show_joint_scales = True
    keypoint_painter = openpifpaf.show.KeypointPainter()
    # scale sigmas by sqrt of the pose bounding-box area, so marker sizes
    # are proportional to the rendered figure
    scale = np.sqrt(
        (np.max(pose[:, 0]) - np.min(pose[:, 0]))
        * (np.max(pose[:, 1]) - np.min(pose[:, 1]))
    )
    sigmas = np.array(COCO_PERSON_SIGMAS) * scale

    def make_annotation(skeleton):
        # one annotation per skeleton variant; pose and sigmas are shared
        ann = openpifpaf.Annotation(keypoints=COCO_KEYPOINTS,
                                    skeleton=skeleton,
                                    score_weights=COCO_PERSON_SCORE_WEIGHTS)
        ann.set(pose, sigmas)
        return ann

    ann = make_annotation(COCO_PERSON_SKELETON)
    with openpifpaf.show.Canvas.annotation(
            ann, filename='docs/skeleton_coco.png') as ax:
        keypoint_painter.annotation(ax, ann)
    ann_kin = make_annotation(KINEMATIC_TREE_SKELETON)
    with openpifpaf.show.Canvas.annotation(
            ann_kin, filename='docs/skeleton_kinematic_tree.png') as ax:
        keypoint_painter.annotation(ax, ann_kin)
    ann_dense = make_annotation(DENSER_COCO_PERSON_SKELETON)
    # the dense skeleton is rendered as background behind the standard one
    with openpifpaf.show.Canvas.annotation(
            ann, ann_bg=ann_dense, filename='docs/skeleton_dense.png') as ax:
        keypoint_painter.annotation(ax, ann_dense)
def print_associations():
    """Print every skeleton connection as a 'joint - joint' name pair."""
    for joint_a, joint_b in COCO_PERSON_SKELETON:
        name_a = COCO_KEYPOINTS[joint_a - 1]
        name_b = COCO_KEYPOINTS[joint_b - 1]
        print(name_a, '-', name_b)
if __name__ == '__main__':
    # CLI entry point: dump the joint associations and regenerate the
    # documentation figures for the upright reference pose.
    print_associations()
    # c, s = np.cos(np.radians(45)), np.sin(np.radians(45))
    # rotate = np.array(((c, -s), (s, c)))
    # rotated_pose = np.copy(COCO_DAVINCI_POSE)
    # rotated_pose[:, :2] = np.einsum('ij,kj->ki', rotate, rotated_pose[:, :2])
    draw_skeletons(COCO_UPRIGHT_POSE)
| 26.081081 | 79 | 0.48886 |
02c946fc18d98653b7474d615a03d7d4f3b707ce | 3,968 | py | Python | the_auto.py | racrbmr/treasure-bot | 01981a864cffd4f32c9e2b4fcee29040cbd9d24d | [
"Unlicense"
] | 1 | 2021-04-20T15:40:17.000Z | 2021-04-20T15:40:17.000Z | the_auto.py | racrbmr/treasure-bot | 01981a864cffd4f32c9e2b4fcee29040cbd9d24d | [
"Unlicense"
] | null | null | null | the_auto.py | racrbmr/treasure-bot | 01981a864cffd4f32c9e2b4fcee29040cbd9d24d | [
"Unlicense"
] | null | null | null | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time, requests
# --- interactive setup: ask the user for current storage, repetition count
# --- and the referral link used by auto() below.
print("")
print("--- Treasure Referral BOT ---")
print("")
# current total storage; only used for the summary printed at the end
before = int(input("how much is ur total storage now?(GB): "))
print("    1x process = 10 GB")
# number of referral sign-ups to perform (each adds 10 GB)
how_many = int(input("how many process do u want?: "))
if how_many <= 0:
    print("    Nope, minimum 1x process")
    exit()
else:
    pass
print("")
print("The bot will repeat the process for", how_many, "time(s) or increase in storage of", how_many * 10, "GB")
print("---- it is recommended to not go over 5x -----")
time.sleep(3)
# expected storage gain, reported in the final summary
after = how_many * 10
treasure = input("Input ur treasure ref link: ")
def auto():
    """Run one referral sign-up via two Selenium-driven Chrome windows.

    driver_1 opens the referral link and fills the sign-up form with
    identity data from api.namefake.com; driver_2 drives a throw-away
    inbox on emailfake.com to fetch the activation code.  The fixed
    delay() sleeps pace the flow against page loads and email delivery.
    """
    delay = time.sleep
    driver_1 = webdriver.Chrome(ChromeDriverManager().install())
    driver_2 = webdriver.Chrome(ChromeDriverManager().install())
    print("")
    print("now get chill, don't u dare push/press anything. ok?")
    print("1x process estimated for 5 minutes")
    print("")
    delay(5)
    tmpr_email = "https://emailfake.com"
    the_api = 'https://api.namefake.com'
    driver_1.get(treasure)
    # random identity: name, password and a disposable email address
    data_gen = requests.get(the_api).json()
    the_name = (data_gen['name'])
    passwds = (data_gen['password'])
    e_u = (data_gen['email_u'])
    e_d = (data_gen['email_d'])
    #uuid for fulfill the requirement of the password length.
    #i got some case the password is less than requirement's length
    uuid = (data_gen['uuid'])
    driver_1.maximize_window()
    driver_2.maximize_window()
    delay(4)
    print("Maximizing Window...")
    delay(2)
    print("getting tempr email cred")
    # open the disposable inbox and copy its address to the clipboard
    driver_2.get(tmpr_email + '/' + e_u + '@' + e_d)
    delay(4)
    driver_2.find_element_by_xpath('//*[@id="copbtn"]').click()
    delay(2)
    # paste the copied email address into the sign-up form
    driver_1.find_element_by_xpath('//*[@id="mat-input-0"]').send_keys(Keys.CONTROL, "v")
    delay(2)
    #name
    driver_1.find_element_by_xpath('//*[@id="mat-input-1"]').send_keys(the_name)
    print("some name...")
    delay(7)
    #passwd
    driver_1.find_element_by_xpath('//*[@id="mat-input-2"]').send_keys(passwds + uuid)
    print("some password...")
    delay(7)
    #confir-passwd
    driver_1.find_element_by_xpath('//*[@id="mat-input-3"]').send_keys(passwds + uuid)
    delay(7)
    #uncheckbox
    driver_1.find_element_by_xpath('//*[@id="opt-in-checkbox"]/label/div').click()
    delay(7)
    #signup
    driver_1.find_element_by_xpath('//*[@id="continue-button"]').click()
    print("finishing...")
    delay(7)
    #yes_continue
    driver_1.find_element_by_xpath('//*[@id="mat-dialog-0"]/app-alert-dialog/div[3]/button[2]').click()
    delay(15)
    print("registering...")
    delay(1)
    print("")
    print("patient ya bosquu, waiting for the email")
    # wait for the activation email, then refresh the inbox and read it
    delay(40)
    driver_2.find_element_by_xpath('//*[@id="refresh"]/div').click()
    link = driver_2.find_element_by_xpath('//*[@id="hs_cos_wrapper_module_16158863191553_"]/p[1]/span/strong').text
    delay(7)
    print("get the activation code.")
    print("inputting the code")
    delay(2)
    driver_1.find_element_by_xpath('//*[@id="verify-code-form"]/div[1]/div[1]/input').send_keys(link)
    delay(7)
    #finish
    driver_1.find_element_by_xpath('//*[@id="finish-button"]').click()
    print("")
    print("COMPLETE! just wait a bit ok, in case ur internet is like a snail")
    delay(15)
    driver_1.quit()
    driver_2.quit()
#done
# Run the sign-up flow ``how_many`` times.  The original code special-cased
# ``how_many == 1`` (resetting it to 0) and then looped ``range(how_many - 1)``
# followed by one trailing ``auto()`` call -- which is exactly equivalent to
# simply looping ``how_many`` times.
for _ in range(how_many):
    auto()
print("")
print("Before:", before, "GB of Storage")
print("After:", before + after, "GB of Storage")
print("ENJOY BOSQU")
print("")
print("   by: racrbmr")
print("   github.com/racrbmr")
print("")
| 27.555556 | 116 | 0.622732 |
d0096702d39cd0eccdfc81c1ee491d2cf53dfb4b | 42,828 | py | Python | photutils/detection/findstars.py | nden/photutils | 87879b2464ccfcd160f6a0c53ea4c0869a6e1cc2 | [
"BSD-3-Clause"
] | null | null | null | photutils/detection/findstars.py | nden/photutils | 87879b2464ccfcd160f6a0c53ea4c0869a6e1cc2 | [
"BSD-3-Clause"
] | null | null | null | photutils/detection/findstars.py | nden/photutils | 87879b2464ccfcd160f6a0c53ea4c0869a6e1cc2 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes for detecting stars in an astronomical
image. The convention is that all star-finding classes are subclasses of
an abstract base class called ``StarFinderBase``. Each star-finding
class should define a method called ``find_stars`` that finds stars in
an image.
"""
import abc
import math
import warnings
import numpy as np
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.table import Table
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils import lazyproperty
from .core import find_peaks
from ..utils._moments import _moments, _moments_central
from ..utils.convolution import filter_data
from ..utils.misc import _ABCMetaAndInheritDocstrings
__all__ = ['StarFinderBase', 'DAOStarFinder', 'IRAFStarFinder']
class _StarFinderKernel:
"""
Class to calculate a 2D Gaussian density enhancement kernel.
The kernel has negative wings and sums to zero. It is used by both
`DAOStarFinder` and `IRAFStarFinder`.
Parameters
----------
fwhm : float
The full-width half-maximum (FWHM) of the major axis of the
Gaussian kernel in units of pixels.
ratio : float, optional
The ratio of the minor and major axis standard deviations of the
Gaussian kernel. ``ratio`` must be strictly positive and less
than or equal to 1.0. The default is 1.0 (i.e., a circular
Gaussian kernel).
theta : float, optional
The position angle (in degrees) of the major axis of the
Gaussian kernel, measured counter-clockwise from the positive x
axis.
sigma_radius : float, optional
The truncation radius of the Gaussian kernel in units of sigma
(standard deviation) [``1 sigma = FWHM /
2.0*sqrt(2.0*log(2.0))``]. The default is 1.5.
normalize_zerosum : bool, optional
Whether to normalize the Gaussian kernel to have zero sum, The
default is `True`, which generates a density-enhancement kernel.
Notes
-----
The class attributes include the dimensions of the elliptical kernel
and the coefficients of a 2D elliptical Gaussian function expressed
as:
``f(x,y) = A * exp(-g(x,y))``
where
``g(x,y) = a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2``
References
----------
.. [1] http://en.wikipedia.org/wiki/Gaussian_function
"""
def __init__(self, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5,
normalize_zerosum=True):
if fwhm < 0:
raise ValueError('fwhm must be positive.')
if ratio <= 0 or ratio > 1:
raise ValueError('ratio must be positive and less or equal '
'than 1.')
if sigma_radius <= 0:
raise ValueError('sigma_radius must be positive.')
self.fwhm = fwhm
self.ratio = ratio
self.theta = theta
self.sigma_radius = sigma_radius
self.xsigma = self.fwhm * gaussian_fwhm_to_sigma
self.ysigma = self.xsigma * self.ratio
theta_radians = np.deg2rad(self.theta)
cost = np.cos(theta_radians)
sint = np.sin(theta_radians)
xsigma2 = self.xsigma**2
ysigma2 = self.ysigma**2
self.a = (cost**2 / (2.0 * xsigma2)) + (sint**2 / (2.0 * ysigma2))
# CCW
self.b = 0.5 * cost * sint * ((1.0 / xsigma2) - (1.0 / ysigma2))
self.c = (sint**2 / (2.0 * xsigma2)) + (cost**2 / (2.0 * ysigma2))
# find the extent of an ellipse with radius = sigma_radius*sigma;
# solve for the horizontal and vertical tangents of an ellipse
# defined by g(x,y) = f
self.f = self.sigma_radius**2 / 2.0
denom = (self.a * self.c) - self.b**2
# nx and ny are always odd
self.nx = 2 * int(max(2, math.sqrt(self.c * self.f / denom))) + 1
self.ny = 2 * int(max(2, math.sqrt(self.a * self.f / denom))) + 1
self.xc = self.xradius = self.nx // 2
self.yc = self.yradius = self.ny // 2
# define the kernel on a 2D grid
yy, xx = np.mgrid[0:self.ny, 0:self.nx]
self.circular_radius = np.sqrt((xx - self.xc)**2 + (yy - self.yc)**2)
self.elliptical_radius = (self.a * (xx - self.xc)**2 +
2.0 * self.b * (xx - self.xc) *
(yy - self.yc) +
self.c * (yy - self.yc)**2)
self.mask = np.where(
(self.elliptical_radius <= self.f) |
(self.circular_radius <= 2.0), 1, 0).astype(np.int)
self.npixels = self.mask.sum()
# NOTE: the central (peak) pixel of gaussian_kernel has a value of 1.
self.gaussian_kernel_unmasked = np.exp(-self.elliptical_radius)
self.gaussian_kernel = self.gaussian_kernel_unmasked * self.mask
# denom = variance * npixels
denom = ((self.gaussian_kernel**2).sum() -
(self.gaussian_kernel.sum()**2 / self.npixels))
self.relerr = 1.0 / np.sqrt(denom)
# normalize the kernel to zero sum
if normalize_zerosum:
self.data = ((self.gaussian_kernel -
(self.gaussian_kernel.sum() / self.npixels)) /
denom) * self.mask
else:
self.data = self.gaussian_kernel
self.shape = self.data.shape
return
class _StarCutout:
"""
Class to hold a 2D image cutout of a single star for the star finder
classes.
Parameters
----------
data : 2D array_like
The cutout 2D image from the input unconvolved 2D image.
convdata : 2D array_like
The cutout 2D image from the convolved 2D image.
slices : tuple of two slices
A tuple of two slices representing the minimal box of the cutout
from the original image.
xpeak, ypeak : float
The (x, y) pixel coordinates of the peak pixel.
kernel : `_StarFinderKernel`
The convolution kernel. The shape of the kernel must match that
of the input ``data``.
threshold_eff : float
The absolute image value above which to select sources. This
threshold should be the threshold value input to the star finder
class multiplied by the kernel relerr.
"""
def __init__(self, data, convdata, slices, xpeak, ypeak, kernel,
threshold_eff):
self.data = data
self.convdata = convdata
self.slices = slices
self.xpeak = xpeak
self.ypeak = ypeak
self.kernel = kernel
self.threshold_eff = threshold_eff
self.shape = data.shape
self.nx = self.shape[1] # always odd
self.ny = self.shape[0] # always odd
self.cutout_xcenter = int(self.nx // 2)
self.cutout_ycenter = int(self.ny // 2)
self.xorigin = self.slices[1].start # in original image
self.yorigin = self.slices[0].start # in original image
self.mask = kernel.mask # kernel mask
self.npixels = kernel.npixels # unmasked pixels
self.data_masked = self.data * self.mask
class _DAOFind_Properties:
    """
    Class to calculate the properties of each detected star, as defined
    by `DAOFIND`_.

    Parameters
    ----------
    star_cutout : `_StarCutout`
        A `_StarCutout` object containing the image cutout for the star.

    kernel : `_StarFinderKernel`
        The convolution kernel.  The shape of the kernel must match that
        of the input ``star_cutout``.

    sky : float, optional
        The local sky level around the source.  ``sky`` is used only to
        calculate the source peak value, flux, and magnitude.  The
        default is 0.

    .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
    """

    def __init__(self, star_cutout, kernel, sky=0.):
        if not isinstance(star_cutout, _StarCutout):
            raise ValueError('data must be an _StarCutout object')

        if star_cutout.data.shape != kernel.shape:
            raise ValueError('cutout and kernel must have the same shape')

        self.cutout = star_cutout
        self.kernel = kernel
        self.sky = sky  # DAOFIND has no sky input -> same as sky=0.

        # convenience aliases into the cutout
        self.data = star_cutout.data
        self.data_masked = star_cutout.data_masked
        self.npixels = star_cutout.npixels  # unmasked pixels
        self.nx = star_cutout.nx
        self.ny = star_cutout.ny
        self.xcenter = star_cutout.cutout_xcenter
        self.ycenter = star_cutout.cutout_ycenter

    @lazyproperty
    def data_peak(self):
        """The unconvolved image value at the cutout center."""
        return self.data[self.ycenter, self.xcenter]

    @lazyproperty
    def conv_peak(self):
        """The convolved image value at the cutout center."""
        return self.cutout.convdata[self.ycenter, self.xcenter]

    @lazyproperty
    def roundness1(self):
        """
        The DAOFIND "SROUND" statistic: the ratio of a measure of the
        source's bilateral (2-fold) to four-fold symmetry, computed from
        quadrant sums of the convolved cutout.
        """
        # set the central (peak) pixel to zero
        cutout_conv = self.cutout.convdata.copy()
        cutout_conv[self.ycenter, self.xcenter] = 0.0

        # calculate the four roundness quadrants
        quad1 = cutout_conv[0:self.ycenter + 1, self.xcenter + 1:]
        quad2 = cutout_conv[0:self.ycenter, 0:self.xcenter + 1]
        quad3 = cutout_conv[self.ycenter:, 0:self.xcenter]
        quad4 = cutout_conv[self.ycenter + 1:, self.xcenter:]

        sum2 = -quad1.sum() + quad2.sum() - quad3.sum() + quad4.sum()
        if sum2 == 0:
            return 0.

        sum4 = np.abs(cutout_conv).sum()
        if sum4 <= 0:
            # NOTE: returns None (not NaN) in this degenerate case
            return None

        return 2.0 * sum2 / sum4

    @lazyproperty
    def sharpness(self):
        """
        The DAOFIND sharpness statistic: the peak pixel minus the mean
        of the surrounding unmasked pixels, divided by the convolved
        peak value.
        """
        npixels = self.npixels - 1  # exclude the peak pixel
        data_mean = (np.sum(self.data_masked) - self.data_peak) / npixels

        return (self.data_peak - data_mean) / self.conv_peak

    def daofind_marginal_fit(self, axis=0):
        """
        Fit 1D Gaussians, defined from the marginal x/y kernel
        distributions, to the marginal x/y distributions of the original
        (unconvolved) image.

        These fits are used to calculate the star centroid and roundness
        ("GROUND") properties.

        Parameters
        ----------
        axis : {0, 1}, optional
            The axis for which the marginal fit is performed:

            * 0: for the x axis
            * 1: for the y axis

        Returns
        -------
        dx : float
            The fractional shift in x or y (depending on ``axis`` value)
            of the image centroid relative to the maximum pixel.

        hx : float
            The height of the best-fitting Gaussian to the marginal x or
            y (depending on ``axis`` value) distribution of the
            unconvolved source data.
        """
        # define triangular weighting functions along each axis, peaked
        # in the middle and equal to one at the edge
        x = self.xcenter - np.abs(np.arange(self.nx) - self.xcenter) + 1
        y = self.ycenter - np.abs(np.arange(self.ny) - self.ycenter) + 1
        xwt, ywt = np.meshgrid(x, y)

        if axis == 0:  # marginal distributions along x axis
            wt = xwt[0]  # 1D
            wts = ywt  # 2D
            size = self.nx
            center = self.xcenter
            sigma = self.kernel.xsigma
            dxx = center - np.arange(size)
        elif axis == 1:  # marginal distributions along y axis
            wt = np.transpose(ywt)[0]  # 1D
            wts = xwt  # 2D
            size = self.ny
            center = self.ycenter
            sigma = self.kernel.ysigma
            dxx = np.arange(size) - center

        # compute marginal sums for given axis
        wt_sum = np.sum(wt)
        dx = center - np.arange(size)

        # weighted marginal sums
        kern_sum_1d = np.sum(self.kernel.gaussian_kernel_unmasked * wts,
                             axis=axis)
        kern_sum = np.sum(kern_sum_1d * wt)
        kern2_sum = np.sum(kern_sum_1d**2 * wt)

        dkern_dx = kern_sum_1d * dx
        dkern_dx_sum = np.sum(dkern_dx * wt)
        dkern_dx2_sum = np.sum(dkern_dx**2 * wt)
        kern_dkern_dx_sum = np.sum(kern_sum_1d * dkern_dx * wt)

        data_sum_1d = np.sum(self.data * wts, axis=axis)
        data_sum = np.sum(data_sum_1d * wt)
        data_kern_sum = np.sum(data_sum_1d * kern_sum_1d * wt)
        data_dkern_dx_sum = np.sum(data_sum_1d * dkern_dx * wt)
        data_dx_sum = np.sum(data_sum_1d * dxx * wt)

        # perform linear least-squares fit (where data = sky + hx*kernel)
        # to find the amplitude (hx)
        # reject the star if the fit amplitude is not positive
        hx_numer = data_kern_sum - (data_sum * kern_sum) / wt_sum
        if hx_numer <= 0.:
            return np.nan, np.nan

        hx_denom = kern2_sum - (kern_sum**2 / wt_sum)
        if hx_denom <= 0.:
            return np.nan, np.nan

        # compute fit amplitude
        hx = hx_numer / hx_denom

        # sky = (data_sum - (hx * kern_sum)) / wt_sum

        # compute centroid shift; follows the DAOFIND marginal-fit
        # formulation
        dx = ((kern_dkern_dx_sum -
               (data_dkern_dx_sum - dkern_dx_sum*data_sum)) /
              (hx * dkern_dx2_sum / sigma**2))

        # if the shift is larger than half the cutout, fall back to a
        # simple intensity-weighted shift (or zero)
        hsize = size / 2.
        if abs(dx) > hsize:
            if data_sum == 0.:
                dx = 0.0
            else:
                dx = data_dx_sum / data_sum
                if abs(dx) > hsize:
                    dx = 0.0

        return dx, hx

    @lazyproperty
    def dx_hx(self):
        """(centroid shift, fit height) of the x-axis marginal fit."""
        return self.daofind_marginal_fit(axis=0)

    @lazyproperty
    def dy_hy(self):
        """(centroid shift, fit height) of the y-axis marginal fit."""
        return self.daofind_marginal_fit(axis=1)

    @lazyproperty
    def dx(self):
        return self.dx_hx[0]

    @lazyproperty
    def dy(self):
        return self.dy_hy[0]

    @lazyproperty
    def xcentroid(self):
        """x centroid in the original image (peak + fitted shift)."""
        return self.cutout.xpeak + self.dx

    @lazyproperty
    def ycentroid(self):
        """y centroid in the original image (peak + fitted shift)."""
        return self.cutout.ypeak + self.dy

    @lazyproperty
    def hx(self):
        return self.dx_hx[1]

    @lazyproperty
    def hy(self):
        return self.dy_hy[1]

    @lazyproperty
    def roundness2(self):
        """
        The star roundness.

        This roundness parameter represents the ratio of the difference
        in the height of the best fitting Gaussian function in x minus
        the best fitting Gaussian function in y, divided by the average
        of the best fitting Gaussian functions in x and y.  A circular
        source will have a zero roundness.  A source extended in x or y
        will have a negative or positive roundness, respectively.
        """
        if np.isnan(self.hx) or np.isnan(self.hy):
            return np.nan
        else:
            return 2.0 * (self.hx - self.hy) / (self.hx + self.hy)

    @lazyproperty
    def peak(self):
        """The sky-subtracted peak pixel value."""
        return self.data_peak - self.sky

    @lazyproperty
    def npix(self):
        """
        The total number of pixels in the rectangular cutout image.
        """
        return self.data.size

    @lazyproperty
    def flux(self):
        """
        Flux as defined by DAOFIND: peak density in the convolved image
        divided by the effective detection threshold, minus the sky
        contribution over the cutout.
        """
        return ((self.conv_peak / self.cutout.threshold_eff) -
                (self.sky * self.npix))

    @lazyproperty
    def mag(self):
        """Instrumental magnitude, ``-2.5 * log10(flux)`` (NaN if flux <= 0)."""
        if self.flux <= 0:
            return np.nan
        else:
            return -2.5 * np.log10(self.flux)
class _IRAFStarFind_Properties:
    """
    Class to calculate the properties of each detected star, as defined
    by IRAF's ``starfind`` task.

    Parameters
    ----------
    star_cutout : `_StarCutout`
        A `_StarCutout` object containing the image cutout for the star.

    kernel : `_StarFinderKernel`
        The convolution kernel.  The shape of the kernel must match that
        of the input ``star_cutout``.

    sky : `None` or float, optional
        The local sky level around the source.  If sky is ``None``, then
        a local sky level will be (crudely) estimated using the IRAF
        ``starfind`` calculation.
    """

    def __init__(self, star_cutout, kernel, sky=None):
        if not isinstance(star_cutout, _StarCutout):
            raise ValueError('data must be an _StarCutout object')

        if star_cutout.data.shape != kernel.shape:
            raise ValueError('cutout and kernel must have the same shape')

        self.cutout = star_cutout
        self.kernel = kernel

        if sky is None:
            # crude local-sky estimate: mean of the cutout pixels that
            # fall outside the kernel mask
            # (``np.bool`` was removed in NumPy 1.24; use the builtin)
            skymask = ~self.kernel.mask.astype(bool)  # 1=sky, 0=obj
            nsky = np.count_nonzero(skymask)
            if nsky == 0:
                # no sky pixels available; fall back to the difference
                # between the data and convolved-data maxima
                mean_sky = (np.max(self.cutout.data) -
                            np.max(self.cutout.convdata))
            else:
                mean_sky = np.sum(self.cutout.data * skymask) / nsky
            self.sky = mean_sky
        else:
            self.sky = sky

    # NOTE: the original code also defined a ``sky`` lazyproperty whose
    # getter was simply ``return self.sky``.  Because ``self.sky`` is
    # always assigned in ``__init__``, the getter was dead code -- and
    # had it ever actually run, it would have recursed infinitely.  It
    # has been removed; ``self.sky`` remains a plain instance attribute
    # with unchanged external behavior.

    @lazyproperty
    def data(self):
        """Sky-subtracted, masked cutout with negative pixels zeroed."""
        cutout = np.array((self.cutout.data - self.sky) * self.cutout.mask)

        # IRAF starfind discards negative pixels
        cutout = np.where(cutout > 0, cutout, 0)

        return cutout

    @lazyproperty
    def moments(self):
        """First-order raw image moments of the cutout."""
        return _moments(self.data, order=1)

    @lazyproperty
    def cutout_xcentroid(self):
        return self.moments[0, 1] / self.moments[0, 0]

    @lazyproperty
    def cutout_ycentroid(self):
        return self.moments[1, 0] / self.moments[0, 0]

    @lazyproperty
    def xcentroid(self):
        """x centroid in the original image frame."""
        return self.cutout_xcentroid + self.cutout.xorigin

    @lazyproperty
    def ycentroid(self):
        """y centroid in the original image frame."""
        return self.cutout_ycentroid + self.cutout.yorigin

    @lazyproperty
    def npix(self):
        """Number of positive (non-zero) pixels in the cutout."""
        return np.count_nonzero(self.data)

    @lazyproperty
    def peak(self):
        return np.max(self.data)

    @lazyproperty
    def flux(self):
        return np.sum(self.data)

    @lazyproperty
    def mag(self):
        # NOTE(review): unlike _DAOFind_Properties.mag, a non-positive
        # flux is not special-cased here; log10(0) emits a warning and
        # yields -inf.  Preserved for behavioral compatibility.
        return -2.5 * np.log10(self.flux)

    @lazyproperty
    def moments_central(self):
        """Second-order central moments, normalized by the zeroth moment."""
        return _moments_central(
            self.data, (self.cutout_xcentroid, self.cutout_ycentroid),
            order=2) / self.moments[0, 0]

    @lazyproperty
    def mu_sum(self):
        return self.moments_central[0, 2] + self.moments_central[2, 0]

    @lazyproperty
    def mu_diff(self):
        return self.moments_central[0, 2] - self.moments_central[2, 0]

    @lazyproperty
    def fwhm(self):
        """Object FWHM derived from the second-order central moments."""
        return 2.0 * np.sqrt(np.log(2.0) * self.mu_sum)

    @lazyproperty
    def sharpness(self):
        """Ratio of the object FWHM to the kernel FWHM."""
        return self.fwhm / self.kernel.fwhm

    @lazyproperty
    def roundness(self):
        """Ellipticity-like roundness from the second-order moments."""
        return np.sqrt(self.mu_diff**2 +
                       4.0 * self.moments_central[1, 1]**2) / self.mu_sum

    @lazyproperty
    def pa(self):
        """Position angle in degrees, CCW from the positive x axis, in [0, 180)."""
        pa = np.rad2deg(0.5 * np.arctan2(2.0 * self.moments_central[1, 1],
                                         self.mu_diff))
        if pa < 0.:
            pa += 180.

        return pa
def _find_stars(data, kernel, threshold_eff, min_separation=None,
                mask=None, exclude_border=False):
    """
    Find stars in an image.

    Parameters
    ----------
    data : 2D array_like
        The 2D array of the image.

    kernel : `_StarFinderKernel`
        The convolution kernel.

    threshold_eff : float
        The absolute image value above which to select sources.  This
        threshold should be the threshold input to the star finder class
        multiplied by the kernel relerr.

    min_separation : float, optional
        Minimum separation (in pixels) between detected objects.  If
        `None` (the DAOFind mode), the kernel mask is used as the local
        peak-finder footprint; otherwise a circular footprint of this
        radius is used (the IRAF starfind mode).

    mask : 2D bool array, optional
        A boolean mask with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when searching for stars.

    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by IRAF's `DAOFIND`_ and
        `starfind`_ tasks.

    Returns
    -------
    objects : list of `_StarCutout`
        A list of `_StarCutout` objects containing the image cutout for
        each source.

    .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind

    .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
    """
    convolved_data = filter_data(data, kernel.data, mode='constant',
                                 fill_value=0.0, check_normalization=False)

    # define a local footprint for the peak finder
    if min_separation is None:  # daofind
        # NOTE: ``np.bool`` was removed in NumPy 1.24; use the builtin
        footprint = kernel.mask.astype(bool)
    else:
        # define a circular footprint
        idx = np.arange(-min_separation, min_separation + 1)
        xx, yy = np.meshgrid(idx, idx)
        footprint = np.array((xx**2 + yy**2) <= min_separation**2, dtype=int)

    # pad the data and convolved image by the kernel x/y radius to allow
    # for detections near the edges
    if not exclude_border:
        ypad = kernel.yradius
        xpad = kernel.xradius
        pad = ((ypad, ypad), (xpad, xpad))
        # (the old ``mode = str('constant')`` workaround for numpy < 0.11
        # is long obsolete)
        data = np.pad(data, pad, mode='constant', constant_values=[0.])
        if mask is not None:
            mask = np.pad(mask, pad, mode='constant', constant_values=[0.])
        convolved_data = np.pad(convolved_data, pad, mode='constant',
                                constant_values=[0.])

    # find local peaks in the convolved data
    tbl = find_peaks(convolved_data, threshold_eff, footprint=footprint,
                     mask=mask)
    if len(tbl) == 0:
        return []

    coords = np.transpose([tbl['y_peak'], tbl['x_peak']])

    star_cutouts = []
    for (ypeak, xpeak) in coords:
        # now extract the object from the data, centered on the peak
        # pixel in the convolved image, with the same size as the kernel
        x0 = xpeak - kernel.xradius
        x1 = xpeak + kernel.xradius + 1
        y0 = ypeak - kernel.yradius
        y1 = ypeak + kernel.yradius + 1

        if x0 < 0 or x1 > data.shape[1]:
            continue  # pragma: no cover
        if y0 < 0 or y1 > data.shape[0]:
            continue  # pragma: no cover

        slices = (slice(y0, y1), slice(x0, x1))
        data_cutout = data[slices]
        convdata_cutout = convolved_data[slices]

        # correct pixel values for the previous image padding
        if not exclude_border:
            x0 -= kernel.xradius
            x1 -= kernel.xradius
            y0 -= kernel.yradius
            y1 -= kernel.yradius
            xpeak -= kernel.xradius
            ypeak -= kernel.yradius
            slices = (slice(y0, y1), slice(x0, x1))

        star_cutouts.append(_StarCutout(data_cutout, convdata_cutout, slices,
                                        xpeak, ypeak, kernel, threshold_eff))

    return star_cutouts
class StarFinderBase(metaclass=_ABCMetaAndInheritDocstrings):
    """
    Abstract base class for star finders.

    Concrete subclasses must implement `find_stars`.  Instances are
    callable; calling one is equivalent to calling its `find_stars`
    method.
    """

    def __call__(self, data, mask=None):
        # delegate so a finder instance can be used as a plain callable
        return self.find_stars(data, mask=mask)

    @abc.abstractmethod
    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.

        Parameters
        ----------
        data : 2D array_like
            The 2D image array.

        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``; `True`
            entries mark masked pixels, which are ignored when
            searching for stars.

        Returns
        -------
        table : `~astropy.table.Table`
            A table of found stars.  If no stars are found then an
            empty table is returned.
        """
        raise NotImplementedError('Needs to be implemented in a subclass.')
class DAOStarFinder(StarFinderBase):
    """
    Detect stars in an image using the DAOFIND (`Stetson 1987
    <http://adsabs.harvard.edu/abs/1987PASP...99..191S>`_) algorithm.

    DAOFIND (`Stetson 1987; PASP 99, 191
    <http://adsabs.harvard.edu/abs/1987PASP...99..191S>`_) searches
    images for local density maxima that have a peak amplitude greater
    than ``threshold`` (approximately; ``threshold`` is applied to a
    convolved image) and have a size and shape similar to the defined 2D
    Gaussian kernel.  The Gaussian kernel is defined by the ``fwhm``,
    ``ratio``, ``theta``, and ``sigma_radius`` input parameters.

    ``DAOStarFinder`` finds the object centroid by fitting the marginal x
    and y 1D distributions of the Gaussian kernel to the marginal x and
    y distributions of the input (unconvolved) ``data`` image.

    ``DAOStarFinder`` calculates the object roundness using two methods. The
    ``roundlo`` and ``roundhi`` bounds are applied to both measures of
    roundness.  The first method (``roundness1``; called ``SROUND`` in
    `DAOFIND`_) is based on the source symmetry and is the ratio of a
    measure of the object's bilateral (2-fold) to four-fold symmetry.
    The second roundness statistic (``roundness2``; called ``GROUND`` in
    `DAOFIND`_) measures the ratio of the difference in the height of
    the best fitting Gaussian function in x minus the best fitting
    Gaussian function in y, divided by the average of the best fitting
    Gaussian functions in x and y.  A circular source will have a zero
    roundness.  A source extended in x or y will have a negative or
    positive roundness, respectively.

    The sharpness statistic measures the ratio of the difference between
    the height of the central pixel and the mean of the surrounding
    non-bad pixels in the convolved image, to the height of the best
    fitting Gaussian function at that point.

    Parameters
    ----------
    threshold : float
        The absolute image value above which to select sources.

    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the
        Gaussian kernel in units of pixels.

    ratio : float, optional
        The ratio of the minor to major axis standard deviations of the
        Gaussian kernel.  ``ratio`` must be strictly positive and less
        than or equal to 1.0.  The default is 1.0 (i.e., a circular
        Gaussian kernel).

    theta : float, optional
        The position angle (in degrees) of the major axis of the
        Gaussian kernel measured counter-clockwise from the positive x
        axis.

    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        (2.0*sqrt(2.0*log(2.0)))``].

    sharplo : float, optional
        The lower bound on sharpness for object detection.

    sharphi : float, optional
        The upper bound on sharpness for object detection.

    roundlo : float, optional
        The lower bound on roundness for object detection.

    roundhi : float, optional
        The upper bound on roundness for object detection.

    sky : float, optional
        The background sky level of the image.  Setting ``sky`` affects
        only the output values of the object ``peak``, ``flux``, and
        ``mag`` values.  The default is 0.0, which should be used to
        replicate the results from `DAOFIND`_.

    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by `DAOFIND`_.

    brightest : int, None, optional
        Number of brightest objects to keep after sorting the full object list.
        If ``brightest`` is set to `None`, all objects will be selected.

    peakmax : float, None, optional
        Maximum peak pixel value in an object.  Only objects whose peak pixel
        values are *strictly smaller* than ``peakmax`` will be selected.
        This may be used to exclude saturated sources.  By default, when
        ``peakmax`` is set to `None`, all objects will be selected.

        .. warning::
            `DAOStarFinder` automatically excludes objects whose peak
            pixel values are negative.  Therefore, setting ``peakmax`` to a
            non-positive value would result in exclusion of all objects.

    See Also
    --------
    IRAFStarFinder

    Notes
    -----
    For the convolution step, this routine sets pixels beyond the image
    borders to 0.0.  The equivalent parameters in `DAOFIND`_ are
    ``boundary='constant'`` and ``constant=0.0``.

    The main differences between `~photutils.detection.DAOStarFinder`
    and `~photutils.detection.IRAFStarFinder` are:

    * `~photutils.detection.IRAFStarFinder` always uses a 2D
      circular Gaussian kernel, while
      `~photutils.detection.DAOStarFinder` can use an elliptical
      Gaussian kernel.

    * `~photutils.detection.IRAFStarFinder` calculates the objects'
      centroid, roundness, and sharpness using image moments.

    References
    ----------
    .. [1] Stetson, P. 1987; PASP 99, 191 (http://adsabs.harvard.edu/abs/1987PASP...99..191S)
    .. [2] http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind

    .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
    """

    def __init__(self, threshold, fwhm, ratio=1.0, theta=0.0,
                 sigma_radius=1.5, sharplo=0.2, sharphi=1.0, roundlo=-1.0,
                 roundhi=1.0, sky=0.0, exclude_border=False,
                 brightest=None, peakmax=None):
        if not np.isscalar(threshold):
            raise TypeError('threshold must be a scalar value.')
        self.threshold = threshold

        if not np.isscalar(fwhm):
            raise TypeError('fwhm must be a scalar value.')
        self.fwhm = fwhm

        self.ratio = ratio
        self.theta = theta
        self.sigma_radius = sigma_radius
        self.sharplo = sharplo
        self.sharphi = sharphi
        self.roundlo = roundlo
        self.roundhi = roundhi
        self.sky = sky
        self.exclude_border = exclude_border

        self.kernel = _StarFinderKernel(self.fwhm, self.ratio, self.theta,
                                        self.sigma_radius)
        # effective detection threshold in the convolved image
        self.threshold_eff = self.threshold * self.kernel.relerr
        self.brightest = brightest
        self.peakmax = peakmax

    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.

        Parameters
        ----------
        data : 2D array_like
            The 2D image array.

        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``, where a
            `True` value indicates the corresponding element of ``data``
            is masked.  Masked pixels are ignored when searching for
            stars.

        Returns
        -------
        table : `~astropy.table.Table`
            A table of found stars with the following parameters:

            * ``id``: unique object identification number.
            * ``xcentroid, ycentroid``: object centroid.
            * ``sharpness``: object sharpness.
            * ``roundness1``: object roundness based on symmetry.
            * ``roundness2``: object roundness based on marginal Gaussian
              fits.
            * ``npix``: the total number of pixels in the Gaussian kernel
              array.
            * ``sky``: the input ``sky`` parameter.
            * ``peak``: the peak, sky-subtracted, pixel value of the object.
            * ``flux``: the object flux calculated as the peak density in
              the convolved image divided by the detection threshold.  This
              derivation matches that of `DAOFIND`_ if ``sky`` is 0.0.
            * ``mag``: the object instrumental magnitude calculated as
              ``-2.5 * log10(flux)``.  The derivation matches that of
              `DAOFIND`_ if ``sky`` is 0.0.

            If no stars are found then an empty table is returned.

        .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
        """
        star_cutouts = _find_stars(data, self.kernel, self.threshold_eff,
                                   mask=mask,
                                   exclude_border=self.exclude_border)
        self._star_cutouts = star_cutouts

        columns = ('id', 'xcentroid', 'ycentroid', 'sharpness', 'roundness1',
                   'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag')
        # NOTE: ``np.float_`` was removed in NumPy 2.0; ``np.float64``
        # is the same type and works on all NumPy versions.
        coltypes = (np.int_, np.float64, np.float64, np.float64, np.float64,
                    np.float64, np.int_, np.float64, np.float64, np.float64,
                    np.float64)

        if len(star_cutouts) == 0:
            warnings.warn('No sources were found.', AstropyUserWarning)
            return Table(names=columns, dtype=coltypes)

        star_props = []
        for star_cutout in star_cutouts:
            props = _DAOFind_Properties(star_cutout, self.kernel, self.sky)

            # a NaN marginal fit means the source is not star-like
            # (this also rejects objects with negative peaks)
            if np.isnan(props.dx_hx).any() or np.isnan(props.dy_hy).any():
                continue
            if (props.sharpness <= self.sharplo or
                    props.sharpness >= self.sharphi):
                continue
            if (props.roundness1 <= self.roundlo or
                    props.roundness1 >= self.roundhi):
                continue
            if (props.roundness2 <= self.roundlo or
                    props.roundness2 >= self.roundhi):
                continue
            if self.peakmax is not None and props.peak >= self.peakmax:
                continue
            star_props.append(props)

        nstars = len(star_props)
        if nstars == 0:
            warnings.warn('Sources were found, but none pass the sharpness '
                          'and roundness criteria.', AstropyUserWarning)
            return Table(names=columns, dtype=coltypes)

        if self.brightest is not None:
            # keep only the ``brightest`` highest-flux sources, while
            # preserving the original detection order
            fluxes = [props.flux for props in star_props]
            idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist())
            star_props = [star_props[k] for k in idx]
            nstars = len(star_props)

        table = Table()
        table['id'] = np.arange(nstars) + 1
        for column in columns[1:]:
            table[column] = [getattr(props, column) for props in star_props]

        return table
class IRAFStarFinder(StarFinderBase):
"""
Detect stars in an image using IRAF's "starfind" algorithm.
`IRAFStarFinder` searches images for local density maxima that have
a peak amplitude greater than ``threshold`` above the local
background and have a PSF full-width at half-maximum similar to the
input ``fwhm``. The objects' centroid, roundness (ellipticity), and
sharpness are calculated using image moments.
Parameters
----------
threshold : float
The absolute image value above which to select sources.
fwhm : float
The full-width half-maximum (FWHM) of the 2D circular Gaussian
kernel in units of pixels.
minsep_fwhm : float, optional
The minimum separation for detected objects in units of
``fwhm``.
sigma_radius : float, optional
The truncation radius of the Gaussian kernel in units of sigma
(standard deviation) [``1 sigma = FWHM /
2.0*sqrt(2.0*log(2.0))``].
sharplo : float, optional
The lower bound on sharpness for object detection.
sharphi : float, optional
The upper bound on sharpness for object detection.
roundlo : float, optional
The lower bound on roundness for object detection.
roundhi : float, optional
The upper bound on roundness for object detection.
sky : float, optional
The background sky level of the image. Inputing a ``sky`` value
will override the background sky estimate. Setting ``sky``
affects only the output values of the object ``peak``, ``flux``,
and ``mag`` values. The default is ``None``, which means the
sky value will be estimated using the `starfind`_ method.
exclude_border : bool, optional
Set to `True` to exclude sources found within half the size of
the convolution kernel from the image borders. The default is
`False`, which is the mode used by `starfind`_.
brightest : int, None, optional
Number of brightest objects to keep after sorting the full object list.
If ``brightest`` is set to `None`, all objects will be selected.
peakmax : float, None, optional
Maximum peak pixel value in an object. Only objects whose peak pixel
values are *strictly smaller* than ``peakmax`` will be selected.
This may be used to exclude saturated sources. By default, when
``peakmax`` is set to `None`, all objects will be selected.
.. warning::
`IRAFStarFinder` automatically excludes objects whose peak
pixel values are negative. Therefore, setting ``peakmax`` to a
non-positive value would result in exclusion of all objects.
Notes
-----
For the convolution step, this routine sets pixels beyond the image
borders to 0.0. The equivalent parameters in IRAF's `starfind`_ are
``boundary='constant'`` and ``constant=0.0``.
IRAF's `starfind`_ uses ``hwhmpsf``, ``fradius``, and ``sepmin`` as
input parameters. The equivalent input values for
`IRAFStarFinder` are:
* ``fwhm = hwhmpsf * 2``
* ``sigma_radius = fradius * sqrt(2.0*log(2.0))``
* ``minsep_fwhm = 0.5 * sepmin``
The main differences between `~photutils.detection.DAOStarFinder`
and `~photutils.detection.IRAFStarFinder` are:
* `~photutils.detection.IRAFStarFinder` always uses a 2D
circular Gaussian kernel, while
`~photutils.detection.DAOStarFinder` can use an elliptical
Gaussian kernel.
* `~photutils.detection.IRAFStarFinder` calculates the objects'
centroid, roundness, and sharpness using image moments.
See Also
--------
DAOStarFinder
References
----------
.. [1] http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
.. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
"""
def __init__(self, threshold, fwhm, sigma_radius=1.5, minsep_fwhm=2.5,
sharplo=0.5, sharphi=2.0, roundlo=0.0, roundhi=0.2, sky=None,
exclude_border=False, brightest=None, peakmax=None):
if not np.isscalar(threshold):
raise TypeError('threshold must be a scalar value.')
self.threshold = threshold
if not np.isscalar(fwhm):
raise TypeError('fwhm must be a scalar value.')
self.fwhm = fwhm
self.sigma_radius = sigma_radius
self.minsep_fwhm = minsep_fwhm
self.sharplo = sharplo
self.sharphi = sharphi
self.roundlo = roundlo
self.roundhi = roundhi
self.sky = sky
self.exclude_border = exclude_border
self.min_separation = max(2, int((self.fwhm * self.minsep_fwhm) +
0.5))
self.kernel = _StarFinderKernel(self.fwhm, ratio=1.0, theta=0.0,
sigma_radius=self.sigma_radius)
self.brightest = brightest
self.peakmax = peakmax
    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.
        Parameters
        ----------
        data : 2D array_like
            The 2D image array.
        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``, where a
            `True` value indicates the corresponding element of ``data``
            is masked. Masked pixels are ignored when searching for
            stars.
        Returns
        -------
        table : `~astropy.table.Table`
            A table of found objects with the following parameters:
            * ``id``: unique object identification number.
            * ``xcentroid, ycentroid``: object centroid.
            * ``fwhm``: object FWHM.
            * ``sharpness``: object sharpness.
            * ``roundness``: object roundness.
            * ``pa``: object position angle (degrees counter clockwise from
              the positive x axis).
            * ``npix``: the total number of (positive) unmasked pixels.
            * ``sky``: the local ``sky`` value.
            * ``peak``: the peak, sky-subtracted, pixel value of the object.
            * ``flux``: the object instrumental flux.
            * ``mag``: the object instrumental magnitude calculated as
              ``-2.5 * log10(flux)``.
            If no stars are found then an empty table is returned.
        .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
        """
        # Detect candidate peaks by convolving with the Gaussian kernel,
        # thresholding, and enforcing the minimum source separation.
        star_cutouts = _find_stars(data, self.kernel, self.threshold,
                                   min_separation=self.min_separation,
                                   mask=mask,
                                   exclude_border=self.exclude_border)
        # Kept for inspection/debugging of the raw detections.
        self._star_cutouts = star_cutouts
        columns = ('id', 'xcentroid', 'ycentroid', 'fwhm', 'sharpness',
                   'roundness', 'pa', 'npix', 'sky', 'peak', 'flux', 'mag')
        coltypes = (np.int_, np.float_, np.float_, np.float_, np.float_,
                    np.float_, np.float_, np.int_, np.float_, np.float_,
                    np.float_, np.float_)
        if len(star_cutouts) == 0:
            warnings.warn('No sources were found.', AstropyUserWarning)
            # Empty table with the documented schema.
            return Table(names=columns, dtype=coltypes)
        # Filter the candidates on sharpness, roundness, and peak value.
        star_props = []
        for star_cutout in star_cutouts:
            props = _IRAFStarFind_Properties(star_cutout, self.kernel,
                                             self.sky)
            # star cutout needs more than one non-zero value
            if np.count_nonzero(props.data) <= 1:
                continue
            # sharplo/sharphi and roundlo/roundhi are exclusive bounds.
            if (props.sharpness <= self.sharplo or
                    props.sharpness >= self.sharphi):
                continue
            if (props.roundness <= self.roundlo or
                    props.roundness >= self.roundhi):
                continue
            if self.peakmax is not None and props.peak >= self.peakmax:
                continue
            star_props.append(props)
        nstars = len(star_props)
        if nstars == 0:
            warnings.warn('Sources were found, but none pass the sharpness '
                          'and roundness criteria.', AstropyUserWarning)
            return Table(names=columns, dtype=coltypes)
        if self.brightest is not None:
            # Keep only the `brightest` objects by flux, preserving the
            # original detection order (hence the sorted index list).
            fluxes = [props.flux for props in star_props]
            idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist())
            star_props = [star_props[k] for k in idx]
            nstars = len(star_props)
        # Assemble the output table; ids are 1-based.
        table = Table()
        table['id'] = np.arange(nstars) + 1
        for column in columns[1:]:
            table[column] = [getattr(props, column) for props in star_props]
        return table
| 35.104918 | 93 | 0.603741 |
28d5a712e692e4b74cc97f6ae7997625711bca69 | 511 | py | Python | tests/rules/test_java.py | eoinjordan/thefeck | e04f50409ba3069ec6a9f7c0aab39ca835a41b68 | [
"MIT"
] | null | null | null | tests/rules/test_java.py | eoinjordan/thefeck | e04f50409ba3069ec6a9f7c0aab39ca835a41b68 | [
"MIT"
] | null | null | null | tests/rules/test_java.py | eoinjordan/thefeck | e04f50409ba3069ec6a9f7c0aab39ca835a41b68 | [
"MIT"
] | null | null | null | import pytest
from thefeck.rules.java import match, get_new_command
from thefeck.types import Command
@pytest.mark.parametrize('command', [
Command('java bar.java', ''),
Command('java bar.java', '')])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('java bar.java', ''), 'java bar'),
(Command('java bar.java', ''), 'java bar')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| 28.388889 | 53 | 0.700587 |
5b77aa0054b3444766c6523bada6d68236c28cbb | 3,098 | py | Python | tests/functional_tests/daemon_info.py | Aifolin/motifcoin | 82c3c5378240f43e6cfde762c4c2dbc92b645cc3 | [
"MIT"
] | null | null | null | tests/functional_tests/daemon_info.py | Aifolin/motifcoin | 82c3c5378240f43e6cfde762c4c2dbc92b645cc3 | [
"MIT"
] | null | null | null | tests/functional_tests/daemon_info.py | Aifolin/motifcoin | 82c3c5378240f43e6cfde762c4c2dbc92b645cc3 | [
"MIT"
] | 1 | 2019-08-05T13:04:45.000Z | 2019-08-05T13:04:45.000Z | #!/usr/bin/env python3
# Copyright (c) 2018 The Motif Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test daemon RPC calls
Test the following RPCs:
- get_info
- hard_fork_info
"""
from __future__ import print_function
from framework.daemon import Daemon
class DaemonGetInfoTest():
    """Exercises the daemon's ``hard_fork_info`` and ``get_info`` RPCs.

    Assumes a regtest/fakechain daemon whose difficulty is fixed to 1.
    """

    def run_test(self):
        """Run all sub-tests in order."""
        self._test_hardfork_info()
        self._test_get_info()

    def _test_hardfork_info(self):
        """``hard_fork_info`` must report the first hard fork at height 1."""
        print('Test hard_fork_info')
        daemon = Daemon()
        res = daemon.hard_fork_info()
        # hard_fork version should be set at height 1
        assert 'earliest_height' in res.keys()
        assert res.earliest_height == 1

    def _test_get_info(self):
        """``get_info`` must describe the fakechain test network."""
        print('Test get_info')
        daemon = Daemon()
        res = daemon.get_info()
        # difficulty should be set to 1 for this test
        assert 'difficulty' in res.keys()
        assert res.difficulty == 1
        # nettype should be FAKECHAIN, i.e. neither TESTNET nor STAGENET
        assert 'testnet' in res.keys()
        assert res.testnet == False
        assert 'stagenet' in res.keys()
        assert res.stagenet == False
        assert 'nettype' in res.keys()
        assert res.nettype == "fakechain"
        # free_space should be > 0
        assert 'free_space' in res.keys()
        assert res.free_space > 0
        # at least the genesis block must exist
        assert 'height' in res.keys()
        assert res.height >= 1
DaemonGetInfoTest().run_test()
| 34.043956 | 89 | 0.699484 |
5900ec63d2c5e9b2d42184e1b3ae1ee6fa5be0d5 | 55 | py | Python | utils/__init__.py | scimk/path_deepzoom | 2d3012082527df81998d7913ea04cb01664f3650 | [
"MIT"
] | null | null | null | utils/__init__.py | scimk/path_deepzoom | 2d3012082527df81998d7913ea04cb01664f3650 | [
"MIT"
] | null | null | null | utils/__init__.py | scimk/path_deepzoom | 2d3012082527df81998d7913ea04cb01664f3650 | [
"MIT"
] | null | null | null | __all__ = ["config", "crossdomains", "db", "deepzoom"]
| 27.5 | 54 | 0.636364 |
ccee1afc6c6f85bd2bb6ee744f2f9e326d81ae15 | 6,184 | py | Python | optimus/engines/ibis/columns.py | niallscc/Optimus | 35218401556e5acc4beb2859084128ebcd1ab4e5 | [
"Apache-2.0"
] | null | null | null | optimus/engines/ibis/columns.py | niallscc/Optimus | 35218401556e5acc4beb2859084128ebcd1ab4e5 | [
"Apache-2.0"
] | null | null | null | optimus/engines/ibis/columns.py | niallscc/Optimus | 35218401556e5acc4beb2859084128ebcd1ab4e5 | [
"Apache-2.0"
] | null | null | null | import re
import pandas as pd
from ibis.expr.types import TableExpr
from sklearn import preprocessing
from optimus.engines.base.commons.functions import impute, string_to_index, index_to_string
from optimus.engines.base.dataframe.columns import DataFrameBaseColumns
from optimus.helpers.columns import parse_columns, prepare_columns
from optimus.helpers.constants import Actions
from optimus.helpers.converter import format_dict
from optimus.helpers.core import val_to_list
from optimus.infer import is_str, is_tuple, is_dict
DataFrame = TableExpr
class Cols(DataFrameBaseColumns):
    """Column-level operations for the Ibis engine backend.

    The wrapped Optimus dataframe is exposed as ``self.root`` and its
    underlying (Ibis/pandas) data as ``self.root.data``.
    """

    def __init__(self, df):
        super(DataFrameBaseColumns, self).__init__(df)

    def _names(self):
        """Return the column names of the underlying data."""
        return list(self.root.data.columns)

    def append(self, dfs):
        """Concatenate the columns of ``dfs`` next to this dataframe's columns.

        :param dfs: Optimus dataframe whose columns are appended.
        :return: a new Optimus dataframe with the combined columns.
        """
        dfd = self.root.data
        dfd = pd.concat([dfs.data.reset_index(drop=True), dfd.reset_index(drop=True)], axis=1)
        return self.root.new(dfd)

    def dtypes(self, columns="*"):
        """Return a dict mapping the selected column names to their Ibis types."""
        df = self.root
        columns = parse_columns(df, columns)
        return dict(df.data[columns].schema().items())

    def agg_exprs(self, columns, funcs, *args, compute=True, tidy=True):
        """Apply one or more aggregation functions to the selected columns.

        :param columns: columns to aggregate over.
        :param funcs: aggregation function or list of functions.
        :param args: extra positional args forwarded to each function.
        :param compute: unused here; kept for API parity with other engines.
        :param tidy: when True, single-valued results are simplified.
        :return: ``{func_name: {col_name: value}}`` (possibly simplified).
        """
        df = self.root
        columns = parse_columns(df, columns)
        funcs = val_to_list(funcs)
        all_funcs = []
        for col_name in columns:
            for func in funcs:
                all_funcs.append({func.__name__: {col_name: self.exec_agg(func(df.data[col_name], *args))}})
        # Merge the per-column results under each function name.
        result = {}
        for i in all_funcs:
            for x, y in i.items():
                result.setdefault(x, {}).update(y)
        return format_dict(result, tidy)

    @staticmethod
    def exec_agg(exprs, compute=None):
        """
        Execute an aggregation.
        :param exprs: Ibis aggregation expression, or an already-computed dict.
        :param compute: unused; kept for API parity with other engines.
        :return: the materialized aggregation result.
        """
        if is_dict(exprs):
            result = exprs
        else:
            result = exprs.execute()
        return result

    @staticmethod
    def to_timestamp(input_cols, date_format=None, output_cols=None):
        """Not implemented for the Ibis engine."""
        pass

    def impute(self, input_cols, data_type="continuous", strategy="mean", fill_value=None, output_cols=None):
        """Impute missing values in the selected columns.

        Bug fix: the ``data_type``, ``strategy`` and ``output_cols``
        arguments are now forwarded to the shared implementation instead
        of being silently replaced by their defaults.
        """
        df = self.root
        # NOTE(review): ``fill_value`` is accepted for API parity but the
        # shared implementation does not take it — confirm upstream.
        return impute(df, input_cols, data_type=data_type, strategy=strategy,
                      output_cols=output_cols)

    @staticmethod
    def astype(*args, **kwargs):
        """Not implemented for the Ibis engine."""
        pass

    def apply(self, input_cols, func=None, func_return_type=None, args=None, func_type=None, when=None,
              filter_col_by_dtypes=None, output_cols=None, skip_output_cols_processing=False,
              meta_action=Actions.APPLY_COLS.value, mode="pandas", set_index=False, default=None, **kwargs):
        """Apply ``func`` to each selected column and return a new dataframe.

        ``args`` may be a single value or a tuple of extra positional
        arguments for ``func``.
        """
        columns = prepare_columns(self.root, input_cols, output_cols, filter_by_column_dtypes=filter_col_by_dtypes,
                                  accepts_missing_cols=True, default=default)
        kw_columns = {}
        # Normalize ``args`` so it can always be star-expanded below.
        if args is None:
            args = (None,)
        elif not is_tuple(args):
            args = (args,)
        for input_col, output_col in columns:
            kw_columns.update({output_col: func(self.root.data[input_col], *args)})
        return self.root.new(self.root.data.mutate(**kw_columns))

    def find(self, columns, sub, ignore_case=False):
        """
        Find the start and end position for a char or substring.
        :param columns: columns to scan.
        :param ignore_case: also match lower-cased variants of ``sub``.
        :param sub: substring (or list of substrings) to look for.
        :return: the dataframe with a ``<col>__match_positions__`` column added.
        """
        # Bug fix: this was declared as a @staticmethod while referencing
        # ``self`` in its body, which raised a NameError on every call.
        df = self.root
        columns = parse_columns(df, columns)
        sub = val_to_list(sub)

        def get_match_positions(_value, _separator):
            """Return [[start, end], ...] for every match, or None."""
            result = None
            if is_str(_value):
                # Using re.IGNORECASE in finditer not seems to work, so
                # lower-cased separators are matched explicitly instead.
                if ignore_case is True:
                    _separator = _separator + [s.lower() for s in _separator]
                regex = re.compile('|'.join(_separator))
                length = [[match.start(), match.end()] for match in
                          regex.finditer(_value)]
                result = length if len(length) > 0 else None
            return result

        for col_name in columns:
            # Categorical columns can not handle a list inside a list as a
            # return value (e.g. [[1, 2], [6, 7]]), so cast to object first.
            df[col_name + "__match_positions__"] = df[col_name].astype("object").apply(get_match_positions,
                                                                                      args=(sub,))
        return df

    @staticmethod
    def scatter(columns, buckets=10):
        """Not implemented for the Ibis engine."""
        pass

    def count_by_dtypes(self, columns, dtype):
        """Count matching/missing/mismatching values per column for ``dtype``."""
        df = self.root
        result = {}
        df_len = len(df)
        for col_name, na_count in df.cols.count_na(columns, tidy=False)["count_na"].items():
            # value_counts() omits absent categories, so False may be missing.
            mismatches_count = df.cols.is_match(col_name, dtype).value_counts().to_dict().get(False)
            mismatches_count = 0 if mismatches_count is None else mismatches_count
            result[col_name] = {"match": df_len - na_count, "missing": na_count,
                                "mismatch": mismatches_count - na_count}
        return result

    @staticmethod
    def correlation(input_cols, method="pearson", output="json"):
        """Not implemented for the Ibis engine."""
        pass

    @staticmethod
    def qcut(columns, num_buckets, handle_invalid="skip"):
        """Not implemented for the Ibis engine."""
        pass

    def string_to_index(self, input_cols=None, output_cols=None, columns=None):
        """Encode string columns as integer indices using a LabelEncoder."""
        # Consistency fix: every other method here reads ``self.root``;
        # ``self.df`` does not appear to be set anywhere — TODO confirm
        # against DataFrameBaseColumns.
        df = self.root
        le = preprocessing.LabelEncoder()
        df = string_to_index(df, input_cols, output_cols, le)
        return df

    def index_to_string(self, input_cols=None, output_cols=None, columns=None):
        """Decode integer index columns back to their string labels."""
        # Consistency fix: see string_to_index above.
        df = self.root
        le = preprocessing.LabelEncoder()
        df = index_to_string(df, input_cols, output_cols, le)
        return df
| 34.741573 | 115 | 0.610608 |
91094a2fe699e4a7a6d59761e70d97bafdedf6cc | 24,700 | py | Python | veikkaaja/veikkaus_client.py | miikama/veikkaaja | 65d507182baddb113dff0d5a43d1c15954e5a2ba | [
"MIT"
] | null | null | null | veikkaaja/veikkaus_client.py | miikama/veikkaaja | 65d507182baddb113dff0d5a43d1c15954e5a2ba | [
"MIT"
] | 1 | 2021-04-20T11:08:29.000Z | 2021-11-12T18:03:17.000Z | veikkaaja/veikkaus_client.py | miikama/veikkaaja | 65d507182baddb113dff0d5a43d1c15954e5a2ba | [
"MIT"
] | null | null | null | """Main veikkaus client module"""
import json
import os
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Union
import requests
from veikkaaja import logger
from veikkaaja.endpoints import EndPoint
from veikkaaja.responses import ResponseType, parse_response
from veikkaaja.types import GameTypes, ParseableEnum
class BetTarget(Enum):
    """Outcome of a 1X2 bet: home win, draw (X) or away win.

    Currently only 1x2 games are supported.
    """
    HOME = "HOME"
    X = "X"
    AWAY = "AWAY"
class BetDecision(NamedTuple):
    """A single betting decision (currently only 1x2 is supported)."""
    # what to bet on (home win, draw or away win)
    target: BetTarget
    # how much to bet in cents
    amount: int
class EBETType(ParseableEnum):
    """
    Enumeration of the possible game types in an EBET game response.
    """
    ONE_X_TWO = "1X2"
    ONE_TWO = "12"
    HOME_HANDICAP = "HOME_HANDICAP"
    AWAY_HANDICAP = "AWAY_HANDICAP"
    OVER_UNDER = "OVER_UNDER"
    OUTRIGHT_SHORT_TERM = "OUTRIGHT_SHORT_TERM"
class Game:
    """A class for holding EBET event information"""

    # pylint:disable=too-many-instance-attributes
    # This is intended just as a wrapper to hold the
    # data in the API response
    # Competitor names (competitor id "1" is home, "2" is away).
    home_team = ""
    away_team = ""
    # Odds as parsed from the API response; presumably expressed in
    # hundredths of the decimal odds (e.g. 132 -> 1.32) — TODO confirm.
    home_odds = 0.0
    away_odds = 0.0
    draw_odds = 0.0
    # API identifiers: the event and the draw row this game came from.
    event_id = 0
    row_id = 0
    # TODO: removed from the response, consider storing the gametype
    # e.g. EBET here
    draw_type: Union[EBETType, None] = None
    status = ""
    list_index = 0
    # Betting closes at this time (parsed from the API's millisecond epoch).
    close_time = datetime.fromtimestamp(0)
    league = ""
    sport_id = 0
    # Minimum allowed stake (from the draw's gameRuleSet).
    min_stake = 0

    def __init__(self, client: 'VeikkausClient'):
        """Bind this game to the client that fetched it."""
        self._client: VeikkausClient = client

    def place_bet(self, bet: BetDecision):
        """Given amount in cents, bet for target."""
        self._client.place_bet(self, bet)

    def __repr__(self):
        """Make nicer output"""
        close_str = self.close_time.strftime("%d.%m.%Y %H:%M")
        return f"{self.__class__.__name__:} type: 'EBET' listindex: {self.list_index} {close_str} {self.league}: {self.home_team:15} - {self.away_team:15} id: {self.row_id} event_id: {self.event_id} status: {self.status}, odds: ({self.home_odds:6} - {self.draw_odds:6} - {self.away_odds:6} min_stake: {self.min_stake})" #pylint:disable=line-too-long
class EventInfo:
    """Holds the per-event details fetched for an EBET draw."""

    # Tournament/league name, e.g. "Valioliiga".
    league = ""
    # The Veikkaus-external identifier of the event.
    external_id = ""

    def __repr__(self):
        cls_name = self.__class__.__name__
        return f"{cls_name}: league: {self.league}, external_id: {self.external_id}"
class TransActionType(Enum):
    """Enumeration of all possible account transaction types."""
    WIN = "WIN"
    LOSS = "LOSS"
    BUY = "BUY"
class Wager(NamedTuple):
    """The result of a query for transactions"""
    # Veikkaus-external identifier of the wager.
    external_id: str
    id: int
    # When the transaction was booked.
    accounting_date: datetime
    # Transaction amount; presumably in cents like stakes — TODO confirm.
    amount: int
    # WIN / LOSS / BUY.
    result: TransActionType
    # Which game the wager belongs to (e.g. EBET).
    product: GameTypes
class VeikkausClient:
    """Client for the Veikkaus JSON API.

    Logs in on construction and keeps the authenticated
    ``requests.Session`` in ``self.session`` for all subsequent calls.
    """

    # Headers required by the Veikkaus robot API on every request.
    API_HEADERS = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "X-ESA-API-Key": "ROBOT"
    }
def __init__(self, account="", password=""):
"""
Arguments:
account (str): Name of the account or empty if empty
account name is loaded from VEIKKAUS_ACCOUNT
environment variable.
password (str): account password. If empty, loaded from
VEIKKAUS_PASSWORD environment variable
"""
acc_password = password
if not acc_password:
if "VEIKKAUS_PASSWORD" not in os.environ:
raise RuntimeError("Missing account authentication information")
acc_password = os.environ['VEIKKAUS_PASSWORD']
acc = account
if not acc:
if "VEIKKAUS_ACCOUNT" not in os.environ:
raise RuntimeError("Missing account authentication information")
acc = os.environ['VEIKKAUS_ACCOUNT']
self.session = self.login(acc, acc_password)
    def _access_endpoint(self,
                         endpoint: EndPoint,
                         payload: Dict[str, Any] = None,
                         method="GET") -> Union[requests.Response, None]:
        """
        A common wrapper for sending and logging API requests.
        Arguments:
            endpoint: the url of the endpoint
            payload: dictionary of the query parameters
            method: GET or POST
        Returns:
            the ``requests.Response`` on HTTP 200, otherwise ``None``
            (also when there is no active session).
        """
        payload = {} if payload is None else payload
        if not self.session:
            logger.warning("No active session for accessing '%s'.", endpoint.endpoint)
            return None
        # log sending out a request
        payload_text = f"\n{json.dumps(payload, indent=4)}" if payload else ""
        logger.info("\033[93mSending\033[0m %s %s", method, endpoint.url)
        logger.debug("payload is:\n%s", payload_text)
        # Test hook: record the outgoing request before sending.
        self.save_outgoing_request(endpoint, payload)
        if method == "GET":
            # GET passes the payload as query parameters.
            response = self.session.get(
                endpoint.url, headers=self.API_HEADERS, params=payload)
        elif method == "POST":
            # POST sends the payload as a JSON body.
            response = self.session.post(
                endpoint.url, headers=self.API_HEADERS, json=payload)
        else:
            raise RuntimeError(f"Unsupported method {method}")
        # Test hook: record the raw response.
        self.save_incoming_response(endpoint, response)
        if response.status_code != 200:
            # log out the error
            logger.error("\033[91mRequest failed\033[0m %s, %s. URL: %s",
                         response.status_code, response.reason, response.url)
            # RED debug log entry
            if response.content:
                logger.debug("\033[91mInvalid request:\033[0m\n%s", response.content)
            return None
        # green debug log entry; the responses are quite large
        logger.info("\033[92mResponse OK\033[0m from %s", endpoint.endpoint)
        logger.debug("\033[92mReceived:\033[0m\n%s",
                     json.dumps(response.json(), indent=4))
        return response
    def save_outgoing_request(self, endpoint: EndPoint, payload: Dict[Any, Any]):
        """Test hook: override to record outgoing requests (no-op by default)."""

    def save_incoming_response(self, endpoint: EndPoint, response: requests.Response):
        """Test hook: override to record incoming responses (no-op by default)."""
    def login(self, account: str, password: str):
        """
        Starts and returns a requests session.
        Arguments:
            account: Veikkaus account name.
            password: account password.
        Returns:
            requests.Session or None if login failed.
        """
        login_payload = {"type": "STANDARD_LOGIN", "login": account, "password": password}
        logger.info("Trying to log in...")
        logger.info("\033[93mSending\033[0m %s %s", "GET",
                    EndPoint.login_endpoint().endpoint)
        # The request goes through a fresh session (not _access_endpoint)
        # because no authenticated session exists yet.
        session = requests.Session()
        response = session.post(
            EndPoint.login_endpoint(),
            data=json.dumps(login_payload),
            headers=self.API_HEADERS)
        if response.status_code != 200:
            logger.error("Cannot login")
            return None
        logger.info("\033[92mResponse OK\033[0m Succesfully logged in!")
        return session
def get_balance(self, balance="usableBalance"):
"""Return the account balance
Args:
balance (str): the type of balance to return, options are
('balance', 'usableBalance', 'frozenBalance')
"""
assert balance in ('balance', 'usableBalance',
'frozenBalance'), "Invalid balance type"
response = self._access_endpoint(EndPoint.account_info_endpoint(), method="GET")
if response is None:
return 0
cash = response.json().get('balances', {}).get('CASH', {})
logger.info("Account has balance: total: %s €, frozen: %s €, usable: %s €",
cash.get('balance', 0) / 100,
cash.get('frozenBalance', 0) / 100,
cash.get('usableBalance', 0) / 100)
# return the requested balance
return cash.get(balance, 0) / 100
    def get_betting_history(self, maximum_results=50, sort_by='TXDATE') -> List[Wager]:
        """Return the betting history
        Arguments:
            maximum_results int: max number of results sorted by sort_by
                This has to be below 50
            sort_by: Either 'TXDATE' or 'RESULT_DATE'
                multiple values for sort_by are available, but might not work
                https://github.com/VeikkausOy/sport-games-robot/issues/95
        Returns:
            a list of Wager entries, or an empty list on failure.
        """
        assert sort_by in ('TXDATE', 'RESULT_DATE'), "Invalid sort_by"
        assert 0 <= maximum_results <= 50, "Queried result count should be between 0 and 50."
        payload = {'size': maximum_results, 'sort-by': sort_by}
        response = self._access_endpoint(
            EndPoint.account_betting_history(), method="GET", payload=payload)
        if response is None:
            return []
        return parse_response(response.json(), ResponseType.TRANSACTION_LIST)
    def get_bet_event_information(self, event: Wager):
        """Return the more thorough information
        for the bet with the argument id. Wager can
        be obtained from the results of get_betting_history()
        Arguments:
            event: the wager, one of the results of from the results of
                get_betting_history()

        NOTE(review): this is currently a stub — the response body is
        fetched but never parsed, so the method always returns [].
        """
        response = self._access_endpoint(
            EndPoint.wager_information(event.external_id), method="GET")
        if response is None:
            return []
        return []
def upcoming_events(self, game_type: GameTypes) -> List[Game]:
"""Get upcoming games"""
payload = {'game-names': game_type.value}
response = self._access_endpoint(
EndPoint.games_info_endpoint(), payload=payload, method="GET")
if not response:
return []
data = response.json()
if game_type == GameTypes.EBET:
return self.parse_draws(data)
logger.warning("Not yet implemented game type: %s", game_type.value)
return []
def parse_draws(self, data: Dict):
"""
API response:
"draws": [
{
"gameName": "EBET",
"brandName": "838",
"id": "2143963",
"name": "SINGLE",
"status": "OPEN",
"openTime": 1600398000000,
"closeTime": 1600887480000,
"drawTime": 1600887600000,
"resultsAvailableTime": 1600894799000,
"gameRuleSet": {
"basePrice": 100,
"maxPrice": 1000000,
"stakeInterval": 10,
"minStake": 10,
"maxStake": 100000,
"minSystemLevel": 1,
"maxSystemLevel": 10,
"oddsType": "FIXED"
},
"rows": [
{
"id": "1",
"status": "OPEN",
"includedRowCount": 32,
"name": "",
"description": "",
"detailedDescription": "1/2",
"tvChannel": "",
"competitors": [
{
"id": "1",
"name": "Olympiakos",
"number": 133,
"odds": {
"odds": 132
},
"status": "ACTIVE",
"handicap": "0.00"
},
{
"id": "2",
"name": "Omonoia",
"number": 313,
"odds": {
"odds": 860
},
"status": "ACTIVE"
},
{
"id": "3",
"name": "Tasapeli",
"odds": {
"odds": 440
},
"status": "ACTIVE"
}
],
"eventId": "98723990",
"excludedEvents": [
"98723990"
],
"type": "1X2",
"sportId": "1",
"externalId": "0"
}
]
},
"""
games = []
for entry in data:
game = Game(self)
game.row_id = entry.get('id')
game.list_index = entry.get('listIndex')
game.status = entry.get('status')
game.close_time = datetime.fromtimestamp(entry.get('closeTime', 0) / 1000)
game.min_stake = entry.get('gameRuleSet', {}).get('minStake', 0)
for row in entry.get('rows', []):
game.event_id = row.get('eventId')
game.status = row.get('status')
game.sport_id = row.get('sportId')
game.draw_type = EBETType.parse(row.get('type'))
for comp in row.get('competitors', []):
if comp.get('id') == "1":
game.home_team = comp.get('name')
game.home_odds = float(comp.get('odds').get('odds'))
if comp.get('id') == "2":
game.away_team = comp.get('name')
game.away_odds = float(comp.get('odds').get('odds'))
if comp.get('id') == "3":
game.draw_odds = float(comp.get('odds').get('odds'))
games.append(game)
games = sorted(games, key=lambda game: game.close_time)
return games
def sport_types(self) -> List[Dict[str, str]]:
"""query available sport type ids:
API Response:
[
{
"id": "7",
"name": "Salibandy"
},
{
"id": "48",
"name": "Arvontapelit"
},
{
"id": "25",
"name": "Kamppailulajit"
}
]
"""
payload = {'lang': "fi"}
response = self._access_endpoint(
EndPoint.sport_type_code_endpoint(), payload, method="GET")
if not response:
return []
return response.json()
def sport_categories(self, sport_id: int) -> List[Dict[str, str]]:
"""
query available sport type subgateries
e.g. for football query different countries
that have football leagues.
API Response:
{
"id": "1",
"name": "Jalkapallo",
"categories": [
{
"id": "1",
"name": "Suomi"
},
{
"id": "2",
"name": "Englanti"
},
{
"id": "3",
"name": "Italia"
},
}
"""
payload = {'lang': "fi"}
response = self._access_endpoint(
EndPoint.sport_categories_endpoint(sport_id), payload, method="GET")
if not response:
return []
return response.json()
def sport_tournaments(self, sport_id: int,
sport_category_id: int) -> List[Dict[str, str]]:
"""
query available tournaments for sport type subgateries
e.g. for football query different countries
that have football leagues.
API Response:
{
"id": "2",
"name": "Englanti",
"tournaments": [
{
"id": "1",
"name": "Valioliiga"
},
{
"id": "2",
"name": "Mestaruussarja"
},
{
"id": "3",
"name": "Ykk\u00f6sliiga"
},
]
}
"""
payload = {'lang': "fi"}
response = self._access_endpoint(
EndPoint.sport_tournaments_endpoint(sport_id, sport_category_id),
payload,
method="GET")
if not response:
return []
return response.json()
def sport_tournament_info(self, sport_id: int, sport_category_id: int,
sport_tournament_id) -> List[Dict[str, str]]:
"""
query available tournaments for sport type subgateries
e.g. for football query different countries
that have football leagues.
API Response:
{
"id": "1",
"name": "Valioliiga",
"events": [
{
"id": "94772195",
"name": "Crystal P - Bournemouth",
"date": 1557669600000
},
...
],
"teams": [
{
"id": "60",
"name": "Huddersfield",
"shortName": "Huddersfield"
},
{
"id": "446",
"name": "Hull",
"shortName": "Hull"
},
...
],
}
"""
payload = {'lang': "fi"}
response = self._access_endpoint(
EndPoint.sport_tournament_info_endpoint(sport_id, sport_category_id,
sport_tournament_id),
payload,
method="GET")
if not response:
return []
return response.json()
def event_info(self, event_id: int) -> Union[EventInfo, None]:
"""Query more specific information for the event
API response:
{
"id": "98587029",
"name": "Liverpool - Arsenal",
"sportId": "1",
"sportName": "Jalkapallo",
"categoryId": "2",
"categoryName": "Englanti",
"tournamentId": "1",
"tournamentName": "Valioliiga",
"teams": [
{
"id": "1",
"name": "Arsenal",
"shortName": "Arsenal"
},
{
"id": "9",
"name": "Liverpool",
"shortName": "Liverpool"
}
],
"date": 1601319600000,
"externalId": "23203829",
"hasLiveBetting": false
}
"""
payload = {'lang': "fi"}
response = self._access_endpoint(
EndPoint.single_event_info_endpoint(event_id), payload, method="GET")
if not response:
return None
data = response.json()
event = EventInfo()
event.league = data.get('tournamentName')
event.external_id = data.get('externalId')
return event
def draw_info(self, draw_id: int) -> Union[EventInfo, None]:
"""Query more specific information for a single draw
API response:
{
"id": "98587029",
"name": "Liverpool - Arsenal",
"sportId": "1",
"sportName": "Jalkapallo",
"categoryId": "2",
"categoryName": "Englanti",
"tournamentId": "1",
"tournamentName": "Valioliiga",
"teams": [
{
"id": "1",
"name": "Arsenal",
"shortName": "Arsenal"
},
{
"id": "9",
"name": "Liverpool",
"shortName": "Liverpool"
}
],
"date": 1601319600000,
"externalId": "23203829",
"hasLiveBetting": false
}
"""
payload = {'lang': "fi"}
response = self._access_endpoint(
EndPoint.single_draw_info_endpoint(draw_id), payload, method="GET")
if not response:
return None
data = response.json()
event = EventInfo()
event.league = data.get('tournamentName')
event.external_id = data.get('externalId')
return event
def place_bet(self, game: Game, bet: BetDecision, test=True) -> bool:
"""Place a bet, bet amount in cents
Arguments:
game: which draw to place the bet for
bet: what to bet
test: (optional) whether to use the API test endpoint
which does not actually place the bet, just checks
that it could have been placed
"""
endpoint = EndPoint.place_wager_endpoint()
if test:
endpoint = EndPoint.place_wager_test_endpoint()
payload = self.ebet_payload([game], [bet])
response = self._access_endpoint(endpoint, payload=payload, method="POST")
if not response:
return False
return True
    @staticmethod
    def ebet_payload(games: List[Game], bets: List[BetDecision]) -> Dict[str, Any]:
        """Build the request body for an EBET wager.

        Produces one board per (game, bet) pair::

            {
                "gameName": "EBET",
                "price": <total stake of all bets, cents>,
                "boards": [
                    {
                        "betType": "normal",
                        "stake": <total stake, cents>,
                        "selections": [
                            {
                                "listIndex": <draw list index>,
                                "competitors": [<1=home, 3=draw, 2=away>],
                                "stake": <this bet's stake, cents>
                            }
                        ]
                    },
                    ...
                ]
            }

        Raises:
            TypeError: for an unknown bet target.
        """
        assert len(games) == len(bets), "Number of games has to match number of bets"

        def selected_play(target: BetTarget):
            # Map the bet target to the API's competitor number:
            # 1 = home, 2 = away, 3 = draw.
            if target == BetTarget.HOME:
                return 1
            if target == BetTarget.X:
                return 3
            if target == BetTarget.AWAY:
                return 2
            raise TypeError(f"invalid bet target {target.value}")

        # calculate the total price by summing all bets together
        total_price = sum(map(lambda bet: bet.amount, bets))
        game_data = {
            "gameName": GameTypes.EBET.value, # pylint: disable=no-member
            "price": total_price,
            "boards": []
        }
        # Fill the bet for each game under 'boards'
        # specify the stake for each bet target individually
        # NOTE(review): every board carries stake=total_price while its
        # selection carries the individual bet amount — confirm this is
        # what the API expects for multi-game wagers.
        for game, bet in zip(games, bets):
            data = {
                "betType": "normal",
                "stake": total_price,
                "selections": [
                    {
                        "listIndex": game.list_index,
                        "competitors": [selected_play(bet.target)],
                        "stake": bet.amount
                    }
                ]
            }
            game_data['boards'].append(data)  # type: ignore
        return game_data
| 32.585752 | 350 | 0.469514 |
3af7332806b38477b5200aeae9fa1233d3b6de56 | 620 | py | Python | studies/migrations/0071_remove_ordering_from_response.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 9 | 2018-06-26T17:15:27.000Z | 2021-11-21T17:19:01.000Z | studies/migrations/0071_remove_ordering_from_response.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 496 | 2018-02-19T19:18:24.000Z | 2022-03-31T17:01:16.000Z | studies/migrations/0071_remove_ordering_from_response.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 16 | 2018-07-06T23:35:39.000Z | 2021-11-21T17:52:58.000Z | # Generated by Django 3.0.14 on 2021-06-29 18:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("studies", "0070_auto_20210521_0632"),
]
operations = [
migrations.AlterModelOptions(
name="response",
options={
"base_manager_name": "related_manager",
"permissions": (
(
"view_all_response_data_in_analytics",
"View all response data in analytics",
),
),
},
),
]
| 23.846154 | 62 | 0.487097 |
0bda228682f4c17f72575843529b6586900a48d8 | 7,318 | py | Python | python/ray/tests/test_advanced_5.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_advanced_5.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_advanced_5.py | jianoaix/ray | 1701b923bc83905f8961c06a6a173e3eba46a936 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import logging
import sys
import time
import numpy as np
import pytest
import ray.cluster_utils
from ray._private.test_utils import (
client_test_enabled,
SignalActor,
)
if client_test_enabled():
from ray.util.client import ray
else:
import ray
logger = logging.getLogger(__name__)
def test_task_arguments_inline_bytes_limit(ray_start_cluster_enabled):
    """Large task arguments must not all be inlined into a single task RPC.

    ``bar`` (pinned to the head node) submits ``foo`` (pinned to the worker
    node) with three ~8 KiB numpy arrays, while
    ``task_rpc_inlined_bytes_limit`` is only 18 KiB — smaller than the three
    arguments combined, and deliberately below ``max_grpc_message_size``
    (see the inline comment below).
    """
    cluster = ray_start_cluster_enabled
    # Head node, addressable via the custom "pin_head" resource.
    cluster.add_node(
        num_cpus=1,
        resources={"pin_head": 1},
        _system_config={
            "max_direct_call_object_size": 100 * 1024,
            # if task_rpc_inlined_bytes_limit is greater than
            # max_grpc_message_size, this test fails.
            "task_rpc_inlined_bytes_limit": 18 * 1024,
            "max_grpc_message_size": 20 * 1024,
        },
    )
    # Worker node, addressable via the custom "pin_worker" resource.
    cluster.add_node(num_cpus=1, resources={"pin_worker": 1})
    ray.init(address=cluster.address)

    @ray.remote(resources={"pin_worker": 1})
    def foo(ref1, ref2, ref3):
        return ref1 == ref2 + ref3

    @ray.remote(resources={"pin_head": 1})
    def bar():
        # if the refs are inlined, the test fails.
        # refs = [ray.put(np.random.rand(1024) for _ in range(3))]
        # return ray.get(
        #     foo.remote(refs[0], refs[1], refs[2]))
        return ray.get(
            foo.remote(
                np.random.rand(1024),  # 8k
                np.random.rand(1024),  # 8k
                np.random.rand(1024),
            )
        )  # 8k

    ray.get(bar.remote())
# This case tests whether the GCS-based actor scheduler works properly
# when a normal task co-exists with actor creation.
def test_schedule_actor_and_normal_task(ray_start_cluster_enabled):
    """Actor creation must wait while a normal task holds the node's memory.

    The single node has 1 GiB of memory; the normal task and the actor each
    request 600 MiB, so they cannot run concurrently — the actor may only be
    scheduled after the (signal-blocked) normal task finishes.
    """
    cluster = ray_start_cluster_enabled
    cluster.add_node(
        memory=1024 ** 3, _system_config={"gcs_actor_scheduling_enabled": True}
    )
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    @ray.remote(memory=600 * 1024 ** 2, num_cpus=0.01)
    class Foo:
        def method(self):
            return 2

    @ray.remote(memory=600 * 1024 ** 2, num_cpus=0.01)
    def fun(signal1, signal2):
        # Tell the driver we are running, then block until released.
        signal2.send.remote()
        ray.get(signal1.wait.remote())
        return 1

    # NOTE: the original spelled this local `singal1`; renamed for clarity.
    signal1 = SignalActor.remote()
    signal2 = SignalActor.remote()
    o1 = fun.remote(signal1, signal2)
    # Make sure the normal task is executing.
    ray.get(signal2.wait.remote())

    # The normal task is blocked now.
    # Try to create an actor and make sure it is not scheduled for the time
    # being (the normal task still holds the memory).
    foo = Foo.remote()
    o2 = foo.method.remote()
    ready_list, remaining_list = ray.wait([o2], timeout=2)
    assert len(ready_list) == 0 and len(remaining_list) == 1

    # Send a signal to unblock the normal task execution.
    ray.get(signal1.send.remote())

    # Check the result of the normal task.
    assert ray.get(o1) == 1

    # Make sure the actor is created.
    assert ray.get(o2) == 2
# This case tests whether the GCS-based actor scheduler works properly
# at a larger scale.
def test_schedule_many_actors_and_normal_tasks(ray_start_cluster):
    """Schedule many actors, actor tasks, and normal tasks concurrently.

    10 nodes x 2 GiB run 1000 normal tasks plus 50 actors with 50 method
    calls each (100 MiB / 0.01 CPU per task or actor); every result must
    come back correct.
    """
    cluster = ray_start_cluster

    node_count = 10
    actor_count = 50
    each_actor_task_count = 50
    normal_task_count = 1000
    node_memory = 2 * 1024 ** 3
    for i in range(node_count):
        # Only the head node (i == 0) needs the system-config override.
        cluster.add_node(
            memory=node_memory,
            _system_config={"gcs_actor_scheduling_enabled": True} if i == 0 else {},
        )
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    @ray.remote(memory=100 * 1024 ** 2, num_cpus=0.01)
    class Foo:
        def method(self):
            return 2

    @ray.remote(memory=100 * 1024 ** 2, num_cpus=0.01)
    def fun():
        return 1

    normal_task_object_list = [fun.remote() for _ in range(normal_task_count)]
    actor_list = [Foo.remote() for _ in range(actor_count)]
    actor_object_list = [
        actor.method.remote()
        for _ in range(each_actor_task_count)
        for actor in actor_list
    ]
    # `result` instead of `object`: the original shadowed the builtin.
    for result in ray.get(actor_object_list):
        assert result == 2

    for result in ray.get(normal_task_object_list):
        assert result == 1
# This case tests whether the GCS actor scheduler distributes actors
# in a balanced way when using the `SPREAD` policy.
@pytest.mark.parametrize("args", [[5, 20], [5, 3]])
def test_actor_distribution_balance(ray_start_cluster_enabled, args):
    """SPREAD-scheduled actors must be spread evenly over the nodes.

    Parametrized as ``[node_count, actor_count]`` — one case with more
    actors than nodes, one with fewer.
    """
    cluster = ray_start_cluster_enabled

    node_count, actor_count = args
    for i in range(node_count):
        # Only the head node (i == 0) needs the system-config override.
        cluster.add_node(
            memory=1024 ** 3,
            _system_config={"gcs_actor_scheduling_enabled": True} if i == 0 else {},
        )
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    @ray.remote(memory=100 * 1024 ** 2, num_cpus=0.01, scheduling_strategy="SPREAD")
    class Foo:
        def method(self):
            return ray.worker.global_worker.node.unique_id

    actor_list = [Foo.remote() for _ in range(actor_count)]
    # Group the actors by the node each one landed on.
    actor_distribution = {}
    for actor in actor_list:
        node_id = ray.get(actor.method.remote())
        actor_distribution.setdefault(node_id, []).append(actor)

    if node_count >= actor_count:
        # At least as many nodes as actors: each actor gets its own node.
        assert len(actor_distribution) == actor_count
        for actors in actor_distribution.values():
            assert len(actors) == 1
    else:
        # More actors than nodes: every node is used, and no node holds more
        # than its fair share (integer division keeps this exact).
        assert len(actor_distribution) == node_count
        for actors in actor_distribution.values():
            assert len(actors) <= actor_count // node_count
# This case tests whether RequestWorkerLeaseReply carries normal task resources
# when the request is rejected (due to resource preemption by normal tasks).
def test_worker_lease_reply_with_resources(ray_start_cluster_enabled):
    """An actor rejected on a busy node must be rescheduled to another node.

    Node 1 (2000 MiB) is mostly consumed by a long-running 1500 MiB normal
    task, so the 800 MiB actor cannot fit there and should end up on node 2
    (1000 MiB).
    """
    cluster = ray_start_cluster_enabled
    cluster.add_node(
        memory=2000 * 1024 ** 2,
        num_cpus=1,
        _system_config={
            # NOTE(review): the very large poll period presumably keeps GCS
            # from learning usage via periodic reports, so only the lease
            # reply can carry it — verify against Ray's system config docs.
            "gcs_resource_report_poll_period_ms": 1000000,
            "gcs_actor_scheduling_enabled": True,
        },
    )
    node2 = cluster.add_node(memory=1000 * 1024 ** 2, num_cpus=1)
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    @ray.remote(memory=1500 * 1024 ** 2, num_cpus=0.01)
    def fun(signal):
        signal.send.remote()
        time.sleep(30)
        return 0

    signal = SignalActor.remote()
    fun.remote(signal)
    # Make sure that the `fun` is running.
    ray.get(signal.wait.remote())

    @ray.remote(memory=800 * 1024 ** 2, num_cpus=0.01)
    class Foo:
        def method(self):
            return ray.worker.global_worker.node.unique_id

    foo1 = Foo.remote()
    o1 = foo1.method.remote()
    ready_list, remaining_list = ray.wait([o1], timeout=10)
    # If RequestWorkerLeaseReply carries normal task resources,
    # GCS will then schedule foo1 to node2. Otherwise,
    # GCS would keep trying to schedule foo1 to
    # node1 and getting rejected.
    assert len(ready_list) == 1 and len(remaining_list) == 0
    assert ray.get(o1) == node2.unique_id
if __name__ == "__main__":
    # NOTE(review): `pytest` is already imported at module level, so this
    # local import is redundant; `os` is only needed for this entry point.
    import os
    import pytest

    if os.environ.get("PARALLEL_CI"):
        # CI mode: run this file's tests in parallel, one subprocess each.
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
| 30.365145 | 84 | 0.650041 |
929fcbf2f7ed5d2d592e695c89e7b2836cb3fe9b | 4,399 | py | Python | exporter/actions.py | nhsuk/nhsuk-content-store | a96e3e57db34247c9d1dea64437c6bbcd7926fce | [
"MIT"
] | 24 | 2016-10-20T15:24:49.000Z | 2022-03-15T18:53:44.000Z | exporter/actions.py | nhsuk/nhsuk-content-store | a96e3e57db34247c9d1dea64437c6bbcd7926fce | [
"MIT"
] | 47 | 2016-10-12T15:45:27.000Z | 2017-04-05T11:15:28.000Z | exporter/actions.py | nhsuk/nhsuk-content-store | a96e3e57db34247c9d1dea64437c6bbcd7926fce | [
"MIT"
] | 6 | 2017-03-03T14:33:27.000Z | 2021-10-07T20:32:34.000Z | import json
import logging
import os
from datetime import timedelta
from bakery.views import BuildableMixin
from django.core.urlresolvers import reverse
from django.utils.timezone import now
from oauth2_provider.models import AccessToken
from rest_framework.test import APIClient
from pages.models import Page
from .components import StructuralComponent
logger = logging.getLogger(__name__)
class BakeryPageView(BuildableMixin):
    """Builds static exports of live wagtail pages.

    Each page is fetched through the internal pages REST API (authenticated
    with a short-lived OAuth token) and written to disk as a
    ``manifest.json`` plus any additional files (e.g. images) produced by
    the component exporter.
    """

    # Page content areas whose components get transformed before export.
    CONTENT_AREAS = ['header', 'main']

    def __init__(self, build_path):
        """`build_path` is the root folder the export is written into."""
        super().__init__()
        self.build_path = build_path

    def transform_content(self, obj, raw_content):
        """
        Transforms the content returned by the API into something that the frontend expects.
        This is because there are some differences between the frontend REST handler and the filesystem one
        (e.g. images with different formats etc.)

        Returns a list of ``(path, content)`` tuples to write to disk,
        always ending with the page's ``manifest.json``.
        """
        content = json.loads(raw_content.decode('utf-8'))
        context = {
            'page': obj,
            'root_path': self.build_path,
            'item_base_path': self.get_item_base_path(obj),
            # Extra files (e.g. images) collected by the component exporter.
            'new_files': []
        }
        component_exporter = StructuralComponent(context)
        for area in self.CONTENT_AREAS:
            content_area = content.get('content', {}).get(area, [])
            if content_area:
                content['content'][area] = component_exporter.transform_components(content_area)

        content_files = context['new_files']
        content_files.append(
            (
                os.path.join(context['item_base_path'], 'manifest.json'),
                json.dumps(content, indent=2, sort_keys=True)
            )
        )
        return content_files

    def build_objects(self, ids, include_children=False):
        """
        Exports the live pages with id == `ids` including their children if `include_children` == True.
        """
        for page in Page.objects.live().filter(id__in=ids):
            self.build_object(page, include_children=include_children)

    def build_object(self, obj, include_children=False):
        """
        Exports the live page `obj` including its children if `include_children` == True.
        """
        logger.debug("Building %s" % obj)
        obj = obj.specific

        # Query the internal API in-process, with a forced OAuth token.
        client = APIClient(SERVER_NAME='localhost')
        client.handler._force_token = self.get_auth_token()

        response = client.get(self.get_url(obj))
        content_files = self.transform_content(obj, response.content)
        for path, content in content_files:
            self.build_file(path, content)

        if include_children:
            for child in obj.get_live_children():
                self.build_object(child, include_children=include_children)

    def build_file(self, path, content, *args, **kwargs):
        """
        Saves the `content` in a file with the given `path`.
        """
        folder_path = os.path.dirname(path)
        if folder_path:
            # `exist_ok=True` avoids the check-then-create race of the
            # previous `os.path.exists(...) or os.makedirs(...)` idiom.
            os.makedirs(folder_path, exist_ok=True)

        # File-like content (e.g. an image): copy it in chunks.
        if hasattr(content, 'file'):
            content.file.open('rb')
            with open(path, 'wb+') as destination:
                for chunk in content.file.chunks():
                    destination.write(chunk)
            return

        # Text content: encode and let the base class write it.
        content = content.encode('utf-8')
        return super().build_file(path, content, *args, **kwargs)

    def get_url(self, obj):
        """
        Returns the url to the page detail API for the object `obj`.
        """
        return reverse('wagtailapi:pages:detail', kwargs={'pk': obj.pk})

    def get_auth_token(self):
        """
        Instantiate a valid auth token to be used for the request.
        """
        return AccessToken(
            scope='read',
            expires=now() + timedelta(days=1)
        )

    def get_item_base_path(self, obj):
        """
        Returns the path to the folder that will contain the export of the object `obj`,
        creating it if it does not exist.
        """
        path = os.path.join(self.build_path, obj.url[1:])
        os.makedirs(path, exist_ok=True)
        return path
def export(build_dir, page_ids):
    """
    Export the live pages whose ids are in `page_ids`, together with their
    children, into the folder `build_dir`.
    """
    view = BakeryPageView(build_dir)
    view.build_objects(page_ids, include_children=True)
| 33.075188 | 107 | 0.624233 |
49594de97bc52d1cd641847978bf5afc2e54364d | 840 | py | Python | hyperspace.py | wotreeclapton/Samroiyod_game | eaaad029aca8ca88463f0349ebbf0dc4be890e53 | [
"MIT"
] | null | null | null | hyperspace.py | wotreeclapton/Samroiyod_game | eaaad029aca8ca88463f0349ebbf0dc4be890e53 | [
"MIT"
] | null | null | null | hyperspace.py | wotreeclapton/Samroiyod_game | eaaad029aca8ca88463f0349ebbf0dc4be890e53 | [
"MIT"
] | null | null | null | #! python 3
'''
SAMROIYOD GAME HYPERSPACE MODULE developed by Mr Steven J walden
Jan. 2020
SAMROIYOD, PRACHUAP KIRI KHAN, THAILAND
Some of the sounds in this project were created by David McKee (ViRiX) soundcloud.com/virix
[See License.txt file]
'''
from os import environ
from random import randrange
import pygame
import methods as meth
from methods import change_dir
import sprites
from sprites import Player1, Player2, Player1Bullet, Player2Bullet, MobBullet, Mob, Boss, Explosion, HyperMob
def hyperspace(self):
    """Set up the hyperspace scene's background image.

    NOTE(review): written like a method (takes ``self``) but defined at
    module level — presumably called with the game object as argument.
    The music setup below is currently commented out.
    """
    # with change_dir('snd'):  # set music (disabled)
    #     pg.mixer.music.load('through space.ogg')
    #     pg.mixer.music.set_volume(1.0)
    #     pg.mixer.music.play(loops=-1)
    with change_dir('img'):
        # Load the starfield background from the `img` folder and keep its rect.
        self.hype_background = pygame.image.load('starfield.png').convert()
        self.hype_background_rect = self.hype_background.get_rect()
5880d4ef6c6c2426340b0a8c1b5d6e0f0cf59848 | 22,524 | py | Python | ects/consensus/block_creation.py | ects-io/ects-blockchain | a798034a8c8bce34d4b87fb2c98351d06f9eaf8e | [
"Apache-2.0"
] | null | null | null | ects/consensus/block_creation.py | ects-io/ects-blockchain | a798034a8c8bce34d4b87fb2c98351d06f9eaf8e | [
"Apache-2.0"
] | null | null | null | ects/consensus/block_creation.py | ects-io/ects-blockchain | a798034a8c8bce34d4b87fb2c98351d06f9eaf8e | [
"Apache-2.0"
] | null | null | null | import logging
import random
from dataclasses import replace
from typing import Callable, Dict, List, Optional, Tuple
import blspy
from blspy import G1Element, G2Element
from chiabip158 import PyBIP158
from ects.consensus.block_record import BlockRecord
from ects.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from ects.consensus.blockchain_interface import BlockchainInterface
from ects.consensus.coinbase import create_farmer_coin, create_pool_coin
from ects.consensus.constants import ConsensusConstants
from ects.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from ects.full_node.mempool_check_conditions import get_name_puzzle_conditions
from ects.full_node.signage_point import SignagePoint
from ects.types.blockchain_format.coin import Coin, hash_coin_list
from ects.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from ects.types.blockchain_format.pool_target import PoolTarget
from ects.types.blockchain_format.proof_of_space import ProofOfSpace
from ects.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from ects.types.blockchain_format.sized_bytes import bytes32
from ects.types.blockchain_format.vdf import VDFInfo, VDFProof
from ects.types.end_of_slot_bundle import EndOfSubSlotBundle
from ects.types.full_block import FullBlock
from ects.types.generator_types import BlockGenerator
from ects.types.unfinished_block import UnfinishedBlock
from ects.util.hash import std_hash
from ects.util.ints import uint8, uint32, uint64, uint128
from ects.util.merkle_set import MerkleSet
from ects.util.prev_transaction_block import get_prev_transaction_block
from ects.util.recursive_replace import recursive_replace
log = logging.getLogger(__name__)
# TODO: address hint error and remove ignore
# error: Incompatible default for argument "seed" (default has type "bytes", argument has type "bytes32")
# [assignment]
def create_foliage(
    constants: ConsensusConstants,
    reward_block_unfinished: RewardChainBlockUnfinished,
    block_generator: Optional[BlockGenerator],
    aggregate_sig: G2Element,
    additions: List[Coin],
    removals: List[Coin],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    timestamp: uint64,
    farmer_reward_puzzlehash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    seed: bytes32 = b"",  # type: ignore[assignment]
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
    """
    Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
    the return values are not None. This is called at the signage point, so some of this information may be
    tweaked at the infusion point.

    Args:
        constants: consensus constants being used for this chain
        reward_block_unfinished: the reward block to look at, potentially at the signage point
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
        prev_block: the previous block at the signage point
        blocks: dict from header hash to blocks, of all ancestor blocks
        total_iters_sp: total iters at the signage point
        timestamp: timestamp to put into the foliage block
        farmer_reward_puzzlehash: where to pay out farming reward
        pool_target: where to pay out pool reward
        get_plot_signature: retrieve the signature corresponding to the plot public key
        get_pool_signature: retrieve the signature corresponding to the pool public key
        seed: seed to randomize block
    """

    if prev_block is not None:
        res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        is_transaction_block: bool = res[0]
        prev_transaction_block: Optional[BlockRecord] = res[1]
    else:
        # Genesis is a transaction block
        prev_transaction_block = None
        is_transaction_block = True

    # Seeding makes the extension data deterministic for a given `seed`.
    random.seed(seed)
    # Use the extension data to create different blocks based on header hash
    extension_data: bytes32 = bytes32(random.randint(0, 100000000).to_bytes(32, "big"))
    if prev_block is None:
        height: uint32 = uint32(0)
    else:
        height = uint32(prev_block.height + 1)

    # Create filter
    byte_array_tx: List[bytes32] = []
    tx_additions: List[Coin] = []
    tx_removals: List[bytes32] = []

    pool_target_signature: Optional[G2Element] = get_pool_signature(
        pool_target, reward_block_unfinished.proof_of_space.pool_public_key
    )

    foliage_data = FoliageBlockData(
        reward_block_unfinished.get_hash(),
        pool_target,
        pool_target_signature,
        farmer_reward_puzzlehash,
        extension_data,
    )

    foliage_block_data_signature: G2Element = get_plot_signature(
        foliage_data.get_hash(),
        reward_block_unfinished.proof_of_space.plot_public_key,
    )

    prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
    if height != 0:
        assert prev_block is not None
        prev_block_hash = prev_block.header_hash

    generator_block_heights_list: List[uint32] = []
    foliage_transaction_block_hash: Optional[bytes32]

    if is_transaction_block:
        cost = uint64(0)

        # Calculate the cost of transactions
        if block_generator is not None:
            generator_block_heights_list = block_generator.block_height_list()
            result: NPCResult = get_name_puzzle_conditions(
                block_generator,
                constants.MAX_BLOCK_COST_CLVM,
                cost_per_byte=constants.COST_PER_BYTE,
                mempool_mode=True,
            )
            cost = calculate_cost_of_program(block_generator.program, result, constants.COST_PER_BYTE)

            # Fees are what is removed minus what is added back.
            removal_amount = 0
            addition_amount = 0
            for coin in removals:
                removal_amount += coin.amount
            for coin in additions:
                addition_amount += coin.amount
            spend_bundle_fees = removal_amount - addition_amount
        else:
            spend_bundle_fees = 0

        reward_claims_incorporated = []
        if height > 0:
            # Walk back from prev_block to collect the pool/farmer reward
            # claims this transaction block incorporates.
            assert prev_transaction_block is not None
            assert prev_block is not None
            curr: BlockRecord = prev_block
            while not curr.is_transaction_block:
                curr = blocks.block_record(curr.prev_hash)

            assert curr.fees is not None
            pool_coin = create_pool_coin(
                curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE
            )

            farmer_coin = create_farmer_coin(
                curr.height,
                curr.farmer_puzzle_hash,
                uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
                constants.GENESIS_CHALLENGE,
            )
            assert curr.header_hash == prev_transaction_block.header_hash
            reward_claims_incorporated += [pool_coin, farmer_coin]

            if curr.height > 0:
                curr = blocks.block_record(curr.prev_hash)
                # Prev block is not genesis
                while not curr.is_transaction_block:
                    pool_coin = create_pool_coin(
                        curr.height,
                        curr.pool_puzzle_hash,
                        calculate_pool_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    farmer_coin = create_farmer_coin(
                        curr.height,
                        curr.farmer_puzzle_hash,
                        calculate_base_farmer_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    reward_claims_incorporated += [pool_coin, farmer_coin]
                    curr = blocks.block_record(curr.prev_hash)
        # NOTE: this mutates the caller's `additions` list.
        additions.extend(reward_claims_incorporated.copy())
        for coin in additions:
            tx_additions.append(coin)
            # TODO: address hint error and remove ignore
            # error: Argument 1 to "append" of "list" has incompatible type "bytearray"; expected "bytes32"
            # [arg-type]
            byte_array_tx.append(bytearray(coin.puzzle_hash))  # type: ignore[arg-type]
        for coin in removals:
            tx_removals.append(coin.name())
            # TODO: address hint error and remove ignore
            # error: Argument 1 to "append" of "list" has incompatible type "bytearray"; expected "bytes32"
            # [arg-type]
            byte_array_tx.append(bytearray(coin.name()))  # type: ignore[arg-type]

        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())

        removal_merkle_set = MerkleSet()
        addition_merkle_set = MerkleSet()

        # Create removal Merkle set
        for coin_name in tx_removals:
            removal_merkle_set.add_already_hashed(coin_name)

        # Create addition Merkle set
        puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}

        for coin in tx_additions:
            if coin.puzzle_hash in puzzlehash_coin_map:
                puzzlehash_coin_map[coin.puzzle_hash].append(coin)
            else:
                puzzlehash_coin_map[coin.puzzle_hash] = [coin]

        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coin_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))

        additions_root = addition_merkle_set.get_root()
        removals_root = removal_merkle_set.get_root()

        # All-zero / all-one hashes are the placeholders used when there is
        # no generator / no generator refs, respectively.
        generator_hash = bytes32([0] * 32)
        if block_generator is not None:
            generator_hash = std_hash(block_generator.program)

        generator_refs_hash = bytes32([1] * 32)
        if generator_block_heights_list not in (None, []):
            generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
            generator_refs_hash = std_hash(generator_ref_list_bytes)

        filter_hash: bytes32 = std_hash(encoded)

        transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
            generator_hash,
            generator_refs_hash,
            aggregate_sig,
            uint64(spend_bundle_fees),
            cost,
            reward_claims_incorporated,
        )
        if prev_transaction_block is None:
            prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
        else:
            prev_transaction_block_hash = prev_transaction_block.header_hash

        assert transactions_info is not None
        foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
            prev_transaction_block_hash,
            timestamp,
            filter_hash,
            additions_root,
            removals_root,
            transactions_info.get_hash(),
        )
        assert foliage_transaction_block is not None
        foliage_transaction_block_hash = foliage_transaction_block.get_hash()
        foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
            foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key
        )
        assert foliage_transaction_block_signature is not None
    else:
        # Not a transaction block: no transaction-related foliage at all.
        foliage_transaction_block_hash = None
        foliage_transaction_block_signature = None
        foliage_transaction_block = None
        transactions_info = None
    assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)

    foliage = Foliage(
        prev_block_hash,
        reward_block_unfinished.get_hash(),
        foliage_data,
        foliage_block_data_signature,
        foliage_transaction_block_hash,
        foliage_transaction_block_signature,
    )

    return foliage, foliage_transaction_block, transactions_info
# TODO: address hint error and remove ignore
# error: Incompatible default for argument "seed" (default has type "bytes", argument has type "bytes32")
# [assignment]
def create_unfinished_block(
    constants: ConsensusConstants,
    sub_slot_start_total_iters: uint128,
    sub_slot_iters: uint64,
    signage_point_index: uint8,
    sp_iters: uint64,
    ip_iters: uint64,
    proof_of_space: ProofOfSpace,
    slot_cc_challenge: bytes32,
    farmer_reward_puzzle_hash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    signage_point: SignagePoint,
    timestamp: uint64,
    blocks: BlockchainInterface,
    seed: bytes32 = b"",  # type: ignore[assignment]
    block_generator: Optional[BlockGenerator] = None,
    aggregate_sig: G2Element = G2Element(),
    additions: Optional[List[Coin]] = None,
    removals: Optional[List[Coin]] = None,
    prev_block: Optional[BlockRecord] = None,
    finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,
) -> UnfinishedBlock:
    """
    Creates a new unfinished block using all the information available at the signage point. This will have to be
    modified using information from the infusion point.

    Args:
        constants: consensus constants being used for this chain
        sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
        sub_slot_iters: sub-slot-iters at the infusion point epoch
        signage_point_index: signage point index of the block to create
        sp_iters: sp_iters of the block to create
        ip_iters: ip_iters of the block to create
        proof_of_space: proof of space of the block to create
        slot_cc_challenge: challenge hash at the sp sub-slot
        farmer_reward_puzzle_hash: where to pay out farmer rewards
        pool_target: where to pay out pool rewards
        get_plot_signature: function that returns signature corresponding to plot public key
        get_pool_signature: function that returns signature corresponding to pool public key
        signage_point: signage point information (VDFs)
        timestamp: timestamp to add to the foliage block, if created
        seed: seed to randomize chain
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
        additions: Coins added in spend_bundle
        removals: Coins removed in spend_bundle
        prev_block: previous block (already in chain) from the signage point
        blocks: dictionary from header hash to SBR of all included SBR
        finished_sub_slots_input: finished_sub_slots at the signage point

    Returns:
    """
    if finished_sub_slots_input is None:
        finished_sub_slots: List[EndOfSubSlotBundle] = []
    else:
        # Copy so the caller's list is not mutated.
        finished_sub_slots = finished_sub_slots_input.copy()
    overflow: bool = sp_iters > ip_iters
    total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
    is_genesis: bool = prev_block is None

    new_sub_slot: bool = len(finished_sub_slots) > 0

    cc_sp_hash: bytes32 = slot_cc_challenge

    # Only enters this if statement if we are in testing mode (making VDF proofs here)
    if signage_point.cc_vdf is not None:
        assert signage_point.rc_vdf is not None
        cc_sp_hash = signage_point.cc_vdf.output.get_hash()
        rc_sp_hash = signage_point.rc_vdf.output.get_hash()
    else:
        # No signage-point VDFs: derive the reward chain sp hash from the
        # latest finished sub-slot, the genesis challenge, or the ancestors.
        if new_sub_slot:
            rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
        else:
            if is_genesis:
                rc_sp_hash = constants.GENESIS_CHALLENGE
            else:
                assert prev_block is not None
                assert blocks is not None
                curr = prev_block
                while not curr.first_in_sub_slot:
                    curr = blocks.block_record(curr.prev_hash)
                assert curr.finished_reward_slot_hashes is not None
                rc_sp_hash = curr.finished_reward_slot_hashes[-1]
        signage_point = SignagePoint(None, None, None, None)

    cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
    rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
    assert cc_sp_signature is not None
    assert rc_sp_signature is not None
    # Sanity check the plot signature before building the block.
    assert blspy.AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)

    total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))

    rc_block = RewardChainBlockUnfinished(
        total_iters,
        signage_point_index,
        slot_cc_challenge,
        proof_of_space,
        signage_point.cc_vdf,
        cc_sp_signature,
        signage_point.rc_vdf,
        rc_sp_signature,
    )
    if additions is None:
        additions = []
    if removals is None:
        removals = []
    (foliage, foliage_transaction_block, transactions_info,) = create_foliage(
        constants,
        rc_block,
        block_generator,
        aggregate_sig,
        additions,
        removals,
        prev_block,
        blocks,
        total_iters_sp,
        timestamp,
        farmer_reward_puzzle_hash,
        pool_target,
        get_plot_signature,
        get_pool_signature,
        seed,
    )
    return UnfinishedBlock(
        finished_sub_slots,
        rc_block,
        signage_point.cc_proof,
        signage_point.rc_proof,
        foliage,
        foliage_transaction_block,
        transactions_info,
        block_generator.program if block_generator else None,
        block_generator.block_height_list() if block_generator else [],
    )
def unfinished_block_to_full_block(
    unfinished_block: UnfinishedBlock,
    cc_ip_vdf: VDFInfo,
    cc_ip_proof: VDFProof,
    rc_ip_vdf: VDFInfo,
    rc_ip_proof: VDFProof,
    icc_ip_vdf: Optional[VDFInfo],
    icc_ip_proof: Optional[VDFProof],
    finished_sub_slots: List[EndOfSubSlotBundle],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    difficulty: uint64,
) -> FullBlock:
    """
    Converts an unfinished block to a finished block. Includes all the infusion point VDFs as well as tweaking
    other properties (height, weight, sub-slots, etc)

    Args:
        unfinished_block: the unfinished block to finish
        cc_ip_vdf: the challenge chain vdf info at the infusion point
        cc_ip_proof: the challenge chain proof
        rc_ip_vdf: the reward chain vdf info at the infusion point
        rc_ip_proof: the reward chain proof
        icc_ip_vdf: the infused challenge chain vdf info at the infusion point
        icc_ip_proof: the infused challenge chain proof
        finished_sub_slots: finished sub slots from the prev block to the infusion point
        prev_block: prev block from the infusion point
        blocks: dictionary from header hash to SBR of all included SBR
        total_iters_sp: total iters at the signage point
        difficulty: difficulty at the infusion point

    """
    # Replace things that need to be replaced, since foliage blocks did not necessarily have the latest information
    if prev_block is None:
        # Genesis: keep everything from the unfinished block as-is.
        is_transaction_block = True
        new_weight = uint128(difficulty)
        new_height = uint32(0)
        new_foliage = unfinished_block.foliage
        new_foliage_transaction_block = unfinished_block.foliage_transaction_block
        new_tx_info = unfinished_block.transactions_info
        new_generator = unfinished_block.transactions_generator
        new_generator_ref_list = unfinished_block.transactions_generator_ref_list
    else:
        is_transaction_block, _ = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        new_weight = uint128(prev_block.weight + difficulty)
        new_height = uint32(prev_block.height + 1)
        if is_transaction_block:
            new_fbh = unfinished_block.foliage.foliage_transaction_block_hash
            new_fbs = unfinished_block.foliage.foliage_transaction_block_signature
            new_foliage_transaction_block = unfinished_block.foliage_transaction_block
            new_tx_info = unfinished_block.transactions_info
            new_generator = unfinished_block.transactions_generator
            new_generator_ref_list = unfinished_block.transactions_generator_ref_list
        else:
            # Not a transaction block: strip all transaction-related fields.
            new_fbh = None
            new_fbs = None
            new_foliage_transaction_block = None
            new_tx_info = None
            new_generator = None
            new_generator_ref_list = []
        assert (new_fbh is None) == (new_fbs is None)
        # Re-point the foliage at the actual previous block.
        new_foliage = replace(
            unfinished_block.foliage,
            prev_block_hash=prev_block.header_hash,
            foliage_transaction_block_hash=new_fbh,
            foliage_transaction_block_signature=new_fbs,
        )
    ret = FullBlock(
        finished_sub_slots,
        RewardChainBlock(
            new_weight,
            new_height,
            unfinished_block.reward_chain_block.total_iters,
            unfinished_block.reward_chain_block.signage_point_index,
            unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash,
            unfinished_block.reward_chain_block.proof_of_space,
            unfinished_block.reward_chain_block.challenge_chain_sp_vdf,
            unfinished_block.reward_chain_block.challenge_chain_sp_signature,
            cc_ip_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_signature,
            rc_ip_vdf,
            icc_ip_vdf,
            is_transaction_block,
        ),
        unfinished_block.challenge_chain_sp_proof,
        cc_ip_proof,
        unfinished_block.reward_chain_sp_proof,
        rc_ip_proof,
        icc_ip_proof,
        new_foliage,
        new_foliage_transaction_block,
        new_tx_info,
        new_generator,
        new_generator_ref_list,
    )
    # Update the foliage's reward_block_hash to the finalized reward chain
    # block's hash.
    return recursive_replace(
        ret,
        "foliage.reward_block_hash",
        ret.reward_chain_block.get_hash(),
    )
| 41.944134 | 117 | 0.694637 |
6607f05cf8f189d11d1408fcd1a525a2fa247355 | 1,880 | py | Python | aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/UpdateFaceConfigRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/UpdateFaceConfigRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/UpdateFaceConfigRequest.py | leafcoder/aliyun-openapi-python-sdk | 26b441ab37a5cda804de475fd5284bab699443f1 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class UpdateFaceConfigRequest(RpcRequest):
    """RPC request wrapper for the Cloudauth ``UpdateFaceConfig`` API (version 2019-03-07)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'UpdateFaceConfig')
        self.set_method('POST')
        # Propagate SDK endpoint routing data when the base class supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_BizName(self):
        """Return the ``BizName`` query parameter, or ``None`` if unset."""
        params = self.get_query_params()
        return params.get('BizName')

    def set_BizName(self, BizName):
        """Set the ``BizName`` query parameter."""
        self.add_query_param('BizName', BizName)

    def get_BizType(self):
        """Return the ``BizType`` query parameter, or ``None`` if unset."""
        params = self.get_query_params()
        return params.get('BizType')

    def set_BizType(self, BizType):
        """Set the ``BizType`` query parameter."""
        self.add_query_param('BizType', BizType)

    def get_SourceIp(self):
        """Return the ``SourceIp`` query parameter, or ``None`` if unset."""
        params = self.get_query_params()
        return params.get('SourceIp')

    def set_SourceIp(self, SourceIp):
        """Set the ``SourceIp`` query parameter."""
        self.add_query_param('SourceIp', SourceIp)

    def get_Lang(self):
        """Return the ``Lang`` query parameter, or ``None`` if unset."""
        params = self.get_query_params()
        return params.get('Lang')

    def set_Lang(self, Lang):
        """Set the ``Lang`` query parameter."""
        self.add_query_param('Lang', Lang)
e483ffbdacccab935d795cfe904837b98369d7fe | 1,999 | py | Python | stwfsapy/tests/expansion/ampersand_expansion_test.py | mo-fu/stwfsapy | dd47c15e5b1b5422fd4ce6fe63ceb3e25ef15322 | [
"Apache-2.0"
] | null | null | null | stwfsapy/tests/expansion/ampersand_expansion_test.py | mo-fu/stwfsapy | dd47c15e5b1b5422fd4ce6fe63ceb3e25ef15322 | [
"Apache-2.0"
] | null | null | null | stwfsapy/tests/expansion/ampersand_expansion_test.py | mo-fu/stwfsapy | dd47c15e5b1b5422fd4ce6fe63ceb3e25ef15322 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Leibniz Information Centre for Economics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stwfsapy.expansion as e
# Fixture strings for the ampersand-abbreviation tests.
first = 'R'
second = 'D'
# "R&D": exactly one ampersand joining two single letters.
ampersand_string = '&'.join([first, second])
# "A&R&D": more than one ampersand, which the matcher must reject.
multi_ampersand_string = '&'.join(['A', ampersand_string])
def test_matches():
    """A single-ampersand abbreviation matches, with both letters captured."""
    m = e._ampersand_abbreviation_matcher.search(ampersand_string)
    assert m is not None
    assert m.group(0) == ampersand_string
    assert m.group(1) == first
    assert m.group(2) == second
def test_no_match_at_end_of_string():
    """The abbreviation is not matched when it sits at the end of the string."""
    assert e._ampersand_abbreviation_matcher.search(" " + ampersand_string) is None
def test_no_match_at_start_of_string():
    """The abbreviation is not matched when it sits at the start of the string."""
    assert e._ampersand_abbreviation_matcher.search(ampersand_string + " ") is None
def test_no_match_multiple_ampersand():
    """Strings containing more than one ampersand are not matched."""
    assert e._ampersand_abbreviation_matcher.search(multi_ampersand_string) is None
def test_replacement():
    """Every character except the last is followed by an optional space (' ?')."""
    expanded = e._expand_ampersand_with_spaces_fun(ampersand_string)
    # Two characters are inserted after each of the first len-1 characters.
    assert len(expanded) == len(ampersand_string) + 4
    for i, ch in enumerate(ampersand_string[:-1]):
        base = 3 * i
        assert expanded[base] == ch
        assert expanded[base + 1] == ' '
        assert expanded[base + 2] == '?'
    assert expanded[-1] == ampersand_string[-1]
def test_no_replacement_multiple_ampersand():
    """Strings with several ampersands are passed through unchanged."""
    assert (e._expand_ampersand_with_spaces_fun(multi_ampersand_string)
            == multi_ampersand_string)
| 32.770492 | 76 | 0.745373 |
2989dddaaf95576046480e8aabe648b6e8cfa424 | 12,020 | py | Python | pyvda/pyvda.py | choplin/pyvda | ea89917ed1f36e43d766bd5e65697cc426314485 | [
"MIT"
] | null | null | null | pyvda/pyvda.py | choplin/pyvda | ea89917ed1f36e43d766bd5e65697cc426314485 | [
"MIT"
] | null | null | null | pyvda/pyvda.py | choplin/pyvda | ea89917ed1f36e43d766bd5e65697cc426314485 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import List
from comtypes import GUID
from ctypes import windll
from .com_defns import IApplicationView, IVirtualDesktop
from .utils import (
get_vd_manager_internal,
get_view_collection,
get_pinned_apps,
)
# Magic argument for AllowSetForegroundWindow (see VirtualDesktop.go below):
# allow any process to set the foreground window (winuser.h ASFW_ANY).
ASFW_ANY = -1
class AppView():
    """Wrapper around an ``IApplicationView`` COM object.

    Exposes window-level functionality:
      * setting focus / switching to the window,
      * pinning and unpinning (showing the window on every virtual desktop),
      * moving the window between virtual desktops.
    """

    def __init__(self, hwnd: int = None, view: 'IApplicationView' = None):
        """Wrap a window, identified by exactly one of the two arguments.

        Args:
            hwnd (int, optional): Handle of the window to wrap.
            view (IApplicationView, optional): An already-obtained COM view.
        """
        if hwnd:
            # Resolve the handle through the application view collection.
            self._view = get_view_collection().GetViewForHwnd(hwnd)
        elif view:
            self._view = view
        else:
            raise Exception(f"Must pass 'hwnd' or 'view'")

    def __eq__(self, other):
        # Two AppViews are equal iff they wrap the same window handle.
        return self.hwnd == other.hwnd

    @property
    def hwnd(self) -> int:
        """Handle of the wrapped window."""
        return self._view.GetThumbnailWindow()

    @property
    def app_id(self) -> int:
        """ID of the wrapped window's app."""
        return self._view.GetAppUserModelId()

    @classmethod
    def current(cls):
        """Return an AppView for the window that currently has focus."""
        return cls(view=get_view_collection().GetViewInFocus())

    # ------------------------------------------------
    # IApplicationView methods
    # ------------------------------------------------
    def is_shown_in_switchers(self) -> bool:
        """Whether the window appears in the alt-tab view."""
        return bool(self._view.GetShowInSwitchers())

    def is_visible(self) -> bool:
        """Whether the window is visible."""
        return bool(self._view.GetVisibility())

    def get_activation_timestamp(self) -> int:
        """Timestamp of this window's most recent activation."""
        return self._view.GetLastActivationTimestamp()

    def set_focus(self):
        """Focus the window."""
        return self._view.SetFocus()

    def switch_to(self):
        """Switch to the window, as the alt-tab menu does.

        Behaves slightly differently to ``set_focus``.
        """
        return self._view.SwitchTo()

    # ------------------------------------------------
    # IVirtualDesktopPinnedApps methods
    # ------------------------------------------------
    def pin(self):
        """Pin the window ('show window on all desktops' toggle)."""
        get_pinned_apps().PinView(self._view)

    def unpin(self):
        """Unpin the window ('show window on all desktops' toggle)."""
        get_pinned_apps().UnpinView(self._view)

    def is_pinned(self) -> bool:
        """Return whether this window is pinned."""
        return get_pinned_apps().IsViewPinned(self._view)

    def pin_app(self):
        """Pin this window's app ('show windows from this app on all desktops')."""
        get_pinned_apps().PinAppID(self.app_id)

    def unpin_app(self):
        """Unpin this window's app ('show windows from this app on all desktops')."""
        get_pinned_apps().UnpinAppID(self.app_id)

    def is_app_pinned(self) -> bool:
        """Return whether this window's app is pinned."""
        return get_pinned_apps().IsAppIdPinned(self.app_id)

    # ------------------------------------------------
    # IVirtualDesktopManagerInternal methods
    # ------------------------------------------------
    def move(self, desktop: VirtualDesktop):
        """Move the window to a different virtual desktop.

        Args:
            desktop (VirtualDesktop): Desktop to move the window to.

        Example:
            >>> AppView.current().move(VirtualDesktop(1))
        """
        manager = get_vd_manager_internal()
        manager.MoveViewToDesktop(self._view, desktop._virtual_desktop)

    @property
    def desktop_id(self) -> GUID:
        """GUID of the desktop which the window is on."""
        return self._view.GetVirtualDesktopId()

    @property
    def desktop(self) -> VirtualDesktop:
        """The virtual desktop which this window is on."""
        return VirtualDesktop(desktop_id=self.desktop_id)

    def is_on_desktop(self, desktop: VirtualDesktop, include_pinned: bool = True) -> bool:
        """Return whether this window is on the given virtual desktop.

        Args:
            desktop (VirtualDesktop): Desktop to check.
            include_pinned (bool, optional): Also count pinned windows/apps
                as present on every desktop. Defaults to True.
        """
        on_this_desktop = self.desktop_id == desktop.id
        if not include_pinned:
            return on_this_desktop
        return on_this_desktop or self.is_pinned() or self.is_app_pinned()

    def is_on_current_desktop(self) -> bool:
        """Return whether this window is on the current desktop."""
        return self.is_on_desktop(VirtualDesktop.current())
def get_apps_by_z_order(switcher_windows: bool = True, current_desktop: bool = True) -> List[AppView]:
    """List AppViews ordered by Z position, with the foreground window first.

    Args:
        switcher_windows (bool, optional): Only include windows which appear
            in the alt-tab dialogue. Defaults to True.
        current_desktop (bool, optional): Only include windows which are on
            the current virtual desktop. Defaults to True.

    Returns:
        List[AppView]: AppViews matching the specified criteria.
    """
    views = get_view_collection().GetViewsByZOrder()
    candidates = [AppView(view=v) for v in views.iter(IApplicationView)]
    if not (switcher_windows or current_desktop):
        # No filtering requested.
        return candidates
    active = VirtualDesktop.current()
    kept = []
    for candidate in candidates:
        if switcher_windows and not candidate.is_shown_in_switchers():
            continue
        if current_desktop and not candidate.is_on_desktop(active):
            continue
        kept.append(candidate)
    return kept
class VirtualDesktop():
    """Wrapper around the ``IVirtualDesktop`` COM object: one virtual desktop."""

    def __init__(
            self,
            number: int = None,
            desktop_id: GUID = None,
            desktop: 'IVirtualDesktop' = None,
            current: bool = False
    ):
        """Identify a desktop by exactly one of the arguments.

        Args:
            number (int, optional): 1-indexed position of the desktop in the
                task view.
            desktop_id (GUID, optional): A desktop GUID.
            desktop (IVirtualDesktop, optional): A raw COM desktop object.
            current (bool, optional): Use the currently active desktop.
        """
        self._manager_internal = get_vd_manager_internal()
        if number:
            if number <= 0:
                raise ValueError(f"Desktop number must be at least 1, {number} provided")
            desktops = self._manager_internal.GetDesktops()
            desktop_count = desktops.GetCount()
            if number > desktop_count:
                raise ValueError(
                    f"Desktop number {number} exceeds the number of desktops, {desktop_count}."
                )
            # Task view numbering is 1-based; the COM array is 0-based.
            self._virtual_desktop = desktops.get_at(number - 1, IVirtualDesktop)
        elif desktop_id:
            self._virtual_desktop = self._manager_internal.FindDesktop(desktop_id)
        elif desktop:
            self._virtual_desktop = desktop
        elif current:
            self._virtual_desktop = self._manager_internal.GetCurrentDesktop()
        else:
            raise Exception("Must provide one of 'number', 'desktop_id' or 'desktop'")

    @classmethod
    def current(cls):
        """Return a ``VirtualDesktop`` for the currently active desktop."""
        return cls(current=True)

    @property
    def id(self) -> GUID:
        """The unique GUID of this desktop."""
        return self._virtual_desktop.GetID()

    @property
    def number(self) -> int:
        """1-indexed position of this desktop in the task view.

        Raises if the desktop is no longer registered with the manager.
        """
        desktops = self._manager_internal.GetDesktops()
        for position, vd in enumerate(desktops.iter(IVirtualDesktop), 1):
            if self.id == vd.GetID():
                return position
        raise Exception(f"Desktop with ID {self.id} not found")

    def go(self, allow_set_foreground: bool = True):
        """Switch to this virtual desktop.

        Args:
            allow_set_foreground (bool, optional): Call
                ``AllowSetForegroundWindow(ASFW_ANY)`` before switching, which
                partially fixes focus lingering behind after the switch.
                Defaults to True.

        Note:
            More details at
            https://github.com/Ciantic/VirtualDesktopAccessor/issues/4 and
            https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-allowsetforegroundwindow.
        """
        if allow_set_foreground:
            windll.user32.AllowSetForegroundWindow(ASFW_ANY)
        self._manager_internal.SwitchDesktop(self._virtual_desktop)

    def apps_by_z_order(self, include_pinned: bool = True) -> List[AppView]:
        """List switcher-visible AppViews on this desktop, foreground first.

        Args:
            include_pinned (bool, optional): Count pinned windows and apps as
                present on this desktop. Defaults to True.

        Returns:
            List[AppView]: AppViews matching the specified criteria, ordered
            by Z position.
        """
        views = get_view_collection().GetViewsByZOrder()
        return [
            app
            for app in (AppView(view=v) for v in views.iter(IApplicationView))
            if app.is_shown_in_switchers() and app.is_on_desktop(self, include_pinned)
        ]
def get_virtual_desktops() -> List[VirtualDesktop]:
    """List all current virtual desktops, one per desktop in the task view.

    Returns:
        List[VirtualDesktop]: Virtual desktops currently active.
    """
    desktops = get_vd_manager_internal().GetDesktops()
    result = []
    for vd in desktops.iter(IVirtualDesktop):
        result.append(VirtualDesktop(desktop=vd))
    return result
| 33.575419 | 204 | 0.612812 |
225318dcd097d400435197dddde9d8a1d4d8da90 | 58,691 | py | Python | tensorflow_probability/python/distributions/internal/statistical_testing.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/internal/statistical_testing.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/internal/statistical_testing.py | ykkawana/probability | 65bfd91cf6e855674da8dd9976c067f79da46e90 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Statistical test assertions calibrated for their error rates.
Statistical tests have an inescapable probability of error: a correct
sampler can still fail a test by chance, and an incorrect sampler can
still pass a test by chance. This library is about bounding both of
those error rates. This requires admitting a task-specific notion of
"discrepancy": Correct code will fail rarely, code that misbehaves by
more than the discrepancy will pass rarely, and nothing reliable can
be said about code that misbehaves, but misbehaves by less than the
discrepancy.
# Example
Consider testing that the mean of a scalar probability distribution P
is some expected constant. Suppose the support of P is the interval
`[0, 1]`. Then you might do this:
```python
from tensorflow_probability.python.distributions.internal import statistical_testing
expected_mean = ...
num_samples = 5000
samples = ... draw 5000 samples from P
# Check that the mean looks right
check1 = statistical_testing.assert_true_mean_equal_by_dkwm(
samples, low=0., high=1., expected=expected_mean,
false_fail_rate=1e-6)
# Check that the difference in means detectable with 5000 samples is
# small enough
check2 = tf.assert_less(
statistical_testing.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, low=0., high=1.0,
false_fail_rate=1e-6, false_pass_rate=1e-6),
0.01)
# Be sure to execute both assertion ops
sess.run([check1, check2])
```
The second assertion is an instance of experiment design. It's a
deterministic computation (independent of the code under test) that
checks that `5000` samples is enough to reliably resolve mean
differences of `0.01` or more. Here "reliably" means that if the code
under test is correct, the probability of drawing an unlucky sample
that causes this test to fail is at most 1e-6; and if the code under
test is incorrect enough that its true mean is 0.01 more or less than
expected, then the probability of drawing a "lucky" sample that causes
the test to false-pass is also at most 1e-6.
# Overview
Every function in this library can be characterized in terms of:
- The property being tested, such as the full density of the
distribution under test, or just its true mean, or a single
Bernoulli probability, etc.
- The relation being asserted, e.g., whether the mean is less, more,
or equal to the given expected value.
- The stochastic bound being relied upon, such as the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
or the CDF of the binomial distribution (for assertions about
Bernoulli probabilities).
- The number of sample sets in the statistical test. For example,
testing equality of means has a one-sample variant, where the
expected mean is given exactly, and a two-sample variant, where the
expected mean is itself given by a set of samples (e.g., from an
alternative algorithm).
- What operation(s) of the test are to be performed. Each test has
three of these:
1. `assert` executes the test. Specifically, it creates a TF op that
produces an error if it has enough evidence to prove that the
property under test is violated. These functions depend on the
desired false failure rate, because that determines the sizes of
appropriate confidence intervals, etc.
2. `min_discrepancy` computes the smallest difference reliably
detectable by that test, given the sample count and error rates.
What it's a difference of is test-specific. For example, a test
for equality of means would make detection guarantees about the
difference of the true means.
3. `min_num_samples` computes the minimum number of samples needed
to reliably detect a given discrepancy with given error rates.
The latter two are for experimental design, and are meant to be
usable either interactively or inline in the overall test method.
This library follows a naming convention, to make room for every
combination of the above. A name mentions the operation first, then
the property, then the relation, then the bound, then, if the test
takes more than one set of samples, a token indicating this. For
example, `assert_true_mean_equal_by_dkwm` (which is implicitly
one-sample). Each name is a grammatically sound noun phrase (or verb
phrase, for the asserts).
# Asymptotic properties
The number of samples needed tends to scale as `O(1/discrepancy**2)` and
as `O(log(1/error_rate))`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import tensorflow as tf
from tensorflow_probability.python.internal import dtype_util
# Public API of this module.  Underscore-prefixed helpers (e.g.
# _dkwm_cdf_envelope, _batch_sort_vector) are deliberately excluded.
__all__ = [
    'assert_true_cdf_equal_by_dkwm',
    'min_discrepancy_of_true_cdfs_detectable_by_dkwm',
    'min_num_samples_for_dkwm_cdf_test',
    'kolmogorov_smirnov_distance',
    'empirical_cdfs',
    'true_mean_confidence_interval_by_dkwm',
    'assert_true_mean_equal_by_dkwm',
    'min_discrepancy_of_true_means_detectable_by_dkwm',
    'min_num_samples_for_dkwm_mean_test',
    'assert_true_mean_in_interval_by_dkwm',
    'assert_true_mean_equal_by_dkwm_two_sample',
    'min_discrepancy_of_true_means_detectable_by_dkwm_two_sample',
    'min_num_samples_for_dkwm_mean_two_sample_test',
]
def assert_true_cdf_equal_by_dkwm(
    samples, cdf, left_continuous_cdf=None, false_fail_rate=1e-6, name=None):
  """Asserts that the given samples are consistent with the expected CDF.

  Fails if the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
  provides enough evidence, at statistical significance `false_fail_rate` or
  stronger, that the true CDF of the distribution the samples were drawn from
  is _not_ the given `cdf`; passes otherwise.  To also check that a pass is
  not spurious, see `min_num_samples_for_dkwm_cdf_test` and
  `min_discrepancy_of_true_cdfs_detectable_by_dkwm`.

  If the distribution has atoms (e.g., is discrete), CDF values are needed on
  both sides of each discontinuity: `cdf` must be inclusive of the atom
  (`cdf(x) = Pr(X <= x)`) and `left_continuous_cdf` exclusive of it
  (`left_continuous_cdf(x) = Pr(X < x)`), preserving the invariant
  `cdf(x) - left_continuous_cdf(x) = pmf(x)`.  For example, for the
  degenerate distribution with all mass at 0:

  ```
  cdf=lambda x: tf.where(x < 0, 0., 1.)
  left_continuous_cdf=lambda x: tf.where(x <= 0, 0., 1.)
  ```

  Note that `false_fail_rate` is a total false failure rate for all the
  assertions in the batch, so a nontrivial batch demands stronger evidence to
  fail any one member.

  Args:
    samples: Tensor of shape [n] + B. Samples from some (batch of)
      scalar-event distribution(s) of interest, giving a (batch of) empirical
      CDF(s).  Assumed IID across the 0 dimension.
    cdf: Analytic cdf inclusive of any atoms, as a function that can compute
      CDF values in batch.  Must accept a Tensor of shape B + [n] with the
      same dtype as `samples` and return CDF values of the same shape.
    left_continuous_cdf: Analytic left-continuous cdf with the same batch
      signature as `cdf`.  Redundant (and may be omitted) when the
      distribution has no atoms; omitting it asserts the distribution is
      atom-free.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total
      rate of mistakes.
    name: A name for this operation (optional).

  Returns:
    check: Op that raises `InvalidArgumentError` if any expected CDF is
      outside the corresponding confidence envelope.
  """
  with tf.name_scope(
      name, 'assert_true_cdf_equal_by_dkwm', [samples, false_fail_rate]):
    dtype = dtype_util.common_dtype([samples, false_fail_rate], tf.float32)
    samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    # Statically insists that false_fail_rate is scalar.
    tf.compat.v1.assert_scalar(false_fail_rate)
    # K-S statistic between the empirical CDF and the expected one.
    ks_distance = kolmogorov_smirnov_distance(samples, cdf, left_continuous_cdf)
    # Failure rates sum across the batch, so each member gets a share.
    per_item_rate = _itemwise_error_rate(
        total_rate=false_fail_rate,
        param_tensors=[], samples_tensor=samples)
    num_samples = tf.shape(input=samples)[0]
    envelope = _dkwm_cdf_envelope(num_samples, per_item_rate)
    return tf.compat.v1.assert_less_equal(
        ks_distance, envelope, message='Empirical CDF outside K-S envelope')
def min_discrepancy_of_true_cdfs_detectable_by_dkwm(
    n, false_fail_rate, false_pass_rate, name=None):
  """Returns the minimum CDF discrepancy that a DKWM-based test can detect.

  DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).

  Note that `false_fail_rate` is a total false failure rate for all the tests
  in the batch, so a nontrivial batch demands more samples per member.  The
  `false_pass_rate` is also a total, but is treated asymmetrically: if each
  test detects its discrepancy with probability at least
  `1 - false_pass_rate`, then running all of them and failing on any single
  failure jointly detects all those discrepancies at the same
  `false_pass_rate`.

  Args:
    n: `Tensor` of numbers of samples to be drawn from the distributions
      of interest.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total
      rate of false failures.
    false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
      of false passes.
    name: A name for this operation (optional).

  Returns:
    discr: `Tensor` of lower bounds on the K-S distances between true
      CDFs detectable by a DKWM-based test.

  For each batch member `i`, of `K` total, drawing `n[i]` samples suffices to
  detect a K-S distance in CDFs of `discr[i]` or more: (a) if the true CDF
  equals the expected CDF, `assert_true_cdf_equal_by_dkwm` fails with
  probability at most `false_fail_rate / K` (i.e. `false_fail_rate` for the
  whole batch), and (b) if the true CDF differs by at least `discr[i]`, it
  passes with probability at most `false_pass_rate`.

  The detectable discrepancy scales as

  - `O(1 / sqrt(n[i]))`,
  - `O(-log(false_fail_rate/K))`, and
  - `O(-log(false_pass_rate))`.
  """
  with tf.name_scope(name, 'min_discrepancy_of_true_cdfs_detectable_by_dkwm',
                     [n, false_fail_rate, false_pass_rate]):
    dtype = dtype_util.common_dtype(
        [n, false_fail_rate, false_pass_rate], tf.float32)
    n = tf.convert_to_tensor(value=n, name='n', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    false_pass_rate = tf.convert_to_tensor(
        value=false_pass_rate, name='false_pass_rate', dtype=dtype)
    # For a true CDF F, DKWM stochastically bounds how far the observed
    # empirical CDF F_n can stray; applying DKWM again bounds the farthest
    # candidate CDF F' the test might accept.  At worst these two errors
    # point the same way, so the detectable distance is their sum.
    # False fail rates sum across the batch (so that envelope uses the
    # itemwise rate); false pass rates max (so that one does not).
    pass_envelope = _dkwm_cdf_envelope(n, false_pass_rate)
    itemwise_false_fail_rate = _itemwise_error_rate(
        total_rate=false_fail_rate, param_tensors=[n])
    fail_envelope = _dkwm_cdf_envelope(n, itemwise_false_fail_rate)
    return pass_envelope + fail_envelope
def min_num_samples_for_dkwm_cdf_test(
    discrepancy, false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
  """Returns how many samples suffice for a one-sample DKWM CDF test.

  To wit, returns an upper bound on the number of samples necessary to
  guarantee detecting a K-S distance of CDFs of at least the given
  `discrepancy`, with the given `false_fail_rate` and `false_pass_rate`,
  using the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
  on a scalar distribution.

  Args:
    discrepancy: Floating-point `Tensor` of desired upper limits on K-S
      distances that may go undetected with probability higher than
      `1 - false_pass_rate`.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total
      rate of false failures.
    false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
      of false passes.
    name: A name for this operation (optional).

  Returns:
    n: `Tensor` of numbers of samples to be drawn from the distributions
      of interest.

  For each batch member `i`, of `K` total, drawing `n[i]` samples suffices to
  detect a K-S distance of CDFs of `discrepancy[i]` or more: (a) if the true
  CDF equals the expected CDF, `assert_true_cdf_equal_by_dkwm` fails with
  probability at most `false_fail_rate / K` (i.e. `false_fail_rate` for the
  whole batch), and (b) if the true CDF differs by at least `discrepancy[i]`,
  it passes with probability at most `false_pass_rate`.

  The required number of samples scales as

  - `O(-log(false_fail_rate/K))`,
  - `O(-log(false_pass_rate))`, and
  - `O(1 / discrepancy[i]**2)`.
  """
  with tf.name_scope(
      name, 'min_num_samples_for_dkwm_cdf_test',
      [false_fail_rate, false_pass_rate, discrepancy]):
    dtype = dtype_util.common_dtype(
        [false_fail_rate, false_pass_rate, discrepancy], tf.float32)
    discrepancy = tf.convert_to_tensor(
        value=discrepancy, name='discrepancy', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    false_pass_rate = tf.convert_to_tensor(
        value=false_pass_rate, name='false_pass_rate', dtype=dtype)
    # Allot half the discrepancy to each of the two DKWM envelopes.  One
    # could allocate them more cleverly, but the even split is sound.
    envelope = discrepancy / 2.
    itemwise_false_fail_rate = _itemwise_error_rate(
        total_rate=false_fail_rate, param_tensors=[discrepancy])
    n_for_fail_bound = -tf.math.log(itemwise_false_fail_rate / 2.) / (
        2. * envelope**2)
    n_for_pass_bound = -tf.math.log(false_pass_rate / 2.) / (2. * envelope**2)
    return tf.maximum(n_for_fail_bound, n_for_pass_bound)
def kolmogorov_smirnov_distance(
    samples, cdf, left_continuous_cdf=None, name=None):
  """Computes the Kolmogorov-Smirnov distance between the given CDFs.

  The (absolute) Kolmogorov-Smirnov distance is the maximum (absolute)
  discrepancy between the CDFs, i.e.,

    sup_x(|cdf1(x) - cdf2(x)|)

  This is tractable to compute exactly when at least one of the CDFs is an
  empirical CDF given by samples, because the analytic one need only be
  queried at the sampled values.

  If the distribution has atoms (e.g., is discrete), CDF values are needed on
  both sides of each discontinuity: `cdf` must be inclusive of the atom
  (`cdf(x) = Pr(X <= x)`) and `left_continuous_cdf` exclusive of it
  (`left_continuous_cdf(x) = Pr(X < x)`).  For example, for the degenerate
  distribution with all mass at 0:

  ```
  cdf=lambda x: tf.where(x < 0, 0., 1.)
  left_continuous_cdf=lambda x: tf.where(x <= 0, 0., 1.)
  ```

  Args:
    samples: Tensor of shape [n] + B. Samples from some (batch of)
      scalar-event distribution(s) of interest, giving a (batch of) empirical
      CDF(s).  Assumed IID across the 0 dimension.
    cdf: Analytic cdf inclusive of any atoms, as a function that can compute
      CDF values in batch.  Must accept a Tensor of shape B + [n] with the
      same dtype as `samples` and return CDF values of the same shape.
    left_continuous_cdf: Analytic left-continuous cdf with the same batch
      signature as `cdf`.  Redundant (and may be omitted) when the
      distribution has no atoms; omitting it assumes the distribution is
      atom-free.
    name: A name for this operation (optional).

  Returns:
    distance: Tensor of shape B: (Absolute) Kolmogorov-Smirnov distance
      between the empirical and analytic CDFs.
  """
  with tf.name_scope(
      name, 'kolmogorov_smirnov_distance', [samples]):
    rank = tf.rank(samples)
    # Roll the sample dimension (axis 0) to the rightmost position, where
    # _batch_sort_vector wants it, then order the samples within each
    # batch member.
    perm = tf.concat([tf.range(1, rank), [0]], axis=0)
    sorted_samples = _batch_sort_vector(tf.transpose(a=samples, perm=perm))
    # Analytic CDF values at each sample point.
    cdfs = cdf(sorted_samples)
    if left_continuous_cdf is None:
      # Atom-free distribution: the two one-sided CDFs coincide.
      left_continuous_cdfs = cdfs
    else:
      left_continuous_cdfs = left_continuous_cdf(sorted_samples)
    # Per-batch-member empirical CDF values just below and just above each
    # sorted sample.  If samples repeat within a batch member, some entries
    # are wrong: in `below`, only the first of a run of equal samples is
    # correct (the rest are too high); in `above`, only the last (the rest
    # are too low).  That is OK because those errors cannot change the
    # maxima taken next.  Could defensively use `empirical_cdfs` here, but
    # that relies on the relatively more expensive `searchsorted`.
    n = tf.cast(tf.shape(input=sorted_samples)[-1], dtype=cdfs.dtype)
    below = tf.range(n, dtype=cdfs.dtype) / n
    above = tf.range(1, n + 1, dtype=cdfs.dtype) / n
    # One-sided comparisons suffice on each side of every discontinuity of
    # the empirical CDF: the empirical CDF is piecewise constant and the
    # true CDF monotonic, so F(x) - F_n(x) peaks just before a jump and
    # F_n(x) - F(x) just after.
    distance_below = tf.reduce_max(
        input_tensor=left_continuous_cdfs - below, axis=-1)
    distance_above = tf.reduce_max(input_tensor=above - cdfs, axis=-1)
    return tf.maximum(distance_below, distance_above)
def _batch_sort_vector(x, ascending=True, name=None):
  """Sorts the rightmost dimension of each batch member independently.

  Args:
    x: `Tensor` whose -1 dimension is to be sorted.
    ascending: Python `bool`; if `True` (default), sort smallest-first.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` of the same shape as `x`, sorted along the last axis.
  """
  with tf.name_scope(name, '_batch_sort_vector', [x]):
    x = tf.convert_to_tensor(value=x, name='x')
    width = tf.shape(input=x)[-1]
    if ascending:
      # `top_k` sorts descending, so negate on the way in and out to get an
      # ascending order.
      negated, _ = tf.nn.top_k(-x, k=width, sorted=True)
      result = -negated
    else:
      result, _ = tf.nn.top_k(x, k=width, sorted=True)
    result.set_shape(x.shape)
    return result
def empirical_cdfs(samples, continuity='right', dtype=tf.float32):
  """Evaluates the empirical CDF on a batch of potentially repeated samples.

  Duplicated samples make this non-trivial: the (sorted) position of a sample
  does not by itself determine its CDF value (the index-1 element of
  [0, 0.5, 0.5, 1] has empirical CDF 0.5, not 0.25), yet duplicates occurring
  across different batch members must not affect each other.

  The result is parallel to `samples`: for each sample, the value of the
  empirical CDF at that point.  Equal samples within a batch member receive
  equal CDF values.

  Note: The sample dimension is _last_, and the samples must be _sorted_
  ascending within each batch member.

  Args:
    samples: Tensor of shape `batch + [num_samples]` of samples, sorted
      ascending within each batch member.
    continuity: Either 'right' (default) for the conventional right-continuous
      CDF `F_n(X <= x)`, or 'left' for the left-continuous CDF `F_n(X < x)`.
      The pointwise difference between the two is the empirical pmf, i.e., the
      multiplicity of each sample within its batch member.
    dtype: dtype in which to evaluate the desired empirical CDFs.

  Returns:
    cdf: Tensor parallel to `samples` giving the (right- or left-continuous,
      per `continuity`) empirical CDF at each sample position.
  """
  if continuity not in ('left', 'right'):
    raise ValueError(
        'Continuity value must be "left" or "right", got {}.'.format(
            continuity))
  num_samples = tf.cast(tf.shape(input=samples)[-1], dtype=dtype)
  # Searching the (sorted) samples for themselves counts, per batch member,
  # how many samples fall strictly below (side='left') or at-or-below
  # (side='right') each sample, which handles duplicates correctly.
  positions = tf.searchsorted(samples, samples, side=continuity)
  return tf.cast(positions, dtype=dtype) / num_samples
def _do_maximum_mean(samples, envelope, high, name=None):
  """Implementation shared by `_maximum_mean` and `_minimum_mean`.

  Computes the largest mean consistent with the empirical CDF of `samples`
  lying within an `envelope`-wide band of the true CDF, given that the
  support is bounded above by `high`.
  """
  with tf.name_scope(name, 'do_maximum_mean', [samples, envelope, high]):
    dtype = dtype_util.common_dtype([samples, envelope, high], tf.float32)
    samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype)
    envelope = tf.convert_to_tensor(
        value=envelope, name='envelope', dtype=dtype)
    high = tf.convert_to_tensor(value=high, name='high', dtype=dtype)
    rank = tf.rank(samples)
    # Rotate the sample dimension (axis 0) to the rightmost position, which is
    # where _batch_sort_vector wants it, then sort each batch member.
    samples = tf.transpose(
        a=samples, perm=tf.concat([tf.range(1, rank), [0]], axis=0))
    samples = _batch_sort_vector(samples)
    # The maximum mean is attained by moving `envelope`-worth of probability
    # mass from the smallest samples onto the maximum value.  That amounts to:
    # - ignoring the smallest k samples, where `k/n < envelope`,
    # - taking a `1/n - (envelope - k/n)` part of the index-k sample,
    # - taking all the larger samples at full weight `1/n`,
    # - and adding `envelope * high` at the end.
    # `weights` below is a vectorized, batched mask implementing exactly that.
    num_samples = tf.cast(tf.shape(input=samples)[-1], dtype=dtype)
    step = 1. / num_samples
    cum_steps = step * tf.range(1, num_samples + 1, dtype=dtype)
    weights = tf.clip_by_value(
        cum_steps - envelope[..., tf.newaxis],
        clip_value_min=0.,
        clip_value_max=step)
    return tf.reduce_sum(
        input_tensor=samples * weights, axis=-1) + envelope * high
def _maximum_mean(samples, envelope, high, name=None):
  """Returns a stochastic upper bound on the mean of a scalar distribution.

  If the true CDF is within an `eps`-envelope of the empirical CDF of the
  samples, and the support is bounded above, then the mean is bounded above
  as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` is interpreted as independent and
  identically distributed samples.  The remaining dimensions are broadcast
  together with `envelope` and `high`, and operated on separately.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s) of
      interest.  Entries are assumed IID across the 0th dimension.  The other
      dimensions must broadcast with `envelope` and `high`.
    envelope: Floating-point `Tensor` of sizes of admissible CDF envelopes
      (i.e., the `eps` above).
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.  `samples <= high`.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point `Tensor` of upper bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be larger than the
      corresponding `high`.
  """
  with tf.name_scope(name, 'maximum_mean', [samples, envelope, high]):
    dtype = dtype_util.common_dtype([samples, envelope, high], tf.float32)
    samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype)
    envelope = tf.convert_to_tensor(
        value=envelope, name='envelope', dtype=dtype)
    high = tf.convert_to_tensor(value=high, name='high', dtype=dtype)
    # Validate the claimed support bound before relying on it.
    sample_max = tf.reduce_max(input_tensor=samples, axis=[0])
    check_op = tf.compat.v1.assert_less_equal(
        sample_max, high,
        message='Given sample maximum value exceeds expectations')
    with tf.control_dependencies([check_op]):
      return tf.identity(_do_maximum_mean(samples, envelope, high))
def _minimum_mean(samples, envelope, low, name=None):
  """Returns a stochastic lower bound on the mean of a scalar distribution.

  If the true CDF is within an `eps`-envelope of the empirical CDF of the
  samples, and the support is bounded below, then the mean is bounded below
  as well.  In symbols,

  ```none
  sup_x(|F_n(x) - F(x)|) < eps
  ```

  The 0th dimension of `samples` is interpreted as independent and
  identically distributed samples.  The remaining dimensions are broadcast
  together with `envelope` and `low`, and operated on separately.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s) of
      interest.  Entries are assumed IID across the 0th dimension.  The other
      dimensions must broadcast with `envelope` and `low`.
    envelope: Floating-point `Tensor` of sizes of admissible CDF envelopes
      (i.e., the `eps` above).
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.  `samples >= low`.
    name: A name for this operation (optional).

  Returns:
    bound: Floating-point `Tensor` of lower bounds on the true means.

  Raises:
    InvalidArgumentError: If some `sample` is found to be smaller than the
      corresponding `low`.
  """
  with tf.name_scope(name, 'minimum_mean', [samples, envelope, low]):
    dtype = dtype_util.common_dtype([samples, envelope, low], tf.float32)
    samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype)
    envelope = tf.convert_to_tensor(
        value=envelope, name='envelope', dtype=dtype)
    low = tf.convert_to_tensor(value=low, name='low', dtype=dtype)
    # Validate the claimed support bound before relying on it.
    sample_min = tf.reduce_min(input_tensor=samples, axis=[0])
    check_op = tf.compat.v1.assert_greater_equal(
        sample_min, low,
        message='Given sample minimum value falls below expectations')
    with tf.control_dependencies([check_op]):
      # A lower bound on the mean of X is the negation of an upper bound on
      # the mean of -X, whose support is bounded above by -low.
      return - _do_maximum_mean(-samples, envelope, -low)
def _dkwm_cdf_envelope(n, error_rate, name=None):
  """Computes the CDF envelope that the DKWM inequality licenses.

  The [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
  gives a stochastic bound on the distance between the true cumulative
  distribution function (CDF) of any distribution and its empirical CDF.
  To wit, for `n` iid samples from any distribution with CDF F,

  ```none
  P(sup_x |F_n(x) - F(x)| > eps) < 2exp(-2n eps^2)
  ```

  This function inverts that relationship, returning the envelope size `eps`
  as a function of the number of samples `n` and the desired limit on the
  left-hand probability above.

  Args:
    n: `Tensor` of numbers of samples drawn.
    error_rate: Floating-point `Tensor` of admissible rates of mistakes.
    name: A name for this operation (optional).

  Returns:
    eps: `Tensor` of maximum distances the true CDF can be from the empirical
      CDF.  This scales as `O(sqrt(-log(error_rate)))` and as
      `O(1 / sqrt(n))`.  The shape is the broadcast of `n` and `error_rate`.
  """
  with tf.name_scope(name, 'dkwm_cdf_envelope', [n, error_rate]):
    n = tf.cast(n, dtype=error_rate.dtype)
    log_prob = tf.math.log(error_rate / 2.)
    return tf.sqrt(-log_prob / (2. * n))
def _check_shape_dominates(samples, parameters):
  """Check that broadcasting `samples` against `parameters` does not expand it.

  Why?  Because we want to be very sure that the samples tensor is not
  accidentally enlarged by broadcasting against tensors that are supposed to
  be describing the distribution(s) sampled from, lest the sample counts end
  up inflated.

  Args:
    samples: A `Tensor` whose shape is to be protected against broadcasting.
    parameters: A list of `Tensor`s who are parameters for the statistical
      test.

  Returns:
    samples: Return original `samples` with control dependencies attached to
      ensure no broadcasting.
  """
  batch_shape = tf.shape(input=samples)[1:]
  batch_ndims = tf.size(input=batch_shape)
  assertions = []
  for param in parameters:
    broadcast_shape = tf.broadcast_dynamic_shape(
        batch_shape, tf.shape(input=param))
    # The rank check ensures we don't get a wrong answer from the _shapes_
    # broadcasting against each other.
    assertions.append(
        tf.compat.v1.assert_greater_equal(batch_ndims, tf.rank(param)))
    assertions.append(
        tf.compat.v1.assert_equal(batch_shape, broadcast_shape))
  with tf.control_dependencies(assertions):
    return tf.identity(samples)
def true_mean_confidence_interval_by_dkwm(
    samples, low, high, error_rate=1e-6, name=None):
  """Computes a confidence interval for the mean of a scalar distribution.

  In batch mode, computes confidence intervals for all distributions in the
  batch (which need not be identically distributed).

  Relies on the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).

  The probability (over the randomness of drawing the given samples) that any
  true mean is outside the corresponding returned interval is no more than
  the given `error_rate`.  The size of the intervals scale as
  `O(1 / sqrt(#samples))`, as `O(high - low)`, and as `O(-log(error_rate))`.

  Note that `error_rate` is a total error rate for all the confidence
  intervals in the batch.  As such, if the batch is nontrivial, the error
  rate is not broadcast but divided (evenly) among the batch members.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s) of
      interest.  Entries are assumed IID across the 0th dimension.  The other
      dimensions must broadcast with `low` and `high`.  The support is
      bounded: `low <= samples <= high`.
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.
    error_rate: *Scalar* floating-point `Tensor` admissible total rate of
      mistakes.
    name: A name for this operation (optional).

  Returns:
    low: A floating-point `Tensor` of stochastic lower bounds on the true
      means.
    high: A floating-point `Tensor` of stochastic upper bounds on the true
      means.
  """
  with tf.name_scope(name, 'true_mean_confidence_interval_by_dkwm',
                     [samples, low, high, error_rate]):
    dtype = dtype_util.common_dtype(
        [samples, low, high, error_rate], tf.float32)
    samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype)
    low = tf.convert_to_tensor(value=low, name='low', dtype=dtype)
    high = tf.convert_to_tensor(value=high, name='high', dtype=dtype)
    error_rate = tf.convert_to_tensor(
        value=error_rate, name='error_rate', dtype=dtype)
    samples = _check_shape_dominates(samples, [low, high])
    tf.compat.v1.assert_scalar(error_rate)  # Static shape
    # Divide the total error rate evenly among the batch members.
    rate_per_item = _itemwise_error_rate(
        total_rate=error_rate, param_tensors=[low, high],
        samples_tensor=samples)
    num_samples = tf.shape(input=samples)[0]
    envelope = _dkwm_cdf_envelope(num_samples, rate_per_item)
    lower_bound = _minimum_mean(samples, envelope, low)
    upper_bound = _maximum_mean(samples, envelope, high)
    return lower_bound, upper_bound
def _itemwise_error_rate(
    total_rate, param_tensors, samples_tensor=None, name=None):
  """Evenly distributes a total error rate over a batch of assertions.

  The batch shape is the broadcast of the shapes of every tensor in
  `param_tensors` with the batch (non-sample) shape of `samples_tensor`, if
  given.  The returned rate is `total_rate` divided by the number of batch
  members.
  """
  with tf.name_scope(name, 'itemwise_error_rate',
                     [total_rate, param_tensors, samples_tensor]):
    shape = [1]
    for tensor in param_tensors:
      shape = tf.broadcast_dynamic_shape(tf.shape(input=tensor), shape)
    if samples_tensor is not None:
      # Drop axis 0, which indexes IID samples rather than batch members.
      shape = tf.broadcast_dynamic_shape(
          tf.shape(input=samples_tensor)[1:], shape)
    batch_items = tf.reduce_prod(input_tensor=shape)
    return total_rate / tf.cast(batch_items, dtype=total_rate.dtype)
def assert_true_mean_equal_by_dkwm(
    samples, low, high, expected, false_fail_rate=1e-6, name=None):
  """Asserts the mean of the given distribution is as expected.

  More precisely, fails if there is enough evidence (using the
  [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
  that the true mean of some distribution from which the given samples are
  drawn is _not_ the given expected mean with statistical significance
  `false_fail_rate` or stronger, otherwise passes.  If you also want to check
  that you are gathering enough evidence that a pass is not spurious, see
  `min_num_samples_for_dkwm_mean_test` and
  `min_discrepancy_of_true_means_detectable_by_dkwm`.

  Note that `false_fail_rate` is a total false failure rate for all the
  assertions in the batch.  As such, if the batch is nontrivial, the
  assertion will insist on stronger evidence to fail any one member.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s) of
      interest.  Entries are assumed IID across the 0th dimension.  The other
      dimensions must broadcast with `low` and `high`.  The support is
      bounded: `low <= samples <= high`.
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.
    expected: Floating-point `Tensor` of expected true means.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total rate
      of mistakes.
    name: A name for this operation (optional).

  Returns:
    check: Op that raises `InvalidArgumentError` if any expected mean is
      outside the corresponding confidence interval.
  """
  with tf.name_scope(name, 'assert_true_mean_equal_by_dkwm',
                     [samples, low, high, expected, false_fail_rate]):
    # Equality of means is the degenerate interval [expected, expected].
    return assert_true_mean_in_interval_by_dkwm(
        samples, low, high, expected, expected, false_fail_rate)
def min_discrepancy_of_true_means_detectable_by_dkwm(
    n, low, high, false_fail_rate, false_pass_rate, name=None):
  """Returns the minimum mean discrepancy that a DKWM-based test can detect.

  DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).

  Note that `false_fail_rate` is a total false failure rate for all the tests
  in the batch.  As such, if the batch is nontrivial, each member will demand
  more samples.  The `false_pass_rate` is also interpreted as a total, but is
  treated asymmetrically: If each test in the batch detects its corresponding
  discrepancy with probability at least `1 - false_pass_rate`, then running
  all those tests and failing if any one fails will jointly detect all those
  discrepancies with the same `false_pass_rate`.

  Args:
    n: `Tensor` of numbers of samples to be drawn from the distributions of
      interest.
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total rate
      of false failures.
    false_pass_rate: *Scalar* floating-point `Tensor` admissible rate of
      false passes.
    name: A name for this operation (optional).

  Returns:
    discr: `Tensor` of lower bounds on the distances between true means
      detectable by a DKWM-based test.

  For each batch member `i`, of `K` total, drawing `n[i]` samples from some
  scalar distribution supported on `[low[i], high[i]]` is enough to detect a
  difference in means of size `discr[i]` or more.  Specifically, we guarantee
  that (a) if the true mean is the expected mean (resp. in the expected
  interval), then `assert_true_mean_equal_by_dkwm`
  (resp. `assert_true_mean_in_interval_by_dkwm`) will fail with probability
  at most `false_fail_rate / K` (which amounts to `false_fail_rate` if
  applied to the whole batch at once), and (b) if the true mean differs from
  the expected mean (resp. falls outside the expected interval) by at least
  `discr[i]`, `assert_true_mean_equal_by_dkwm`
  (resp. `assert_true_mean_in_interval_by_dkwm`) will pass with probability
  at most `false_pass_rate`.

  The detectable discrepancy scales as
  - `O(high[i] - low[i])`,
  - `O(1 / sqrt(n[i]))`,
  - `O(-log(false_fail_rate/K))`, and
  - `O(-log(false_pass_rate))`.
  """
  with tf.name_scope(name, 'min_discrepancy_of_true_means_detectable_by_dkwm',
                     [n, low, high, false_fail_rate, false_pass_rate]):
    dtype = dtype_util.common_dtype(
        [n, low, high, false_fail_rate, false_pass_rate], tf.float32)
    n = tf.convert_to_tensor(value=n, name='n', dtype=dtype)
    low = tf.convert_to_tensor(value=low, name='low', dtype=dtype)
    high = tf.convert_to_tensor(value=high, name='high', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    false_pass_rate = tf.convert_to_tensor(
        value=false_pass_rate, name='false_pass_rate', dtype=dtype)
    # A mean discrepancy of `d` on a support of width `high - low` is a CDF
    # discrepancy of `d / (high - low)`; delegate to the CDF-space bound and
    # rescale.
    cdf_discrepancy = min_discrepancy_of_true_cdfs_detectable_by_dkwm(
        n, false_fail_rate, false_pass_rate)
    return (high - low) * cdf_discrepancy
def min_num_samples_for_dkwm_mean_test(
    discrepancy, low, high,
    false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
  """Returns how many samples suffice for a one-sample DKWM mean test.

  To wit, returns an upper bound on the number of samples necessary to
  guarantee detecting a mean difference of at least the given `discrepancy`,
  with the given `false_fail_rate` and `false_pass_rate`, using the
  [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
  on a scalar distribution supported on `[low, high]`.

  Args:
    discrepancy: Floating-point `Tensor` of desired upper limits on mean
      differences that may go undetected with probability higher than
      `1 - false_pass_rate`.
    low: `Tensor` of lower bounds on the distributions' support.
    high: `Tensor` of upper bounds on the distributions' support.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total rate
      of false failures.
    false_pass_rate: *Scalar* floating-point `Tensor` admissible rate of
      false passes.
    name: A name for this operation (optional).

  Returns:
    n: `Tensor` of numbers of samples to be drawn from the distributions of
      interest.

  The `discrepancy`, `low`, and `high` tensors must have
  broadcast-compatible shapes.

  For each batch member `i`, of `K` total, drawing `n[i]` samples from some
  scalar distribution supported on `[low[i], high[i]]` is enough to detect a
  difference in means of size `discrepancy[i]` or more.  Specifically, we
  guarantee that (a) if the true mean is the expected mean (resp. in the
  expected interval), then `assert_true_mean_equal_by_dkwm`
  (resp. `assert_true_mean_in_interval_by_dkwm`) will fail with probability
  at most `false_fail_rate / K` (which amounts to `false_fail_rate` if
  applied to the whole batch at once), and (b) if the true mean differs from
  the expected mean (resp. falls outside the expected interval) by at least
  `discrepancy[i]`, `assert_true_mean_equal_by_dkwm`
  (resp. `assert_true_mean_in_interval_by_dkwm`) will pass with probability
  at most `false_pass_rate`.

  The required number of samples scales as `O((high[i] - low[i])**2)`,
  `O(-log(false_fail_rate/K))`, `O(-log(false_pass_rate))`, and
  `O(1 / discrepancy[i]**2)`.
  """
  with tf.name_scope(
      name, 'min_num_samples_for_dkwm_mean_test',
      [low, high, false_fail_rate, false_pass_rate, discrepancy]):
    dtype = dtype_util.common_dtype(
        [low, high, false_fail_rate, false_pass_rate, discrepancy], tf.float32)
    discrepancy = tf.convert_to_tensor(
        value=discrepancy, name='discrepancy', dtype=dtype)
    low = tf.convert_to_tensor(value=low, name='low', dtype=dtype)
    high = tf.convert_to_tensor(value=high, name='high', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    false_pass_rate = tf.convert_to_tensor(
        value=false_pass_rate, name='false_pass_rate', dtype=dtype)
    # Rescale the mean discrepancy into CDF space and delegate to the
    # CDF-space sample-count bound.
    cdf_discrepancy = discrepancy / (high - low)
    return min_num_samples_for_dkwm_cdf_test(
        cdf_discrepancy, false_fail_rate, false_pass_rate)
def assert_true_mean_in_interval_by_dkwm(
    samples, low, high, expected_low, expected_high,
    false_fail_rate=1e-6, name=None):
  """Asserts the mean of the given distribution is in the given interval.

  More precisely, fails if there is enough evidence (using the
  [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
  that the mean of the distribution from which the given samples are drawn is
  _outside_ the given interval with statistical significance
  `false_fail_rate` or stronger, otherwise passes.  If you also want to check
  that you are gathering enough evidence that a pass is not spurious, see
  `min_num_samples_for_dkwm_mean_test` and
  `min_discrepancy_of_true_means_detectable_by_dkwm`.

  Note that `false_fail_rate` is a total false failure rate for all the
  assertions in the batch.  As such, if the batch is nontrivial, the
  assertion will insist on stronger evidence to fail any one member.

  Args:
    samples: Floating-point `Tensor` of samples from the distribution(s) of
      interest.  Entries are assumed IID across the 0th dimension.  The other
      dimensions must broadcast with `low` and `high`.  The support is
      bounded: `low <= samples <= high`.
    low: Floating-point `Tensor` of lower bounds on the distributions'
      supports.
    high: Floating-point `Tensor` of upper bounds on the distributions'
      supports.
    expected_low: Floating-point `Tensor` of lower bounds on the expected
      true means.
    expected_high: Floating-point `Tensor` of upper bounds on the expected
      true means.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total rate
      of mistakes.
    name: A name for this operation (optional).

  Returns:
    check: Op that raises `InvalidArgumentError` if any expected mean
      interval does not overlap with the corresponding confidence interval.
  """
  args_list = [samples, low, high, expected_low, expected_high, false_fail_rate]
  with tf.name_scope(
      name, 'assert_true_mean_in_interval_by_dkwm', args_list):
    dtype = dtype_util.common_dtype(args_list, tf.float32)
    samples = tf.convert_to_tensor(value=samples, name='samples', dtype=dtype)
    low = tf.convert_to_tensor(value=low, name='low', dtype=dtype)
    high = tf.convert_to_tensor(value=high, name='high', dtype=dtype)
    expected_low = tf.convert_to_tensor(
        value=expected_low, name='expected_low', dtype=dtype)
    expected_high = tf.convert_to_tensor(
        value=expected_high, name='expected_high', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    samples = _check_shape_dominates(
        samples, [low, high, expected_low, expected_high])
    min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
        samples, low, high, false_fail_rate)
    # The interval [min_mean, max_mean] intersects [expected_low,
    # expected_high] iff max_mean >= expected_low and min_mean <=
    # expected_high.  By DeMorgan's law, that's also equivalent to
    # not (max_mean < expected_low or min_mean > expected_high),
    # i.e., the two intervals are not disjoint.  Check the two inequalities
    # in sequence.
    lower_overlap_check = tf.compat.v1.assert_greater_equal(
        max_mean,
        expected_low,
        message='Confidence interval does not '
        'intersect: true mean smaller than expected')
    with tf.control_dependencies([lower_overlap_check]):
      return tf.compat.v1.assert_less_equal(
          min_mean,
          expected_high,
          message='Confidence interval does not '
          'intersect: true mean greater than expected')
def assert_true_mean_equal_by_dkwm_two_sample(
    samples1, low1, high1, samples2, low2, high2,
    false_fail_rate=1e-6, name=None):
  """Asserts the means of the given distributions are equal.

  More precisely, fails if there is enough evidence (using the
  [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
  that the means of the distributions from which the given samples are drawn
  are _not_ equal with statistical significance `false_fail_rate` or
  stronger, otherwise passes.  If you also want to check that you are
  gathering enough evidence that a pass is not spurious, see
  `min_num_samples_for_dkwm_mean_two_sample_test` and
  `min_discrepancy_of_true_means_detectable_by_dkwm_two_sample`.

  Note that `false_fail_rate` is a total false failure rate for all the
  assertions in the batch.  As such, if the batch is nontrivial, the
  assertion will insist on stronger evidence to fail any one member.

  Args:
    samples1: Floating-point `Tensor` of samples from the distribution(s) A.
      Entries are assumed IID across the 0th dimension.  The other dimensions
      must broadcast with `low1`, `high1`, `low2`, and `high2`.  The support
      is bounded: `low1 <= samples1 <= high1`.
    low1: Floating-point `Tensor` of lower bounds on the supports of the
      distributions A.
    high1: Floating-point `Tensor` of upper bounds on the supports of the
      distributions A.
    samples2: Floating-point `Tensor` of samples from the distribution(s) B.
      Entries are assumed IID across the 0th dimension.  The other dimensions
      must broadcast with `low1`, `high1`, `low2`, and `high2`.  The support
      is bounded: `low2 <= samples2 <= high2`.
    low2: Floating-point `Tensor` of lower bounds on the supports of the
      distributions B.
    high2: Floating-point `Tensor` of upper bounds on the supports of the
      distributions B.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total rate
      of mistakes.
    name: A name for this operation (optional).

  Returns:
    check: Op that raises `InvalidArgumentError` if any pair of confidence
      intervals true for corresponding true means do not overlap.
  """
  args_list = [samples1, low1, high1, samples2, low2, high2, false_fail_rate]
  with tf.name_scope(
      name, 'assert_true_mean_equal_by_dkwm_two_sample', args_list):
    dtype = dtype_util.common_dtype(args_list, tf.float32)
    samples1 = tf.convert_to_tensor(
        value=samples1, name='samples1', dtype=dtype)
    low1 = tf.convert_to_tensor(value=low1, name='low1', dtype=dtype)
    high1 = tf.convert_to_tensor(value=high1, name='high1', dtype=dtype)
    samples2 = tf.convert_to_tensor(
        value=samples2, name='samples2', dtype=dtype)
    low2 = tf.convert_to_tensor(value=low2, name='low2', dtype=dtype)
    high2 = tf.convert_to_tensor(value=high2, name='high2', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    samples1 = _check_shape_dominates(samples1, [low1, high1])
    samples2 = _check_shape_dominates(samples2, [low2, high2])
    compatible_samples = tf.compat.v1.assert_equal(
        tf.shape(input=samples1)[1:],
        tf.shape(input=samples2)[1:])
    with tf.control_dependencies([compatible_samples]):
      # Could in principle play games with cleverly allocating significance
      # instead of the even split below.  It may be possible to get tighter
      # intervals, in order to obtain a higher power test.  Any allocation
      # strategy that depends only on the support bounds and sample counts
      # should be valid; however, because the intervals scale as
      # O(-log(false_fail_rate)), there doesn't seem to be much room to win.
      min_mean_2, max_mean_2 = true_mean_confidence_interval_by_dkwm(
          samples2, low2, high2, false_fail_rate / 2.)
      return assert_true_mean_in_interval_by_dkwm(
          samples1, low1, high1, min_mean_2, max_mean_2, false_fail_rate / 2.)
def min_discrepancy_of_true_means_detectable_by_dkwm_two_sample(
    n1, low1, high1, n2, low2, high2,
    false_fail_rate, false_pass_rate, name=None):
  """Returns the minimum mean discrepancy for a two-sample DKWM-based test.

  DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).

  Note that `false_fail_rate` is a total false failure rate for all the tests
  in the batch.  As such, if the batch is nontrivial, each member will demand
  more samples.  The `false_pass_rate` is also interpreted as a total, but is
  treated asymmetrically: If each test in the batch detects its corresponding
  discrepancy with probability at least `1 - false_pass_rate`, then running
  all those tests and failing if any one fails will jointly detect all those
  discrepancies with the same `false_pass_rate`.

  Args:
    n1: `Tensor` of numbers of samples to be drawn from the distributions A.
    low1: Floating-point `Tensor` of lower bounds on the supports of the
      distributions A.
    high1: Floating-point `Tensor` of upper bounds on the supports of the
      distributions A.
    n2: `Tensor` of numbers of samples to be drawn from the distributions B.
    low2: Floating-point `Tensor` of lower bounds on the supports of the
      distributions B.
    high2: Floating-point `Tensor` of upper bounds on the supports of the
      distributions B.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total rate
      of false failures.
    false_pass_rate: *Scalar* floating-point `Tensor` admissible rate of
      false passes.
    name: A name for this operation (optional).

  Returns:
    discr: `Tensor` of lower bounds on the distances between true means
      detectable by a two-sample DKWM-based test.

  For each batch member `i`, of `K` total, drawing `n1[i]` samples from
  scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
  samples from scalar distribution B supported on `[low2[i], high2[i]]` is
  enough to detect a difference in their true means of size `discr[i]` or
  more.  Specifically, we guarantee that (a) if their true means are equal,
  `assert_true_mean_equal_by_dkwm_two_sample` will fail with probability at
  most `false_fail_rate/K` (which amounts to `false_fail_rate` if applied to
  the whole batch at once), and (b) if their true means differ by at least
  `discr[i]`, `assert_true_mean_equal_by_dkwm_two_sample` will pass with
  probability at most `false_pass_rate`.

  The detectable distribution scales as
  - `O(high1[i] - low1[i])`, `O(high2[i] - low2[i])`,
  - `O(1 / sqrt(n1[i]))`, `O(1 / sqrt(n2[i]))`,
  - `O(-log(false_fail_rate/K))`, and
  - `O(-log(false_pass_rate))`.
  """
  args_list = (
      [n1, low1, high1, n2, low2, high2, false_fail_rate, false_pass_rate])
  with tf.name_scope(
      name,
      'min_discrepancy_of_true_means_detectable_by_dkwm_two_sample',
      args_list):
    dtype = dtype_util.common_dtype(args_list, tf.float32)
    n1 = tf.convert_to_tensor(value=n1, name='n1', dtype=dtype)
    low1 = tf.convert_to_tensor(value=low1, name='low1', dtype=dtype)
    high1 = tf.convert_to_tensor(value=high1, name='high1', dtype=dtype)
    n2 = tf.convert_to_tensor(value=n2, name='n2', dtype=dtype)
    low2 = tf.convert_to_tensor(value=low2, name='low2', dtype=dtype)
    high2 = tf.convert_to_tensor(value=high2, name='high2', dtype=dtype)
    false_fail_rate = tf.convert_to_tensor(
        value=false_fail_rate, name='false_fail_rate', dtype=dtype)
    false_pass_rate = tf.convert_to_tensor(
        value=false_pass_rate, name='false_pass_rate', dtype=dtype)
    # Split both error rates evenly between the two one-sample sub-tests; the
    # two-sample test can detect any discrepancy at least as large as the sum
    # of what each sub-test can detect.
    detectable_a = min_discrepancy_of_true_means_detectable_by_dkwm(
        n1, low1, high1, false_fail_rate / 2., false_pass_rate / 2.)
    detectable_b = min_discrepancy_of_true_means_detectable_by_dkwm(
        n2, low2, high2, false_fail_rate / 2., false_pass_rate / 2.)
    return detectable_a + detectable_b
def min_num_samples_for_dkwm_mean_two_sample_test(
    discrepancy, low1, high1, low2, high2,
    false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
  """Computes sample sizes sufficient for a two-sample DKWM mean test.

  DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
  (https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).

  Args:
    discrepancy: Floating-point `Tensor` of desired upper limits on mean
      differences that may go undetected with probability higher than
      `1 - false_pass_rate`.
    low1: Floating-point `Tensor` of lower bounds on the supports of the
      distributions A.
    high1: Floating-point `Tensor` of upper bounds on the supports of
      the distributions A.
    low2: Floating-point `Tensor` of lower bounds on the supports of the
      distributions B.
    high2: Floating-point `Tensor` of upper bounds on the supports of
      the distributions B.
    false_fail_rate: *Scalar* floating-point `Tensor` admissible total
      rate of false failures.
    false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
      of false passes.
    name: A name for this operation (optional).

  Returns:
    n1: `Tensor` of numbers of samples to be drawn from the distributions A.
    n2: `Tensor` of numbers of samples to be drawn from the distributions B.

  The returned sample counts suffice, per batch member `i`, to detect a
  difference of `discrepancy[i]` or more between the true means of scalar
  distributions A (supported on `[low1[i], high1[i]]`) and B (supported on
  `[low2[i], high2[i]]`) at the stated false-failure and false-pass rates.
  The required number of samples scales as
  - `O((high1[i] - low1[i])**2)`, `O((high2[i] - low2[i])**2)`,
  - `O(-log(false_fail_rate/K))`,
  - `O(-log(false_pass_rate))`, and
  - `O(1 / discrepancy[i]**2)`.
  """
  tensors = [low1, high1, low2, high2,
             false_fail_rate, false_pass_rate, discrepancy]
  with tf.name_scope(
      name,
      'min_num_samples_for_dkwm_mean_two_sample_test',
      tensors):
    dtype = dtype_util.common_dtype(tensors, tf.float32)

    def as_tensor(value, name):
      # All inputs share one common dtype so downstream arithmetic is uniform.
      return tf.convert_to_tensor(value=value, name=name, dtype=dtype)

    discrepancy = as_tensor(discrepancy, 'discrepancy')
    low1 = as_tensor(low1, 'low1')
    high1 = as_tensor(high1, 'high1')
    low2 = as_tensor(low2, 'low2')
    high2 = as_tensor(high2, 'high2')
    false_fail_rate = as_tensor(false_fail_rate, 'false_fail_rate')
    false_pass_rate = as_tensor(false_pass_rate, 'false_pass_rate')
    # Could choose to cleverly allocate discrepancy tolerances and
    # failure probabilities, but an even split between the two
    # one-sample sub-tests is sound.
    n1 = min_num_samples_for_dkwm_mean_test(
        discrepancy / 2., low1, high1,
        false_fail_rate / 2., false_pass_rate / 2.)
    n2 = min_num_samples_for_dkwm_mean_test(
        discrepancy / 2., low2, high2,
        false_fail_rate / 2., false_pass_rate / 2.)
    return n1, n2
| 46.3594 | 86 | 0.727505 |
c4b28726094121140ce9d179a135e1b59387d692 | 1,150 | py | Python | examples/with_social_auth/social_website/social_website/wsgi.py | haremmaster/django-social-friends-finder | cad63349b19b3c301626c24420ace13c63f45ad7 | [
"BSD-3-Clause"
] | 19 | 2015-01-01T16:23:06.000Z | 2020-01-02T22:42:17.000Z | examples/with_social_auth/social_website/social_website/wsgi.py | haremmaster/django-social-friends-finder | cad63349b19b3c301626c24420ace13c63f45ad7 | [
"BSD-3-Clause"
] | 2 | 2015-01-01T16:34:59.000Z | 2015-03-26T10:30:59.000Z | examples/with_all_auth/social_website/social_website/wsgi.py | laplacesdemon/django-social-friends-finder | cad63349b19b3c301626c24420ace13c63f45ad7 | [
"BSD-3-Clause"
] | 11 | 2015-01-16T18:39:34.000Z | 2021-08-13T00:46:41.000Z | """
WSGI config for social_website project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# DJANGO_SETTINGS_MODULE must be set before get_wsgi_application() runs so
# Django can locate the project settings; setdefault preserves any value
# already supplied by the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "social_website.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 39.655172 | 79 | 0.809565 |
71c29531ac59ad7571eb081e70cfee8a5d11f1dc | 2,527 | py | Python | tests/test_verify_account.py | meals-app/django-graphql-auth | f6136a14e2ff4c6cb21ab25ed66b5832b03d2fd4 | [
"MIT"
] | 290 | 2020-01-15T17:47:09.000Z | 2022-03-28T19:24:16.000Z | tests/test_verify_account.py | meals-app/django-graphql-auth | f6136a14e2ff4c6cb21ab25ed66b5832b03d2fd4 | [
"MIT"
] | 136 | 2020-01-31T16:47:30.000Z | 2022-03-29T13:17:22.000Z | tests/test_verify_account.py | meals-app/django-graphql-auth | f6136a14e2ff4c6cb21ab25ed66b5832b03d2fd4 | [
"MIT"
] | 85 | 2020-02-07T12:48:26.000Z | 2022-03-24T08:56:59.000Z | from django.contrib.auth import get_user_model
from .testCases import RelayTestCase, DefaultTestCase
from graphql_auth.constants import Messages
from graphql_auth.utils import get_token, get_token_payload
from graphql_auth.models import UserStatus
from graphql_auth.signals import user_verified
class VerifyAccountCaseMixin:
    """Shared test cases for the ``verifyAccount`` mutation.

    Concrete subclasses supply ``verify_query`` (default vs. relay schema).
    """

    def setUp(self):
        # One not-yet-verified user and one already-verified user.
        self.user1 = self.register_user(
            email="foo@email.com", username="foo", verified=False
        )
        self.user2 = self.register_user(
            email="bar@email.com", username="bar", verified=True
        )

    def test_verify_user(self):
        # A valid activation token verifies the user and fires user_verified.
        received = {"fired": False}

        def receive_signal(sender, user, signal):
            self.assertEqual(user.id, self.user1.id)
            received["fired"] = True

        user_verified.connect(receive_signal)
        activation_token = get_token(self.user1, "activation")
        executed = self.make_request(self.verify_query(activation_token))
        self.assertEqual(executed["success"], True)
        self.assertFalse(executed["errors"])
        self.assertTrue(received["fired"])

    def test_verified_user(self):
        # Verifying an already-verified account reports ALREADY_VERIFIED.
        executed = self.make_request(
            self.verify_query(get_token(self.user2, "activation"))
        )
        self.assertEqual(executed["success"], False)
        self.assertEqual(
            executed["errors"]["nonFieldErrors"], Messages.ALREADY_VERIFIED
        )

    def test_invalid_token(self):
        # A malformed token is rejected with INVALID_TOKEN.
        executed = self.make_request(self.verify_query("faketoken"))
        self.assertEqual(executed["success"], False)
        self.assertEqual(executed["errors"]["nonFieldErrors"], Messages.INVALID_TOKEN)

    def test_other_token(self):
        # A token minted for a different action cannot verify an account.
        executed = self.make_request(
            self.verify_query(get_token(self.user2, "password_reset"))
        )
        self.assertEqual(executed["success"], False)
        self.assertEqual(executed["errors"]["nonFieldErrors"], Messages.INVALID_TOKEN)
class VerifyAccountCase(VerifyAccountCaseMixin, DefaultTestCase):
    # Runs the shared verification cases against the default (non-relay)
    # mutation schema.
    def verify_query(self, token):
        # Plain verifyAccount mutation; the token is %-interpolated into the
        # GraphQL document.
        return """
        mutation {
            verifyAccount(token: "%s")
            { success, errors }
        }
        """ % (
            token
        )
class VerifyAccountRelayTestCase(VerifyAccountCaseMixin, RelayTestCase):
    # Runs the shared verification cases against the relay schema, which
    # wraps mutation arguments in an ``input`` object.
    def verify_query(self, token):
        return """
        mutation {
            verifyAccount(input:{ token: "%s"})
            { success, errors }
        }
        """ % (
            token
        )
| 33.25 | 86 | 0.651365 |
4ba77f79ef0928de2a5401f4d4be8410cd4bab6d | 9,288 | py | Python | archive/lym_project/deep_conv_ae_spsparse_alt30.py | peterdonnelly1/u24_lymphocyte | dff7ceed404c38582feb81aa9b8a55d80ada0f77 | [
"BSD-3-Clause"
] | 23 | 2018-08-23T03:58:37.000Z | 2022-02-23T05:04:54.000Z | archive/lym_project/deep_conv_ae_spsparse_alt30.py | peterdonnelly1/u24_lymphocyte | dff7ceed404c38582feb81aa9b8a55d80ada0f77 | [
"BSD-3-Clause"
] | 8 | 2018-07-20T20:54:51.000Z | 2020-06-12T05:36:04.000Z | archive/lym_project/deep_conv_ae_spsparse_alt30.py | peterdonnelly1/u24_lymphocyte | dff7ceed404c38582feb81aa9b8a55d80ada0f77 | [
"BSD-3-Clause"
] | 22 | 2018-05-21T23:57:20.000Z | 2022-02-21T00:48:32.000Z | import pickle
import sys
import os
import urllib
import gzip
import cPickle
import time
import lasagne
import theano
import numpy as np
import theano.tensor as T
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from theano.sandbox.neighbours import neibs2images
from lasagne.nonlinearities import sigmoid, rectify, leaky_rectify, identity
from lasagne.nonlinearities import softmax
from lasagne import regularization
from scipy import misc
from PIL import Image
from lasagne import init
from math import floor
from shape import ReshapeLayer
from batch_norms import batch_norm, SoftThresPerc
from extensive_data_aug_100x100 import data_aug
from ch_inner_prod import ChInnerProd, ChInnerProdMerge
# Patch size: inputs are PS x PS RGB image patches.
PS = 100;
# Shared variable so the training loop can decay the rate in place.
LearningRate = theano.shared(np.array(3e-2, dtype=np.float32));
NumEpochs = 100;
BatchSize = 32;
# Experiment identifier baked into all output filenames below.
filename_code = 30;
# A literal '{}' placeholder is kept in the model path so the epoch number
# can be formatted in at checkpoint time.
filename_model_ae = 'model_vals/deep_conv_autoencoder_spsparse_alt{}_model_{}.pkl'.format(filename_code, '{}');
filename_mu = 'model_vals/deep_conv_autoencoder_spsparse_alt{}_mu.pkl'.format(filename_code);
filename_sigma = 'model_vals/deep_conv_autoencoder_spsparse_alt{}_sigma.pkl'.format(filename_code);
def load_data():
nbuf = 0;
X_train = np.zeros(shape=(500000, 3, 100, 100), dtype=np.float32);
lines = [line.rstrip('\n') for line in open('./data/vals/random_patches_for_all_svs/label.txt')];
for line in lines:
full_path = './data/vals/random_patches_for_all_svs/image_' + line.split()[0];
png = np.array(Image.open(full_path).convert('RGB')).transpose() / 255.0;
X_train[nbuf, :, :, :] = png;
nbuf += 1;
X_train = X_train[0:nbuf];
print "Computing mean and std";
mu = np.mean(X_train[0::int(floor(X_train.shape[0]/1000)), :, :, :].flatten());
sigma = np.std(X_train[0::int(floor(X_train.shape[0]/1000)), :, :, :].flatten());
X_train = (X_train - mu) / sigma;
print "Data Loaded", X_train.shape[0];
return X_train, mu, sigma;
def iterate_minibatches_ae(inputs, batchsize, shuffle=False):
    """Yield successive minibatches of ``batchsize`` items from ``inputs``.

    A trailing partial batch is dropped. When ``shuffle`` is true, a random
    permutation of indices is drawn once up front and every batch fancy-indexes
    into it; otherwise contiguous slices are yielded.
    """
    n = len(inputs)
    perm = None
    if shuffle:
        perm = np.arange(n)
        np.random.shuffle(perm)
    for start in range(0, n - batchsize + 1, batchsize):
        stop = start + batchsize
        if perm is None:
            yield inputs[start:stop]
        else:
            yield inputs[perm[start:stop]]
def build_autoencoder_network():
    """Assemble the convolutional autoencoder.

    Returns:
        (network, input_var, mask_var, output_var) where ``network`` is the
        final flattened reconstruction layer, ``input_var`` the symbolic
        input tensor, ``mask_var`` the symbolic output of the deepest pooled
        encoder feature map (monitored for sparsity during training), and
        ``output_var`` the flattened reconstruction.
    """
    input_var = T.tensor4('input_var');
    # --- Encoder: five conv blocks, each followed by average pooling ---
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(3,3), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 480, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    # Handle on the deepest pooled feature map; the training loop uses its
    # output to report activation sparsity.
    mask_map = layer;
    # --- Bottleneck ---
    layer = batch_norm(layers.Conv2DLayer(layer, 300, filter_size=(1,1), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 1000, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 300, filter_size=(3,3), stride=1, nonlinearity=leaky_rectify));
    # --- Decoder: mirrored deconvolutions back up to a 3-channel image ---
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 480, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 480, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 480, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=2, crop=(0,0), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    # Linear output head: reconstruct the 3-channel (normalised) image.
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);
    # Flatten the reconstruction so it can be compared to a flattened target.
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);
    return network, input_var, mask_var, output_var;
def build_training_function(network, input_var, mask_var, output_var):
    """Compile the theano training step for the autoencoder.

    Returns a callable mapping (input batch, flattened target batch) to
    (mean squared reconstruction loss, bottleneck mask values), while
    updating every trainable parameter by Nesterov momentum at the shared
    LearningRate.
    """
    print("building training function")
    target_var = T.matrix('target_var')
    # Mean squared error between the flattened reconstruction and target.
    recon_loss = lasagne.objectives.squared_error(output_var, target_var).mean()
    trainable_params = lasagne.layers.get_all_params(network, trainable=True)
    sgd_updates = lasagne.updates.nesterov_momentum(
        recon_loss, trainable_params, learning_rate=LearningRate, momentum=0.9)
    step = theano.function([input_var, target_var], [recon_loss, mask_var],
                           updates=sgd_updates)
    print("finish building training function")
    return step
def exc_train(train_func, X_train, network):
    """Run the training loop, logging per-sub-iteration stats and
    checkpointing parameters every epoch.

    Each epoch is split into ``it_div`` interleaved sub-iterations;
    sub-iteration ``it`` trains on the strided subset ``X_train[it::it_div]``.
    """
    print("Starting training...");
    print("Epoch\t\tIter\t\tLoss\t\tSpar\t\tTime");
    it_div = 100;
    for epoch in range(NumEpochs):
        start_time = time.time();
        for it in range(it_div):
            # Iterate through mini batches of the current strided subset.
            # NOTE(review): assumes each subset yields at least one full
            # batch; n_batch == 0 would divide by zero below.
            total_loss = 0;
            total_sparsity = 0;
            n_batch = 0;
            for batch in iterate_minibatches_ae(X_train[it::it_div], BatchSize, shuffle=True):
                batch = data_aug(batch);
                # Autoencoder target is the (augmented) input, flattened.
                batch_target = np.reshape(batch, (batch.shape[0], -1));
                loss, mask = train_func(batch, batch_target);
                total_loss += loss;
                # Percentage of bottleneck activations above 1e-6.
                total_sparsity += 100.0 * float(np.count_nonzero(mask>1e-6)) / mask.size;
                n_batch += 1;
            total_loss /= n_batch;
            total_sparsity /= n_batch;
            # Exponential learning-rate decay: 1% per sub-iteration.
            LearningRate.set_value(np.float32(0.99*LearningRate.get_value()));
            print("{:d}\t\t{:d}\t\t{:.4f}\t\t{:.3f}\t\t{:.3f}".format(
                epoch, it, total_loss, total_sparsity, time.time()-start_time));
            start_time = time.time();
        if epoch % 1 == 0:
            # Checkpoint all network parameters (every epoch, given % 1).
            param_values = layers.get_all_param_values(network);
            pickle.dump(param_values, open(filename_model_ae.format(epoch), 'w'));
def main():
    """Entry point: load data, persist normalisation stats, then train."""
    training_set, mean_val, std_val = load_data()
    # Persist the normalisation statistics so inference can reuse them.
    pickle.dump(mean_val, open(filename_mu, 'w'))
    pickle.dump(std_val, open(filename_sigma, 'w'))
    # Build the network and its compiled training step, then train.
    net, in_var, msk_var, out_var = build_autoencoder_network()
    step_fn = build_training_function(net, in_var, msk_var, out_var)
    exc_train(step_fn, training_set, net)
    print("DONE !")


if __name__ == "__main__":
    main()
| 50.478261 | 127 | 0.702412 |
0d557070fbffe712447d1333922fb2cd6bb92148 | 3,475 | py | Python | oo/carro.py | wallysso/pythonbirds | dd1e48713defaaa567dbfa1fc1a77334bd3ab374 | [
"MIT"
] | null | null | null | oo/carro.py | wallysso/pythonbirds | dd1e48713defaaa567dbfa1fc1a77334bd3ab374 | [
"MIT"
] | null | null | null | oo/carro.py | wallysso/pythonbirds | dd1e48713defaaa567dbfa1fc1a77334bd3ab374 | [
"MIT"
] | null | null | null |
"""você deve criar uma classe de carro que vai possuir dois atributos compostos por outras duas classs:
1) motor
2) direção
O motor tera a responsabilidade de controlar a velocidade.
Ele oferece os seguintes atributos:
1) Atributo de dado de velocidade
2) Método acelerar, que deverá incrementar a velocidade de uma unidade.
3) Método frear, que deverá decrementar a velocidade em duas unidades.
A direção tera a responsabilidade de controlar a direção.
Ela oferece os seguintes atributos:
1) Valor de direção com possiveis direção: NORTE, SUL, LESTE, OESTE.
2) Método virar_a_direita.
3) Método virar_a_esquerda.
N
O L
s
Exemplo:
>>> # Testando motor
>>> motor = Motor()
>>> motor.velocidade
0
>>> motor.acelerar()
>>> motor.velocidade
1
>>> motor.acelerar()
>>> motor.velocidade
2
>>> motor.acelerar()
>>> motor.velocidade
3
>>> motor.frear()
>>> motor.velocidade
1
>>> motor.frear()
>>> motor.velocidade
0
>>> # Testando Direcao
>>> direcao = Direcao()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_direita()
>>> direcao.valor
'Norte'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Oeste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Sul'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Leste'
>>> direcao.girar_a_esquerda()
>>> direcao.valor
'Norte'
>>> carro = Carro(direcao, motor)
>>> carro.calcular_velocidade()
0
>>> carro.acelerar()
>>> carro.calcular_velocidade()
1
>>> carro.acelerar()
>>> carro.calcular_velocidade()
2
>>> carro.frear()
>>> carro.calcular_velocidade()
0
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_direita()
>>> carro.calcular_direcao()
'Leste'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Norte'
>>> carro.girar_a_esquerda()
>>> carro.calcular_direcao()
'Oeste'
"""
# Cardinal-direction constants used by the doctest examples above.
# NOTE(review): these four constants are redefined identically a few lines
# below; one of the two copies could be removed.
NORTE='Norte'
SUL='Sul'
LESTE='Leste'
OESTE='Oeste'
class Motor:
    """Engine: owns and controls the car's speed."""

    def __init__(self):
        # A freshly built engine is at rest.
        self.velocidade = 0

    def acelerar(self):
        """Increase the speed by one unit."""
        self.velocidade = self.velocidade + 1

    def frear(self):
        """Decrease the speed by two units, never dropping below zero."""
        self.velocidade = max(self.velocidade - 2, 0)
NORTE = 'Norte'
SUL = 'Sul'
LESTE = 'Leste'
OESTE = 'Oeste'


class Direcao:
    """Steering: tracks the cardinal direction the car is facing.

    Each quarter turn is a lookup in one of two transition tables.
    """

    # Clockwise quarter-turn transitions.
    rotacao_a_direita_dtc = {
        NORTE: LESTE,
        LESTE: SUL,
        SUL: OESTE,
        OESTE: NORTE,
    }
    # Counter-clockwise quarter-turn transitions.
    rotacao_a_esquerda_dtc = {
        NORTE: OESTE,
        LESTE: NORTE,
        SUL: LESTE,
        OESTE: SUL,
    }

    def __init__(self):
        # A new steering unit points north.
        self.valor = NORTE

    def girar_a_direita(self):
        """Turn 90 degrees clockwise."""
        self.valor = Direcao.rotacao_a_direita_dtc[self.valor]

    def girar_a_esquerda(self):
        """Turn 90 degrees counter-clockwise."""
        self.valor = Direcao.rotacao_a_esquerda_dtc[self.valor]
class Carro:
    """A car composed of a steering unit (``direcao``) and an engine
    (``motor``); every operation delegates to the matching component."""

    def __init__(self, direcao, motor):
        self.direcao = direcao
        self.motor = motor

    # --- speed, delegated to the engine ---

    def calcular_velocidade(self):
        """Return the current speed as reported by the engine."""
        return self.motor.velocidade

    def acelerar(self):
        """Speed up by one unit."""
        self.motor.acelerar()

    def frear(self):
        """Slow down (the engine clamps speed at zero)."""
        self.motor.frear()

    # --- heading, delegated to the steering unit ---

    def calcular_direcao(self):
        """Return the current cardinal direction."""
        return self.direcao.valor

    def girar_a_direita(self):
        """Turn 90 degrees clockwise."""
        self.direcao.girar_a_direita()

    def girar_a_esquerda(self):
        """Turn 90 degrees counter-clockwise."""
        self.direcao.girar_a_esquerda()
11e5abff25c947a83d6ad662925b4ea4362f82c4 | 16,352 | py | Python | holoviews/tests/core/data/testxarrayinterface.py | mkp170791/holoviews | a17f5c66eac7c23817e31edd3b3768b3be35076f | [
"BSD-3-Clause"
] | null | null | null | holoviews/tests/core/data/testxarrayinterface.py | mkp170791/holoviews | a17f5c66eac7c23817e31edd3b3768b3be35076f | [
"BSD-3-Clause"
] | null | null | null | holoviews/tests/core/data/testxarrayinterface.py | mkp170791/holoviews | a17f5c66eac7c23817e31edd3b3768b3be35076f | [
"BSD-3-Clause"
] | null | null | null | import datetime as dt
from collections import OrderedDict
from unittest import SkipTest
import numpy as np
try:
import xarray as xr
except:
raise SkipTest("Could not import xarray, skipping XArrayInterface tests.")
from holoviews.core.data import Dataset, concat
from holoviews.core.dimension import Dimension
from holoviews.core.spaces import HoloMap
from holoviews.element import Image, RGB, HSV, QuadMesh
from .testimageinterface import (
BaseImageElementInterfaceTests, BaseRGBElementInterfaceTests,
BaseHSVElementInterfaceTests
)
from .testgridinterface import BaseGridInterfaceTests
class XArrayInterfaceTests(BaseGridInterfaceTests):
"""
Tests for xarray interface
"""
datatype = 'xarray'
data_type = xr.Dataset
__test__ = True
def get_irregular_dataarray(self, invert_y=True):
multiplier = -1 if invert_y else 1
x = np.arange(2, 62, 3)
y = np.arange(2, 12, 2) * multiplier
da = xr.DataArray(
data=[np.arange(100).reshape(5, 20)],
coords=OrderedDict([('band', [1]), ('x', x), ('y', y)]),
dims=['band', 'y','x'],
attrs={'transform': (3, 0, 2, 0, -2, -2)})
xs, ys = (np.tile(x[:, np.newaxis], len(y)).T,
np.tile(y[:, np.newaxis], len(x)))
return da.assign_coords(**{'xc': xr.DataArray(xs, dims=('y','x')),
'yc': xr.DataArray(ys, dims=('y','x')),})
def test_xarray_dataset_with_scalar_dim_canonicalize(self):
xs = [0, 1]
ys = [0.1, 0.2, 0.3]
zs = np.array([[[0, 1], [2, 3], [4, 5]]])
xrarr = xr.DataArray(zs, coords={'x': xs, 'y': ys, 't': [1]}, dims=['t', 'y', 'x'])
xrds = xr.Dataset({'v': xrarr})
ds = Dataset(xrds, kdims=['x', 'y'], vdims=['v'], datatype=['xarray'])
canonical = ds.dimension_values(2, flat=False)
self.assertEqual(canonical.ndim, 2)
expected = np.array([[0, 1], [2, 3], [4, 5]])
self.assertEqual(canonical, expected)
def test_xarray_dataset_names_and_units(self):
xs = [0.1, 0.2, 0.3]
ys = [0, 1]
zs = np.array([[0, 1], [2, 3], [4, 5]])
da = xr.DataArray(zs, coords=[('x_dim', xs), ('y_dim', ys)], name="data_name", dims=['y_dim', 'x_dim'])
da.attrs['long_name'] = "data long name"
da.attrs['units'] = "array_unit"
da.x_dim.attrs['units'] = "x_unit"
da.y_dim.attrs['long_name'] = "y axis long name"
dataset = Dataset(da)
self.assertEqual(dataset.get_dimension("x_dim"), Dimension("x_dim", unit="x_unit"))
self.assertEqual(dataset.get_dimension("y_dim"), Dimension("y_dim", label="y axis long name"))
self.assertEqual(dataset.get_dimension("data_name"),
Dimension("data_name", label="data long name", unit="array_unit"))
def test_xarray_dataset_dataarray_vs_dataset(self):
xs = [0.1, 0.2, 0.3]
ys = [0, 1]
zs = np.array([[0, 1], [2, 3], [4, 5]])
da = xr.DataArray(zs, coords=[('x_dim', xs), ('y_dim', ys)], name="data_name", dims=['y_dim', 'x_dim'])
da.attrs['long_name'] = "data long name"
da.attrs['units'] = "array_unit"
da.x_dim.attrs['units'] = "x_unit"
da.y_dim.attrs['long_name'] = "y axis long name"
ds = da.to_dataset()
dataset_from_da = Dataset(da)
dataset_from_ds = Dataset(ds)
self.assertEqual(dataset_from_da, dataset_from_ds)
# same with reversed names:
da_rev = xr.DataArray(zs, coords=[('x_dim', xs), ('y_dim', ys)], name="data_name", dims=['x_dim', 'y_dim'])
da_rev.attrs['long_name'] = "data long name"
da_rev.attrs['units'] = "array_unit"
da_rev.x_dim.attrs['units'] = "x_unit"
da_rev.y_dim.attrs['long_name'] = "y axis long name"
ds_rev = da_rev.to_dataset()
dataset_from_da_rev = Dataset(da_rev)
dataset_from_ds_rev = Dataset(ds_rev)
self.assertEqual(dataset_from_da_rev, dataset_from_ds_rev)
def test_xarray_override_dims(self):
xs = [0.1, 0.2, 0.3]
ys = [0, 1]
zs = np.array([[0, 1], [2, 3], [4, 5]])
da = xr.DataArray(zs, coords=[('x_dim', xs), ('y_dim', ys)], name="data_name", dims=['y_dim', 'x_dim'])
da.attrs['long_name'] = "data long name"
da.attrs['units'] = "array_unit"
da.x_dim.attrs['units'] = "x_unit"
da.y_dim.attrs['long_name'] = "y axis long name"
ds = Dataset(da, kdims=["x_dim", "y_dim"], vdims=["z_dim"])
x_dim = Dimension("x_dim")
y_dim = Dimension("y_dim")
z_dim = Dimension("z_dim")
self.assertEqual(ds.kdims[0], x_dim)
self.assertEqual(ds.kdims[1], y_dim)
self.assertEqual(ds.vdims[0], z_dim)
ds_from_ds = Dataset(da.to_dataset(), kdims=["x_dim", "y_dim"], vdims=["data_name"])
self.assertEqual(ds_from_ds.kdims[0], x_dim)
self.assertEqual(ds_from_ds.kdims[1], y_dim)
data_dim = Dimension("data_name")
self.assertEqual(ds_from_ds.vdims[0], data_dim)
def test_xarray_coord_ordering(self):
data = np.zeros((3,4,5))
coords = OrderedDict([('b', range(3)), ('c', range(4)), ('a', range(5))])
darray = xr.DataArray(data, coords=coords, dims=['b', 'c', 'a'])
dataset = xr.Dataset({'value': darray}, coords=coords)
ds = Dataset(dataset)
self.assertEqual(ds.kdims, ['b', 'c', 'a'])
def test_irregular_and_regular_coordinate_inference(self):
data = self.get_irregular_dataarray()
ds = Dataset(data, vdims='Value')
self.assertEqual(ds.kdims, [Dimension('band'), Dimension('x'), Dimension('y')])
self.assertEqual(ds.dimension_values(3, flat=False), data.values[:, ::-1].transpose([1, 2, 0]))
def test_irregular_and_regular_coordinate_inference_inverted(self):
data = self.get_irregular_dataarray(False)
ds = Dataset(data, vdims='Value')
self.assertEqual(ds.kdims, [Dimension('band'), Dimension('x'), Dimension('y')])
self.assertEqual(ds.dimension_values(3, flat=False), data.values.transpose([1, 2, 0]))
def test_irregular_and_regular_coordinate_explicit_regular_coords(self):
data = self.get_irregular_dataarray()
ds = Dataset(data, ['x', 'y'], vdims='Value')
self.assertEqual(ds.kdims, [Dimension('x'), Dimension('y')])
self.assertEqual(ds.dimension_values(2, flat=False), data.values[0, ::-1])
def test_irregular_and_regular_coordinate_explicit_regular_coords_inverted(self):
data = self.get_irregular_dataarray(False)
ds = Dataset(data, ['x', 'y'], vdims='Value')
self.assertEqual(ds.kdims, [Dimension('x'), Dimension('y')])
self.assertEqual(ds.dimension_values(2, flat=False), data.values[0])
def test_irregular_and_regular_coordinate_explicit_irregular_coords(self):
data = self.get_irregular_dataarray()
ds = Dataset(data, ['xc', 'yc'], vdims='Value')
self.assertEqual(ds.kdims, [Dimension('xc'), Dimension('yc')])
self.assertEqual(ds.dimension_values(2, flat=False), data.values[0])
def test_irregular_and_regular_coordinate_explicit_irregular_coords_inverted(self):
data = self.get_irregular_dataarray(False)
ds = Dataset(data, ['xc', 'yc'], vdims='Value')
self.assertEqual(ds.kdims, [Dimension('xc'), Dimension('yc')])
self.assertEqual(ds.dimension_values(2, flat=False), data.values[0])
def test_concat_grid_3d_shape_mismatch(self):
arr1 = np.random.rand(3, 2)
arr2 = np.random.rand(2, 3)
ds1 = Dataset(([0, 1], [1, 2, 3], arr1), ['x', 'y'], 'z')
ds2 = Dataset(([0, 1, 2], [1, 2], arr2), ['x', 'y'], 'z')
hmap = HoloMap({1: ds1, 2: ds2})
arr = np.full((3, 3, 2), np.NaN)
arr[:, :2, 0] = arr1
arr[:2, :, 1] = arr2
ds = Dataset(([1, 2], [0, 1, 2], [1, 2, 3], arr), ['Default', 'x', 'y'], 'z')
self.assertEqual(concat(hmap), ds)
def test_zero_sized_coordinates_range(self):
da = xr.DataArray(np.empty((2, 0)), dims=('y', 'x'), coords={'x': [], 'y': [0 ,1]}, name='A')
ds = Dataset(da)
x0, x1 = ds.range('x')
self.assertTrue(np.isnan(x0))
self.assertTrue(np.isnan(x1))
z0, z1 = ds.range('A')
self.assertTrue(np.isnan(z0))
self.assertTrue(np.isnan(z1))
def test_datetime_bins_range(self):
xs = [dt.datetime(2018, 1, i) for i in range(1, 11)]
ys = np.arange(10)
array = np.random.rand(10, 10)
ds = QuadMesh((xs, ys, array))
self.assertEqual(ds.interface.datatype, 'xarray')
expected = (np.datetime64(dt.datetime(2017, 12, 31, 12, 0)),
np.datetime64(dt.datetime(2018, 1, 10, 12, 0)))
self.assertEqual(ds.range('x'), expected)
def test_datetime64_bins_range(self):
xs = [np.datetime64(dt.datetime(2018, 1, i)) for i in range(1, 11)]
ys = np.arange(10)
array = np.random.rand(10, 10)
ds = QuadMesh((xs, ys, array))
self.assertEqual(ds.interface.datatype, 'xarray')
expected = (np.datetime64(dt.datetime(2017, 12, 31, 12, 0)),
np.datetime64(dt.datetime(2018, 1, 10, 12, 0)))
self.assertEqual(ds.range('x'), expected)
def test_select_dropped_dimensions_restoration(self):
d = np.random.randn(3, 8)
da = xr.DataArray(d, name='stuff', dims=['chain', 'value'],
coords=dict(chain=range(d.shape[0]), value=range(d.shape[1])))
ds = Dataset(da)
t = ds.select(chain=0)
self.assertEqual(t.data.dims , dict(chain=1,value=8))
self.assertEqual(t.data.stuff.shape , (1,8))
def test_dataset_array_init_hm(self):
"Tests support for arrays (homogeneous)"
raise SkipTest("Not supported")
# Disabled tests for NotImplemented methods
def test_dataset_add_dimensions_values_hm(self):
raise SkipTest("Not supported")
def test_dataset_sort_hm(self):
raise SkipTest("Not supported")
def test_dataset_sort_reverse_hm(self):
raise SkipTest("Not supported")
def test_dataset_sort_vdim_hm_alias(self):
raise SkipTest("Not supported")
def test_dataset_sort_vdim_hm(self):
raise SkipTest("Not supported")
def test_dataset_sort_reverse_vdim_hm(self):
raise SkipTest("Not supported")
def test_dataset_sample_hm(self):
raise SkipTest("Not supported")
def test_dataset_sample_hm_alias(self):
raise SkipTest("Not supported")
class DaskXArrayInterfaceTest(XArrayInterfaceTests):
    """
    Tests for XArray interface wrapping dask arrays
    """

    def setUp(self):
        # Skip the whole suite when dask is unavailable.
        # NOTE(review): the bare ``except:`` also swallows KeyboardInterrupt
        # and SystemExit; ``except ImportError:`` would be safer.
        try:
            import dask.array # noqa
        except:
            raise SkipTest('Dask could not be imported, cannot test '
                           'dask arrays with XArrayInterface')
        super(DaskXArrayInterfaceTest, self).setUp()

    def init_column_data(self):
        # Same fixtures as the parent, but the value column is a chunked
        # dask array (chunk size 2).
        import dask.array
        self.xs = np.array(range(11))
        self.xs_2 = self.xs**2
        self.y_ints = self.xs*2
        dask_y = dask.array.from_array(np.array(self.y_ints), 2)
        self.dataset_hm = Dataset((self.xs, dask_y),
                                  kdims=['x'], vdims=['y'])
        self.dataset_hm_alias = Dataset((self.xs, dask_y),
                                        kdims=[('x', 'X')], vdims=[('y', 'Y')])

    def init_grid_data(self):
        # Grid fixtures backed by a chunked dask array.
        import dask.array
        self.grid_xs = [0, 1]
        self.grid_ys = [0.1, 0.2, 0.3]
        self.grid_zs = np.array([[0, 1], [2, 3], [4, 5]])
        dask_zs = dask.array.from_array(self.grid_zs, 2)
        self.dataset_grid = self.element((self.grid_xs, self.grid_ys,
                                          dask_zs), kdims=['x', 'y'],
                                         vdims=['z'])
        self.dataset_grid_alias = self.element((self.grid_xs, self.grid_ys,
                                                dask_zs), kdims=[('x', 'X'), ('y', 'Y')],
                                               vdims=[('z', 'Z')])
        self.dataset_grid_inv = self.element((self.grid_xs[::-1], self.grid_ys[::-1],
                                              dask_zs), kdims=['x', 'y'],
                                             vdims=['z'])

    def test_xarray_dataset_with_scalar_dim_canonicalize(self):
        # Same canonicalization check as the parent, with dask-backed data.
        import dask.array
        xs = [0, 1]
        ys = [0.1, 0.2, 0.3]
        zs = dask.array.from_array(np.array([[[0, 1], [2, 3], [4, 5]]]), 2)
        xrarr = xr.DataArray(zs, coords={'x': xs, 'y': ys, 't': [1]}, dims=['t', 'y', 'x'])
        xrds = xr.Dataset({'v': xrarr})
        ds = Dataset(xrds, kdims=['x', 'y'], vdims=['v'], datatype=['xarray'])
        canonical = ds.dimension_values(2, flat=False)
        self.assertEqual(canonical.ndim, 2)
        expected = np.array([[0, 1], [2, 3], [4, 5]])
        self.assertEqual(canonical, expected)
class ImageElement_XArrayInterfaceTests(BaseImageElementInterfaceTests):
    """Image element tests backed by the xarray interface."""

    datatype = 'xarray'
    data_type = xr.Dataset

    __test__ = True

    def init_data(self):
        self.image = Image((self.xs, self.ys, self.array))
        self.image_inv = Image((self.xs[::-1], self.ys[::-1], self.array[::-1, ::-1]))

    def test_dataarray_dimension_order(self):
        # kdims should follow the DataArray dim names, not the coord order.
        x = np.linspace(-3, 7, 53)
        y = np.linspace(-5, 8, 89)
        z = np.exp(-1*(x**2 + y[:, np.newaxis]**2))
        array = xr.DataArray(z, coords=[y, x], dims=['x', 'y'])
        img = Image(array)
        self.assertEqual(img.kdims, [Dimension('x'), Dimension('y')])

    def test_dataarray_shape(self):
        x = np.linspace(-3, 7, 53)
        y = np.linspace(-5, 8, 89)
        z = np.exp(-1*(x**2 + y[:, np.newaxis]**2))
        array = xr.DataArray(z, coords=[y, x], dims=['x', 'y'])
        img = Image(array, ['x', 'y'])
        self.assertEqual(img.interface.shape(img, gridded=True), (53, 89))

    def test_dataarray_shape_transposed(self):
        # Swapping the declared kdims transposes the reported shape.
        x = np.linspace(-3, 7, 53)
        y = np.linspace(-5, 8, 89)
        z = np.exp(-1*(x**2 + y[:, np.newaxis]**2))
        array = xr.DataArray(z, coords=[y, x], dims=['x', 'y'])
        img = Image(array, ['y', 'x'])
        self.assertEqual(img.interface.shape(img, gridded=True), (89, 53))

    def test_select_on_transposed_dataarray(self):
        # Slicing the Image matches xarray's own .sel() on the same range.
        x = np.linspace(-3, 7, 53)
        y = np.linspace(-5, 8, 89)
        z = np.exp(-1*(x**2 + y[:, np.newaxis]**2))
        array = xr.DataArray(z, coords=[y, x], dims=['x', 'y'])
        img = Image(array)[1:3]
        self.assertEqual(img['z'], Image(array.sel(x=slice(1, 3)))['z'])

    def test_dataarray_with_no_coords(self):
        # Missing coordinates are filled with integer ranges.
        expected_xs = list(range(2))
        expected_ys = list(range(3))
        zs = np.arange(6).reshape(2, 3)
        xrarr = xr.DataArray(zs, dims=('x','y'))
        img = Image(xrarr)
        self.assertTrue(all(img.data.x == expected_xs))
        self.assertTrue(all(img.data.y == expected_ys))
        img = Image(xrarr, kdims=['x', 'y'])
        self.assertTrue(all(img.data.x == expected_xs))
        self.assertTrue(all(img.data.y == expected_ys))

    def test_dataarray_with_some_coords(self):
        # Irregularly spaced partial coordinates are rejected for Image.
        xs = [4.2, 1]
        zs = np.arange(6).reshape(2, 3)
        xrarr = xr.DataArray(zs, dims=('x','y'), coords={'x': xs})
        with self.assertRaises(ValueError):
            Image(xrarr)
        with self.assertRaises(ValueError):
            Image(xrarr, kdims=['x', 'y'])
class RGBElement_XArrayInterfaceTests(BaseRGBElementInterfaceTests):
    """Run the RGB element interface suite against the xarray backend."""

    datatype = 'xarray'
    data_type = xr.Dataset
    __test__ = True

    def init_data(self):
        # Supply one value dimension per colour channel of the (N, M, 3) array.
        channels = tuple(self.rgb_array[:, :, band] for band in range(3))
        self.rgb = RGB((self.xs, self.ys) + channels)
class RGBElement_PackedXArrayInterfaceTests(BaseRGBElementInterfaceTests):
    """RGB interface suite with all channels packed into one (N, M, 3) array."""

    datatype = 'xarray'
    data_type = xr.Dataset
    __test__ = True

    def init_data(self):
        packed = (self.xs, self.ys, self.rgb_array)
        self.rgb = RGB(packed)
class HSVElement_XArrayInterfaceTest(BaseHSVElementInterfaceTests):
    """Run the HSV element interface suite against the xarray backend."""

    datatype = 'xarray'
    data_type = xr.Dataset
    __test__ = True

    def init_data(self):
        # One value dimension per H, S and V plane of the (N, M, 3) array.
        planes = tuple(self.hsv_array[:, :, band] for band in range(3))
        self.hsv = HSV((self.xs, self.ys) + planes)
| 40.475248 | 115 | 0.582069 |
90e9d500ee450f21e793e778df5565eca112230c | 2,188 | py | Python | sdq/content/hrs_blue/bias.py | Eb-Zeero/tacapi | 2c94d037e2dd19bf5d1f67ea5ae34cb6cc6eef61 | [
"MIT"
] | null | null | null | sdq/content/hrs_blue/bias.py | Eb-Zeero/tacapi | 2c94d037e2dd19bf5d1f67ea5ae34cb6cc6eef61 | [
"MIT"
] | 5 | 2021-03-18T21:39:50.000Z | 2022-03-11T23:36:18.000Z | sdq/content/hrs_blue/bias.py | Eb-Zeero/tacapi | 2c94d037e2dd19bf5d1f67ea5ae34cb6cc6eef61 | [
"MIT"
] | null | null | null | from bokeh.models import ColumnDataSource, Whisker
from bokeh.plotting import figure
from flask import g
from sdq.queries.hrs import bias_counts_query, bias_gradient_query
from sdq.util import bokeh_plot_grid
def plot_counts(arm, name):
    """Scatter median bias counts over time with +/- one-sigma error bars."""
    source = bias_counts_query(str(g.dates['start_date']), str(g.dates['end_date']), arm)
    p = figure(name=name, plot_height=150, plot_width=200, title='Count vs time', x_axis_type='datetime')
    p.xaxis.axis_label = 'Date'
    p.yaxis.axis_label = 'Count'
    p.circle(source=ColumnDataSource(source),
             x='Date', y='bias_med',
             size=15, line_alpha=0.9, fill_alpha=0.8, color='blue')
    # Error-bar geometry: one vertical segment per point, capped with dashes.
    dates = source['Date']
    pairs = list(zip(source['bias_med'], source['bias_std']))
    err_xs = [(d, d) for d in dates]
    err_ys = [(med - sd, med + sd) for med, sd in pairs]
    err_min = [med - sd for med, sd in pairs]
    err_max = [med + sd for med, sd in pairs]
    p.multi_line(err_xs, err_ys, color='black', level="underlay", line_width=1)
    p.dash(y=err_max, x=dates, color='black', level="underlay", line_width=1, size=15)
    p.dash(y=err_min, x=dates, color='black', level="underlay", line_width=1, size=15)
    return p
def plot_gradient(arm, gradient, name):
    """Scatter the bias gradient coefficient (cfx or cfy) against time."""
    start = str(g.dates['start_date'])
    end = str(g.dates['end_date'])
    source = bias_gradient_query(start, end, arm, gradient)
    column = 'bias_cf{gradient}'.format(gradient=gradient)
    p = figure(name=name, plot_height=150, plot_width=200,
               title='Gradient cf{gradient}'.format(gradient=gradient),
               x_axis_type='datetime')
    p.xaxis.axis_label = 'Date'
    p.yaxis.axis_label = column
    p.circle(source=ColumnDataSource(source), y=column,
             x='Date', size=15, line_alpha=0.9, fill_alpha=0.8, color='blue')
    return p
# Page-level metadata consumed by the report framework.
title = 'HRS Bias Levels'
# Lay the three plots out in a two-column grid; note this runs at import time,
# so importing this module executes the database queries above.
content = bokeh_plot_grid(2,
                          plot_counts('blue', 'bias count medians'),
                          plot_gradient('blue', 'x', 'bias CFX'),
                          plot_gradient('blue', 'y', 'bias CFY')
                          )
description = 'Bias levels for HRS'
| 38.385965 | 115 | 0.640311 |
a304c9961e14f2c554202af9282550947b4e45ab | 12,387 | py | Python | pybamm/solvers/ode_solver.py | zlgenuine/pybamm | 5c43d17225710c5bea8e61b3863688eb7080e678 | [
"BSD-3-Clause"
] | null | null | null | pybamm/solvers/ode_solver.py | zlgenuine/pybamm | 5c43d17225710c5bea8e61b3863688eb7080e678 | [
"BSD-3-Clause"
] | null | null | null | pybamm/solvers/ode_solver.py | zlgenuine/pybamm | 5c43d17225710c5bea8e61b3863688eb7080e678 | [
"BSD-3-Clause"
] | null | null | null | #
# Base solver class
#
import casadi
import pybamm
import numpy as np
from .base_solver import add_external
class OdeSolver(pybamm.BaseSolver):
    """Solve a discretised model.

    Parameters
    ----------
    rtol : float, optional
        The relative tolerance for the solver (default is 1e-6).
    atol : float, optional
        The absolute tolerance for the solver (default is 1e-6).
    """

    def __init__(self, method=None, rtol=1e-6, atol=1e-6):
        super().__init__(method, rtol, atol)
        self.name = "Base ODE solver"

    def compute_solution(self, model, t_eval, inputs=None):
        """Calculate the solution of the model at specified times.

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate. Must have attributes rhs and
            initial_conditions
        t_eval : numeric type
            The times at which to compute the solution
        inputs : dict, optional
            Any input parameters to pass to the model when solving
        """
        timer = pybamm.Timer()

        # Set inputs and external
        self.set_inputs_and_external(inputs)

        # Solve
        solve_start_time = timer.time()
        pybamm.logger.info("Calling ODE solver")
        # self.dydt / self.y0 / self.event_funs / self.jacobian were populated
        # by a prior call to set_up() or set_up_casadi().
        solution = self.integrate(
            self.dydt,
            self.y0,
            t_eval,
            events=self.event_funs,
            mass_matrix=model.mass_matrix.entries,
            jacobian=self.jacobian,
        )
        solve_time = timer.time() - solve_start_time

        # Identify the event that caused termination
        termination = self.get_termination_reason(solution, self.events)

        return solution, solve_time, termination

    def set_up(self, model, inputs=None):
        """Unpack model, perform checks, simplify and calculate jacobian.

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate. Must have attributes rhs and
            initial_conditions
        inputs : dict, optional
            Any input parameters to pass to the model when solving

        Raises
        ------
        :class:`pybamm.SolverError`
            If the model contains any algebraic equations (in which case a DAE solver
            should be used instead)
        """
        # Check for algebraic equations
        if len(model.algebraic) > 0:
            raise pybamm.SolverError(
                """Cannot use ODE solver to solve model with DAEs"""
            )

        inputs = inputs or {}

        # create simplified rhs and event expressions
        concatenated_rhs = model.concatenated_rhs
        events = model.events

        if model.use_simplify:
            # set up simplification object, for re-use of dict
            simp = pybamm.Simplification()
            # create simplified rhs and event expressions
            pybamm.logger.info("Simplifying RHS")
            concatenated_rhs = simp.simplify(concatenated_rhs)
            pybamm.logger.info("Simplifying events")
            events = {name: simp.simplify(event) for name, event in events.items()}

        y0 = model.concatenated_initial_conditions[:, 0]

        if model.use_jacobian:
            # Create Jacobian from concatenated rhs
            y = pybamm.StateVector(slice(0, np.size(y0)))
            # set up Jacobian object, for re-use of dict
            jacobian = pybamm.Jacobian()
            pybamm.logger.info("Calculating jacobian")
            jac_rhs = jacobian.jac(concatenated_rhs, y)
            # Store the expression-tree jacobian on the model for later reuse.
            model.jacobian = jac_rhs
            model.jacobian_rhs = jac_rhs

            if model.use_simplify:
                pybamm.logger.info("Simplifying jacobian")
                jac_rhs = simp.simplify(jac_rhs)

            if model.convert_to_format == "python":
                pybamm.logger.info("Converting jacobian to python")
                jac_rhs = pybamm.EvaluatorPython(jac_rhs)
        else:
            jac_rhs = None

        if model.convert_to_format == "python":
            pybamm.logger.info("Converting RHS to python")
            concatenated_rhs = pybamm.EvaluatorPython(concatenated_rhs)
            pybamm.logger.info("Converting events to python")
            events = {
                name: pybamm.EvaluatorPython(event) for name, event in events.items()
            }

        # Create event-dependent function to evaluate events
        def get_event_class(event):
            return EvalEvent(event.evaluate)

        # Create function to evaluate jacobian
        if jac_rhs is not None:
            jacobian = Jacobian(jac_rhs.evaluate)
        else:
            jacobian = None

        # Add the solver attributes
        # Note: these are the (possibly) converted to python version rhs, algebraic
        # etc. The expression tree versions of these are attributes of the model
        self.y0 = y0
        self.dydt = Dydt(model, concatenated_rhs.evaluate)
        self.events = events
        self.event_funs = [get_event_class(event) for event in events.values()]
        self.jacobian = jacobian

        pybamm.logger.info("Finish solver set-up")

    def set_up_casadi(self, model, inputs=None):
        """Convert model to casadi format and use their inbuilt functionalities.

        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate. Must have attributes rhs and
            initial_conditions
        inputs : dict, optional
            Any input parameters to pass to the model when solving

        Raises
        ------
        :class:`pybamm.SolverError`
            If the model contains any algebraic equations (in which case a DAE solver
            should be used instead)
        """
        # Check for algebraic equations
        if len(model.algebraic) > 0:
            raise pybamm.SolverError(
                """Cannot use ODE solver to solve model with DAEs"""
            )

        y0 = model.concatenated_initial_conditions[:, 0]

        # Symbolic time, state and input variables for CasADi.
        t_casadi = casadi.MX.sym("t")
        y_casadi = casadi.MX.sym("y", len(y0))
        inputs = inputs or {}
        u_casadi = {name: casadi.MX.sym(name) for name in inputs.keys()}
        if self.y_pad is not None:
            # Append symbols for the externally supplied (padded) state entries.
            y_ext = casadi.MX.sym("y_ext", len(self.y_pad))
            y_casadi_w_ext = casadi.vertcat(y_casadi, y_ext)
        else:
            y_casadi_w_ext = y_casadi

        pybamm.logger.info("Converting RHS to CasADi")
        concatenated_rhs = model.concatenated_rhs.to_casadi(
            t_casadi, y_casadi_w_ext, u_casadi
        )
        pybamm.logger.info("Converting events to CasADi")
        casadi_events = {
            name: event.to_casadi(t_casadi, y_casadi_w_ext, u_casadi)
            for name, event in model.events.items()
        }

        # Create function to evaluate rhs
        u_casadi_stacked = casadi.vertcat(*[u for u in u_casadi.values()])
        concatenated_rhs_fn = casadi.Function(
            "rhs", [t_casadi, y_casadi_w_ext, u_casadi_stacked], [concatenated_rhs]
        )

        # Create event-dependent function to evaluate events
        def get_event_class(event):
            casadi_event_fn = casadi.Function(
                "event", [t_casadi, y_casadi_w_ext, u_casadi_stacked], [event]
            )
            return EvalEventCasadi(casadi_event_fn)

        # Create function to evaluate jacobian
        if model.use_jacobian:
            pybamm.logger.info("Calculating jacobian")
            # Differentiate w.r.t. the model state only, not the external part.
            casadi_jac = casadi.jacobian(concatenated_rhs, y_casadi)
            casadi_jac_fn = casadi.Function(
                "jacobian", [t_casadi, y_casadi_w_ext, u_casadi_stacked], [casadi_jac]
            )
            jacobian = JacobianCasadi(casadi_jac_fn)
        else:
            jacobian = None

        # Add the solver attributes
        self.y0 = y0
        self.dydt = DydtCasadi(model, concatenated_rhs_fn)
        self.events = model.events
        self.event_funs = [get_event_class(event) for event in casadi_events.values()]
        self.jacobian = jacobian

        pybamm.logger.info("Finish solver set-up")

    def set_inputs_and_external(self, inputs):
        """
        Set values that are controlled externally, such as external variables and input
        parameters

        Parameters
        ----------
        inputs : dict
            Any input parameters to pass to the model when solving
        """
        # Propagate padding/external values and inputs to every callable the
        # integrator will invoke (rhs, each event function and the jacobian).
        self.dydt.set_pad_ext(self.y_pad, self.y_ext)
        self.dydt.set_inputs(inputs)
        for evnt in self.event_funs:
            evnt.set_pad_ext(self.y_pad, self.y_ext)
            evnt.set_inputs(inputs)
        if self.jacobian:
            self.jacobian.set_pad_ext(self.y_pad, self.y_ext)
            self.jacobian.set_inputs(inputs)

    def integrate(
        self, derivs, y0, t_eval, events=None, mass_matrix=None, jacobian=None
    ):
        """
        Solve a model defined by dydt with initial conditions y0.

        Parameters
        ----------
        derivs : method
            A function that takes in t and y and returns the time-derivative dydt
        y0 : numeric type
            The initial conditions
        t_eval : numeric type
            The times at which to compute the solution
        events : method, optional
            A function that takes in t and y and returns conditions for the solver to
            stop
        mass_matrix : array_like, optional
            The (sparse) mass matrix for the chosen spatial method.
        jacobian : method, optional
            A function that takes in t and y and returns the Jacobian
        """
        # Subclasses must implement the actual time stepping.
        raise NotImplementedError
class SolverCallable:
    "A class that will be called by the solver when integrating"

    # Class-level defaults; concrete values are injected via set_pad_ext /
    # set_inputs before integration starts.
    y_pad = None
    y_ext = None
    inputs = {}
    inputs_casadi = casadi.DM()

    def set_pad_ext(self, y_pad, y_ext):
        self.y_pad = y_pad
        self.y_ext = y_ext

    def set_inputs(self, inputs):
        self.inputs = inputs
        # Stack the input values into a single column vector for CasADi calls.
        self.inputs_casadi = casadi.vertcat(*[x for x in inputs.values()])
# Set up caller classes outside of the solver object to allow pickling
class Dydt(SolverCallable):
    "Returns information about time derivatives at time t and state y"

    def __init__(self, model, concatenated_rhs_fn):
        self.model = model
        self.concatenated_rhs_fn = concatenated_rhs_fn

    def __call__(self, t, y):
        pybamm.logger.debug("Evaluating RHS for {} at t={}".format(self.model.name, t))
        # Integrators pass a flat state; make it a column vector and append any
        # externally supplied entries before evaluating the rhs expression.
        y = y[:, np.newaxis]
        y = add_external(y, self.y_pad, self.y_ext)
        dy = self.concatenated_rhs_fn(t, y, self.inputs, known_evals={})[0]
        # Flatten back to 1-D for the integrator.
        return dy[:, 0]
class DydtCasadi(Dydt):
    "Returns information about time derivatives at time t and state y, with CasADi"

    def __call__(self, t, y):
        pybamm.logger.debug("Evaluating RHS for {} at t={}".format(self.model.name, t))
        y = y[:, np.newaxis]
        y = add_external(y, self.y_pad, self.y_ext)
        # .full() converts the CasADi DM result to a dense numpy array.
        dy = self.concatenated_rhs_fn(t, y, self.inputs_casadi).full()
        return dy[:, 0]
class EvalEvent(SolverCallable):
    "Returns information about events at time t and state y"

    def __init__(self, event_fn):
        self.event_fn = event_fn

    def __call__(self, t, y):
        # Build the full (column-vector) state including external entries.
        full_state = add_external(y[:, np.newaxis], self.y_pad, self.y_ext)
        return self.event_fn(t, full_state, self.inputs)
class EvalEventCasadi(EvalEvent):
    "Returns information about events at time t and state y"

    def __init__(self, event_fn):
        self.event_fn = event_fn

    def __call__(self, t, y):
        full_state = add_external(y[:, np.newaxis], self.y_pad, self.y_ext)
        # CasADi functions take the stacked input vector, not the dict.
        return self.event_fn(t, full_state, self.inputs_casadi)
class Jacobian(SolverCallable):
    "Returns information about the jacobian at time t and state y"

    def __init__(self, jac_fn):
        self.jac_fn = jac_fn

    def __call__(self, t, y):
        full_state = add_external(y[:, np.newaxis], self.y_pad, self.y_ext)
        # The evaluator returns (value, known_evals); only the value is needed.
        return self.jac_fn(t, full_state, self.inputs, known_evals={})[0]
class JacobianCasadi(Jacobian):
    "Returns information about the jacobian at time t and state y, with CasADi"

    def __call__(self, t, y):
        full_state = add_external(y[:, np.newaxis], self.y_pad, self.y_ext)
        return self.jac_fn(t, full_state, self.inputs_casadi)
| 33.569106 | 87 | 0.614677 |
c9abfa28d3618816a66ed0873f9f061beb9bb3b8 | 8,432 | py | Python | test_bank/borrowing/views.py | Ursidours/django-tech-test | a96e1d73c9221458ca68b999d817881863a60584 | [
"BSD-3-Clause"
] | 1 | 2021-11-13T01:30:30.000Z | 2021-11-13T01:30:30.000Z | test_bank/borrowing/views.py | arnaudblois/django-tech-test | a96e1d73c9221458ca68b999d817881863a60584 | [
"BSD-3-Clause"
] | null | null | null | test_bank/borrowing/views.py | arnaudblois/django-tech-test | a96e1d73c9221458ca68b999d817881863a60584 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.contrib import messages
from django.core.exceptions import SuspiciousOperation
from django.http import Http404
from django.core.urlresolvers import reverse
from django.views.generic import (
DetailView,
UpdateView,
CreateView,
FormView
)
from django.http.response import (
HttpResponseNotAllowed,
HttpResponseRedirect,
HttpResponseBadRequest,
JsonResponse,
)
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext as _
from django.urls.base import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.utils import timezone
from django.db.models import Count
from .forms import UserBorrowerForm, VeriFyPhoneForm, LoanForm
from .models import BorrowerProfile, Business, Loan
@login_required
def verify_phone(request):
    """
    POST only: sends the verification number to the user's phone.

    Responds 200 when the number was sent, 400 when the submitted phone
    number is invalid, and 405 for any non-POST request.
    """
    if request.method != "POST":
        return HttpResponseNotAllowed(['POST'])
    form = VeriFyPhoneForm(request.POST)
    if not form.is_valid():
        return HttpResponseBadRequest(_("Your phone number is not valid. Try the format +447xxxxxxxxx"))
    form.process()
    return JsonResponse({})
# ----------------------------------------------------
# Borrower Profile Views
# ----------------------------------------------------
@login_required
def home_view(request):
    """
    Borrowing-section landing page: summarises whether the borrower profile
    is set up, the businesses registered and the loans subscribed.
    """
    context = {"borrower": None, 'businesses': None, 'loans': None}
    try:
        profile = BorrowerProfile.objects.get(user=request.user)
    except BorrowerProfile.DoesNotExist:
        pass
    else:
        context["borrower"] = profile
        context['businesses'] = Business.objects.filter(owner=profile).annotate(
            loan_nb=Count('loan', distinct=True))
        context['loans'] = profile.loan_set.all()
    return render(request, "borrowing/home.html", context)
class BorrowerCreateView(LoginRequiredMixin, FormView):
    """
    CBV view to activate the user's borrower profile. Instantiate a UserBorrowerForm,
    an hybrid form to update both first_name and last_name and the profile info at
    the same time.
    """
    template_name = 'borrowing/account_activation.html'
    form_class = UserBorrowerForm
    success_url = reverse_lazy('borrowing:create_business')

    def dispatch(self, request, *args, **kwargs):
        """ if the user is already a borrower, redirect to borrowing:home"""
        if BorrowerProfile.objects.filter(user_id=self.request.user.id).exists():
            return HttpResponseRedirect(reverse('borrowing:home'))
        else:
            return super().dispatch(request, *args, **kwargs)

    def get_form(self):
        # Bind POST data only on submission; the form also needs the user so it
        # can update first_name/last_name alongside the profile fields.
        data = self.request.POST if self.request.method == "POST" else None
        return self.form_class(data=data, user=self.request.user)

    def form_valid(self, form):
        form.save()
        messages.success(self.request, _("You are now able to register your business for a loan"))
        return super().form_valid(form)
class BorrowerProfileRequiredMixin(LoginRequiredMixin):
    """
    CBV mixin which verifies that the current user is authenticated and
    has a borrower profile.
    """

    def dispatch(self, request, *args, **kwargs):
        try:
            # Cache the profile on the view instance; subclasses rely on
            # self.borrower (e.g. in form_valid / get_object).
            self.borrower = BorrowerProfile.objects.get(user_id=self.request.user.id)
        except BorrowerProfile.DoesNotExist:
            messages.error(request, _("You must have a valid borrower profile to access this page"))
            return HttpResponseRedirect(reverse('borrowing:home'))
        return super().dispatch(request, *args, **kwargs)
# ---------------------------------
# Business Views
# --------------------------------
class BusinessCreateView(SuccessMessageMixin, BorrowerProfileRequiredMixin, CreateView):
    """ CBV creating a new business for the current user """
    model = Business
    fields = ['name', 'address', 'company_number', 'sector']
    success_message = _("Your business has been successfully added.")
    success_url = reverse_lazy('borrowing:home')
    template_name = "borrowing/create_business_form.html"

    def form_valid(self, form):
        # self.borrower is set by BorrowerProfileRequiredMixin.dispatch.
        form.instance.owner = self.borrower
        form.save()
        return super().form_valid(form)
class BusinessUpdateView(SuccessMessageMixin, BorrowerProfileRequiredMixin, UpdateView):
    """ CBV to update an existing business -- only possible if the business has
    no related loan and belongs to the user """
    model = Business
    fields = ['name', 'address', 'company_number', 'sector']
    success_message = _("Your business has been successfully edited.")
    success_url = reverse_lazy('borrowing:home')
    template_name = "borrowing/update_business_form.html"

    def get_object(self):
        """
        Only get the Business record for the user making the request
        and only if there is no related loans
        """
        business = get_object_or_404(Business, owner=self.borrower, pk=self.kwargs['pk'])
        if business.loan_set.exists():
            # Fixed copy-pasted message from delete_business: this view edits,
            # it does not delete.
            raise SuspiciousOperation(_('You cannot edit a business with existing loans'))
        return business
@login_required
def delete_business(request, pk):
    """POST only: delete one of the user's businesses, unless it has loans."""
    if request.method != "POST":
        return HttpResponseNotAllowed(['POST', ])
    business = get_object_or_404(Business, owner__user=request.user, pk=pk)
    if business.loan_set.exists():
        raise SuspiciousOperation(_('You cannot delete a business with existing loans'))
    business.delete()
    # The instance still carries its field values after deletion.
    messages.success(request, _("The business {0} has been deleted").format(business.name))
    return HttpResponseRedirect(reverse('borrowing:home'))
# ----------------------
# Loan
# -----------------------
class LoanCreateView(SuccessMessageMixin, BorrowerProfileRequiredMixin, CreateView):
    """ CBV creating a new loan for the current BorrowerProfile """
    model = Loan
    form_class = LoanForm
    success_message = _(
        "Your loan request was successful and will be reviewed by "
        "our financial services shortly"
    )
    success_url = reverse_lazy('borrowing:home')
    template_name = "borrowing/loan_form.html"

    def get_form(self):
        """
        makes sure the business field can only contain businesses belonging to the current profile
        sets the interest rate to read_only (that should be something taken in charge by javascript)
        it is displayed nonetheless as the user needs to know what she's signing for.
        """
        form = super().get_form()
        # Restrict selectable businesses to the requesting borrower's own.
        form.fields['business'].queryset = Business.objects.filter(owner=self.borrower)
        # Fixed default rate, shown read-only to the user.
        form.fields['interest_rate'].widget.attrs = {'readonly': True, }
        form.fields['interest_rate'].initial = 0.05
        return form

    def form_valid(self, form):
        # Attach the loan to the authenticated borrower, never to form input.
        form.instance.borrower = self.borrower
        form.save()
        return super().form_valid(form)
class LoanDetailView(BorrowerProfileRequiredMixin, DetailView):
    """
    Shows a single loan belonging to the current borrower; the template
    offers a cancel option while the loan is still pending.
    """
    model = Loan

    def get_object(self):
        # Restrict the lookup to the requesting borrower's own loans.
        return get_object_or_404(Loan, pk=self.kwargs['pk'], borrower=self.borrower)
@login_required
def cancel_loan_request(request, pk):
    """
    cancels an existing loan by setting its status to 4
    only if its status was already 0
    """
    if not request.method == "POST":
        return HttpResponseNotAllowed(['POST', ])
    loan = get_object_or_404(Loan, pk=pk, borrower__user=request.user)
    if loan.status != 0:
        # Only pending loans (status 0) may be cancelled.
        raise SuspiciousOperation(_("Processed loans cannot be deleted"))
    loan.status = 4 # status 4 means "cancelled"
    loan.modified_at = timezone.now()
    loan.save()
    messages.success(request, _("The loan request has been cancelled"))
    return HttpResponseRedirect(reverse('borrowing:home'))
| 36.982456 | 107 | 0.682163 |
d9d5b46815e75e56a9c977bd5d0e594ee97ba71b | 688 | py | Python | setup.py | ixjlyons/qtgallery | 9538b83b8d29090601a41f6d200c4cfda1b1b5fe | [
"MIT"
] | null | null | null | setup.py | ixjlyons/qtgallery | 9538b83b8d29090601a41f6d200c4cfda1b1b5fe | [
"MIT"
] | 2 | 2021-11-15T21:27:06.000Z | 2021-11-15T21:28:39.000Z | setup.py | ixjlyons/qtgallery | 9538b83b8d29090601a41f6d200c4cfda1b1b5fe | [
"MIT"
] | null | null | null | import os
import codecs
from setuptools import setup
def read(fp):
    """Return the text of *fp*, resolved relative to this file's directory.

    Decoding explicitly as UTF-8 makes the build independent of the
    platform's locale encoding (the previous call left ``encoding`` unset,
    so non-ASCII files could fail or be mis-decoded on e.g. Windows).
    """
    here = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(here, fp), 'r', encoding='utf-8') as f:
        return f.read()
# Load __version__ from qtgallery/_version.py without importing the package
# (importing could fail at build time if runtime dependencies are missing).
exec(read(os.path.join("qtgallery", "_version.py")))

setup(
    name="qtgallery",
    version=__version__,  # defined by the exec() above
    description="sphinx-gallery scraper for Qt examples and tutorials",
    long_description=read("README.rst"),
    author="Kenneth Lyons",
    author_email="ixjlyons@gmail.com",
    license="MIT",
    packages=["qtgallery"],
    install_requires=[
        "qtpy",
        "pyvirtualdisplay",
        "sphinx_gallery",
        "pillow",
        "sphinx_rtd_theme",
    ],
)
| 22.193548 | 71 | 0.639535 |
decb25cf1c3ef6c3e00c74a000b288270efc88e1 | 1,126 | py | Python | test_hwang.py | sjyk/deeplens-cv | 8383e6c377a93304534c46e3c9fd7821cc068d98 | [
"MIT"
] | 11 | 2019-10-07T22:06:30.000Z | 2020-08-26T22:10:53.000Z | test_hwang.py | sjyk/deeplens-cv | 8383e6c377a93304534c46e3c9fd7821cc068d98 | [
"MIT"
] | 16 | 2019-11-02T00:32:00.000Z | 2022-02-10T00:23:32.000Z | test_hwang.py | sjyk/deeplens-cv | 8383e6c377a93304534c46e3c9fd7821cc068d98 | [
"MIT"
] | 9 | 2019-10-07T13:33:13.000Z | 2020-09-27T09:50:58.000Z | import sys
from deeplens.dataflow.agg import count
from deeplens.full_manager.full_manager import FullStorageManager
from deeplens.optimizer.deeplens import DeepLensOptimizer
from deeplens.struct import CustomTagger, VideoStream, Box
from deeplens.tracking.contour import KeyPoints
from deeplens.tracking.event import Filter, ActivityMetric
if len(sys.argv) < 2:
    print("Enter filename as argv[1]")
    sys.exit(1)  # exit() -> sys.exit(): works even when site builtins are absent

filename = sys.argv[1]


def _run_pipeline(label, stream):
    """Run the fixed watch-region pipeline over *stream* and print its stats.

    Factors out the pipeline that was previously duplicated for the Hwang
    and OpenCV decoders; the unused DeepLensOptimizer instances were dropped.
    """
    region = Box(200, 550, 350, 750)
    pipeline = stream[KeyPoints()][ActivityMetric('one', region)][Filter('one', [-0.25, -0.25, 1, -0.25, -0.25], 1.5, delay=10)]
    # pipeline = DeepLensOptimizer().optimize(pipeline)
    result = count(pipeline, ['one'], stats=True)
    print(label, result)


# Identical pipeline on both decoders so the timings are directly comparable.
_run_pipeline("Hwang:", VideoStream(filename, hwang=True, rows=range(0, 8000, 400)))
_run_pipeline("OpenCV:", VideoStream(filename, hwang=False, limit=500))
| 31.277778 | 120 | 0.721137 |
3b0c0e914a71a6ee9824fd7ba414d4c9394eee44 | 266 | py | Python | client/python/ntcore/__about__.py | nantutech/ntcore | 2daacad2435c30f116b76685aa579b4665bff9f7 | [
"Apache-2.0"
] | 9 | 2021-06-08T00:15:47.000Z | 2021-11-02T08:34:03.000Z | client/python/ntcore/__about__.py | nantutech/ntcore | 2daacad2435c30f116b76685aa579b4665bff9f7 | [
"Apache-2.0"
] | 1 | 2021-08-20T04:05:39.000Z | 2021-08-20T04:05:39.000Z | client/python/ntcore/__about__.py | nantutech/ntcore | 2daacad2435c30f116b76685aa579b4665bff9f7 | [
"Apache-2.0"
] | 1 | 2021-08-13T21:18:24.000Z | 2021-08-13T21:18:24.000Z | # -*- coding: utf-8 -*-
# Package metadata consumed by setup.py and by introspection tools.
__description__ = "Python client for interfacing with NTCore"
__license__ = "Apache 2.0"
__maintainer__ = "Jinxiong Tan"
__maintainer_email__ = "jtan@nantutech.com"
__title__ = "ntcore"
__url__ = "https://www.nantu.io/"
__version__ = "0.0.2" | 29.555556 | 61 | 0.721805 |
3bc55dd2cd0b831c22a64b0a1f28b88effee55af | 64,026 | py | Python | psychopy_tobii_controller/tobii_wrapper.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | psychopy_tobii_controller/tobii_wrapper.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | psychopy_tobii_controller/tobii_wrapper.py | Toonwire/infancy_eye_tracking | 7b96a9d832f60f83fd5098ada2117ab1d0f56fed | [
"MIT"
] | null | null | null | #
# Tobii controller for PsychoPy
#
# author: Hiroyuki Sogo
# Distributed under the terms of the GNU General Public License v3 (GPLv3).
#
# edited by: Lukas Villumsen and Sebastian Nyholm
#
#
from __future__ import division
from __future__ import absolute_import
import types
import datetime
import numpy as np
import time
import warnings
import math
import tobii_research
try:
import Image
import ImageDraw
except:
from PIL import Image
from PIL import ImageDraw
import psychopy.visual
import psychopy.event
import psychopy.core
import psychopy.monitors
import psychopy.logging
import psychopy.sound
class tobii_controller:
"""
Default estimates, subject to change
"""
dist_to_screen = 60
screen_width = 1200
screen_height = 800
"""
PsychoPy specfications
"""
psychopy.logging.console.setLevel(psychopy.logging.CRITICAL) # IGNORE UNSAVED MONITOR WARNINGS IN CONSOLE
default_background_color = [-1,-1,-1]
is_mouse_enabled = False
rot_deg_per_frame = 3 # how many degrees of rotation per frame
default_calibration_target_dot_size = {
'pix': 2.0, 'norm':0.004, 'height':0.002, 'cm':0.05,
'deg':0.05, 'degFlat':0.05, 'degFlatPos':0.05
}
default_calibration_target_disc_size = {
'pix': 2.0*20, 'norm':0.004*20, 'height':0.002*20, 'cm':0.05*20,
'deg':0.05*20, 'degFlat':0.05*20, 'degFlatPos':0.05*20
}
default_key_index_dict = {
'1':0, 'num_1':0, '2':1, 'num_2':1, '3':2, 'num_3':2,
'4':3, 'num_4':3, '5':4, 'num_5':4, '6':5, 'num_6':5,
'7':6, 'num_7':6, '8':7, 'num_8':7, '9':8, 'num_9':8
}
"""
Tobii controller for PsychoPy
tobii_research package is required to use this class.
"""
eyetracker = None
calibration = None
win = None
control_window = None
gaze_data = []
event_data = []
retry_points = []
datafile = None
embed_events = False
recording = False
key_index_dict = default_key_index_dict.copy()
# Tobii data collection parameters
subscribe_to_data = False
do_reset_recording = True
current_target = (0.5, 0.5)
global_gaze_data = []
gaze_params = [
'device_time_stamp',
'left_gaze_origin_in_trackbox_coordinate_system',
'left_gaze_origin_in_user_coordinate_system',
'left_gaze_origin_validity',
'left_gaze_point_in_user_coordinate_system',
'left_gaze_point_on_display_area',
'left_gaze_point_validity',
'left_pupil_diameter',
'left_pupil_validity',
'right_gaze_origin_in_trackbox_coordinate_system',
'right_gaze_origin_in_user_coordinate_system',
'right_gaze_origin_validity',
'right_gaze_point_in_user_coordinate_system',
'right_gaze_point_on_display_area',
'right_gaze_point_validity',
'right_pupil_diameter',
'right_pupil_validity',
'system_time_stamp',
'current_target_point_on_display_area'
]
# license_file = "licenses/license_key_00395217_-_DTU_Compute_IS404-100106342114" #lab
license_file = "licenses/license_key_00395217_-_DTU_Compute_IS404-100106241134" #home
    def __init__(self, screen_width, screen_height, eyetracker_id=0):
        """
        Initialize tobii_controller object and connect to the eye tracker.

        :param screen_width: stimulus screen width in pixels.
        :param screen_height: stimulus screen height in pixels.
        :param int eyetracker_id: ID of Tobii unit to connect with.
            Default value is 0.
        """
        self.screen_width = screen_width
        self.screen_height = screen_height
        # Attention-grabbing audio played during calibration.
        self.sound = psychopy.sound.Sound('sounds/baby_einstein.wav')
        # Connects immediately; may leave self.eyetracker as None on failure.
        self.set_up_eyetracker(eyetracker_id)
    def set_up_eyetracker(self, eyetracker_id=0):
        """
        Find the Tobii unit at *eyetracker_id*, apply the Pro license and
        keep the tracker only if it actually produces gaze data; otherwise
        ``self.eyetracker`` is left as / reset to None.

        :param int eyetracker_id: index into the list of detected trackers.
        :raises ValueError: if connecting or licensing fails.
        """
        eyetrackers = tobii_research.find_all_eyetrackers()
        if len(eyetrackers)==0:
            print('No Tobii eyetrackers')
        else:
            try:
                self.eyetracker = eyetrackers[eyetracker_id]
                with open(self.license_file, "rb") as f:
                    license = f.read()  # NOTE(review): shadows the builtin `license`
                res = self.eyetracker.apply_licenses(license)
                if len(res) == 0:
                    print("Successfully applied license from single key")
                else:
                    print("Failed to apply license from single key. Validation result: %s." % (res[0].validation_result))
            except:
                # NOTE(review): bare `except` reports any failure (e.g. a
                # missing license file) as "Invalid eyetracker ID" -- consider
                # narrowing to IndexError/OSError.
                raise ValueError('Invalid eyetracker ID {}\n({} eyetrackers found)'.format(eyetracker_id, len(eyetrackers)))
            # Probe the device: keep it only if it delivers samples.
            if self.is_eye_tracker_on():
                self.calibration = tobii_research.ScreenBasedCalibration(self.eyetracker)
            else:
                self.eyetracker = None
    def is_eye_tracker_on(self):
        """Probe the tracker by recording for one second; return True when
        at least one gaze sample arrived."""
        self.subscribe_dict()
        self.start_recording()
        time.sleep(1)  # give the device time to deliver samples
        self.stop_recording()
        self.unsubscribe_dict()
        return len(self.global_gaze_data) > 0
    def set_dist_to_screen(self, dist_to_screen):
        """Set the viewer-to-screen distance used for degree conversions
        (presumably cm, as it is fed to psychopy Monitor.setDistance)."""
        self.dist_to_screen = dist_to_screen
    def play_sound(self):
        """Start playback of the attention-grabbing sound."""
        self.sound.play()
    def pause_sound(self):
        """Stop playback of the attention-grabbing sound."""
        self.sound.stop()
def cm2deg(self, cm, monitor, correctFlat=False):
"""
Bug-fixed version of psychopy.tools.monitorunittools.cm2deg
(PsychoPy version<=1.85.1).
"""
if not isinstance(monitor, psychopy.monitors.Monitor):
msg = ("cm2deg requires a monitors.Monitor object as the second "
"argument but received %s")
raise ValueError(msg % str(type(monitor)))
dist = monitor.getDistance()
if dist is None:
msg = "Monitor %s has no known distance (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if correctFlat:
return np.degrees(np.arctan(cm / dist))
else:
return cm / (dist * 0.017455)
def pix2deg(self, pixels, monitor, correctFlat=False):
"""
Bug-fixed version of psychopy.tools.monitorunittools.pix2deg
(PsychoPy version<=1.85.1).
"""
scrWidthCm = monitor.getWidth()
scrSizePix = monitor.getSizePix()
if scrSizePix is None:
msg = "Monitor %s has no known size in pixels (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
if scrWidthCm is None:
msg = "Monitor %s has no known width in cm (SEE MONITOR CENTER)"
raise ValueError(msg % monitor.name)
cmSize = pixels * float(scrWidthCm) / scrSizePix[0]
return self.cm2deg(cmSize, monitor, correctFlat)
def make_psycho_window(self, background_color=None, screen=1):
self.bg_color = background_color
# make a new monitor for the window - ignore the warning (we dont store any calibrations for this monitor)
mon = psychopy.monitors.Monitor('MyScreen')
width = self.screen_width if screen == 1 else 700
height = self.screen_width if screen == 1 else 500
mon.setDistance(self.dist_to_screen)
mon.setSizePix((width, height))
bg = self.bg_color if self.bg_color != None else self.default_background_color
if screen == 1:
self.win = psychopy.visual.Window(size=(self.screen_width, self.screen_height), screen=screen, fullscr=True, units='norm', monitor=mon)
self.win.setColor(bg, colorSpace='rgb')
psychopy.event.Mouse(visible=self.is_mouse_enabled, win=self.win)
if screen == 0:
self.control_window = psychopy.visual.Window(size=(width, height), screen=screen, fullscr=False, units='norm', monitor=mon, pos = [1920-width-10,1080/4])
self.control_window.setColor(bg, colorSpace='rgb')
print(self.control_window.pos)
def close_psycho_window(self, screen=1):
self.bg_color = None # reset color scheme
if screen == 1:
self.win.winHandle.set_fullscreen(False) # disable fullscreen
self.win.close()
elif screen == 0:
# self.control_window.winHandle.set_fullscreen(False) # disable fullscreen
self.control_window.close()
    def show_status_admin(self, text_color='white', enable_mouse=False, screen=1):
        """
        Draw eyetracker status on the screen.
        :param text_color: Color of message text. Default value is 'white'
        :param bool enable_mouse: If True, mouse operation is enabled.
            Default value is False.
        :param int screen: 1 = stimulus window, 0 = control window.
        """
        self.make_psycho_window(background_color="gray", screen=screen)
        window = self.win if screen == 1 else self.control_window
        # if enable_mouse == False:
        #     mouse = psychopy.event.Mouse(visible=False, win=self.win)
        # NOTE(review): gaze_data_status is only populated by a gaze-data
        # callback subscribed elsewhere (e.g. gaze_data_callback) — confirm
        # a subscription is active before calling this, or the display
        # keeps showing "No eye tracker data detected".
        self.gaze_data_status = None
        msg = psychopy.visual.TextStim(window, color=text_color,
            height=0.02, pos=(0,-0.35), units='height', autoLog=False, text="No eye tracker data detected")
        bgrect = psychopy.visual.Rect(window,
            width=0.6, height=0.6, lineColor='white', fillColor='black',
            units='height', autoLog=False)
        leye = psychopy.visual.Circle(window,
            size=0.05, units='height', lineColor=None, fillColor='green',
            autoLog=False)
        reye = psychopy.visual.Circle(window, size=0.05, units='height',
            lineColor=None, fillColor='red', autoLog=False)
        b_show_status = True
        # Redraw eye positions each frame until escape/space is pressed.
        while b_show_status:
            bgrect.draw()
            if self.gaze_data_status is not None:
                lp, lv, rp, rv = self.gaze_data_status
                msgst = 'Left: {:.3f},{:.3f},{:.3f}\n'.format(*lp)
                msgst += 'Right: {:.3f},{:.3f},{:.3f}\n'.format(*rp)
                msg.setText(msgst)
                if lv:
                    # Mirror trackbox coordinates into window coordinates;
                    # radius shrinks as the eye moves closer (lp[2]).
                    leye.setPos(((1-lp[0]-0.5)/2,(1-lp[1]-0.5)/2))
                    leye.setRadius((1-lp[2])/2)
                    leye.draw()
                if rv:
                    reye.setPos(((1-rp[0]-0.5)/2,(1-rp[1]-0.5)/2))
                    reye.setRadius((1-rp[2])/2)
                    reye.draw()
            for key in psychopy.event.getKeys():
                if key == 'escape' or key == 'space':
                    b_show_status = False
            # if enable_mouse and mouse.getPressed()[0]:
            #     b_show_status = False
            msg.draw()
            window.flip()
        self.close_psycho_window(screen=screen)
    def show_status(self, text_color='white', enable_mouse=False, screen=1):
        """
        Draw eyetracker status on the screen.
        :param text_color: Color of message text. Default value is 'white'
        :param bool enable_mouse: If True, mouse operation is enabled.
            Default value is False.
        :param int screen: 1 = stimulus window, 0 = control window.
        """
        self.make_psycho_window(background_color="gray", screen=screen)
        window = self.win if screen == 1 else self.control_window
        # if enable_mouse == False:
        #     mouse = psychopy.event.Mouse(visible=False, win=self.win)
        self.gaze_data_status = None
        # NOTE(review): the subscription below is commented out, so
        # gaze_data_status is never updated and the text stim (whose draw
        # is also commented out further down) is never shown — confirm
        # this stripped-down variant is intentional.
        # if self.eyetracker is not None:
        #     self.eyetracker.subscribe_to(tobii_research.EYETRACKER_GAZE_DATA, self.on_gaze_data_status)
        msg = psychopy.visual.TextStim(window, color=text_color,
            height=0.02, pos=(0,-0.35), units='height', autoLog=False, text="No eye tracker data detected")
        bgrect = psychopy.visual.Rect(window,
            width=0.6, height=0.6, lineColor='white', fillColor='black',
            units='height', autoLog=False)
        leye = psychopy.visual.Circle(window,
            size=0.05, units='height', lineColor=None, fillColor='green',
            autoLog=False)
        reye = psychopy.visual.Circle(window, size=0.05, units='height',
            lineColor=None, fillColor='red', autoLog=False)
        b_show_status = True
        # Redraw eye positions each frame until escape/space is pressed.
        while b_show_status:
            bgrect.draw()
            if self.gaze_data_status is not None:
                lp, lv, rp, rv = self.gaze_data_status
                msgst = 'Left: {:.3f},{:.3f},{:.3f}\n'.format(*lp)
                msgst += 'Right: {:.3f},{:.3f},{:.3f}\n'.format(*rp)
                msg.setText(msgst)
                if lv:
                    # Mirror trackbox coordinates into window coordinates;
                    # radius shrinks as the eye moves closer (lp[2]).
                    leye.setPos(((1-lp[0]-0.5)/2,(1-lp[1]-0.5)/2))
                    leye.setRadius((1-lp[2])/2)
                    leye.draw()
                if rv:
                    reye.setPos(((1-rp[0]-0.5)/2,(1-rp[1]-0.5)/2))
                    reye.setRadius((1-rp[2])/2)
                    reye.draw()
            for key in psychopy.event.getKeys():
                if key == 'escape' or key == 'space':
                    b_show_status = False
            # if enable_mouse and mouse.getPressed()[0]:
            #     b_show_status = False
            # msg.draw()
            window.flip()
        # if self.eyetracker is not None:
        #     self.eyetracker.unsubscribe_from(tobii_research.EYETRACKER_GAZE_DATA)
        self.close_psycho_window(screen=screen)
def on_gaze_data_status(self, gaze_data):
"""
Callback function used by
:func:`~psychopy_tobii_controller.tobii_controller.show_status`
Usually, users don't have to call this method.
"""
lp = gaze_data.left_eye.gaze_origin.position_in_track_box_coordinates
lv = gaze_data.left_eye.gaze_origin.validity
rp = gaze_data.right_eye.gaze_origin.position_in_track_box_coordinates
rv = gaze_data.right_eye.gaze_origin.validity
self.gaze_data_status = (lp, lv, rp, rv)
def start_custom_calibration(self, num_points=2, stim_type="default", stimuli_path="stimuli/smiley_yellow.png"):
# Run calibration.
target_points = [(-0.5, 0.0), (0.5, 0.0)]
if num_points == 5:
target_points = [(-0.4,0.4), (0.4,0.4), (0.0,0.0), (-0.4,-0.4), (0.4,-0.4)]
self.run_calibration(target_points, stim_type=stim_type, stimuli_path="stimuli/smiley_yellow.png")
# THIS CODE MAKES A GAZE TRACE AFTER THE CALIBRATION
# # If calibration is aborted by pressing ESC key, return value of run_calibration()
# # is 'abort'.
# if ret != 'abort':
#
# marker = psychopy.visual.Rect(self.win, width=0.01, height=0.01)
#
# # Start recording.
# self.subscribe()
# waitkey = True
# while waitkey:
# # Get the latest gaze position data.
# currentGazePosition = self.get_current_gaze_position()
#
# # Gaze position is a tuple of four values (lx, ly, rx, ry).
# # The value is numpy.nan if Tobii failed to detect gaze position.
# if not np.nan in currentGazePosition:
# marker.setPos(currentGazePosition[0:2])
# marker.setLineColor('white')
# else:
# marker.setLineColor('red')
# keys = psychopy.event.getKeys ()
# if 'space' in keys:
# waitkey=False
# elif len(keys)>=1:
# # Record the first key name to the data file.
# self.record_event(keys[0])
#
# marker.draw()
# self.win.flip()
# # Stop recording.
# self.unsubscribe()
# # Close the data file.
# self.close_datafile()
# self.close_psycho_window()
    def run_calibration(self, calibration_points, move_duration=1.5,
            shuffle=True, start_key='space', decision_key='space',
            text_color='white', enable_mouse=False, stim_type="default", stimuli_path="stimuli/smiley_yellow.png"):
        """
        Run calibration.
        :param calibration_points: List of position of calibration points.
        :param float move_duration: Duration of animation of calibration target.
            Unit is second. Default value is 1.5.
        :param bool shuffle: If True, order of calibration points is shuffled.
            Otherwise, calibration target moves in the order of calibration_points.
            Default value is True.
        :param str start_key: Name of key to start calibration procedure.
            If None, calibration starts immediately afte this method is called.
            Default value is 'space'.
        :param str decision_key: Name of key to accept/retry calibration.
            Default value is 'space'.
        :param text_color: Color of message text. Default value is 'white'
        :param bool enable_mouse: If True, mouse operation is enabled.
            Default value is False.
        :param str stim_type: "default" = animated dot/disc target,
            "img" = rotating image stimulus (stimuli_path).
        :param str stimuli_path: Image file used when stim_type == "img".

        NOTE(review): the enable_mouse handling looks inverted in several
        places below (the mouse object is only created when
        enable_mouse == False, yet it is read when enable_mouse is truthy)
        — confirm the intended polarity before relying on mouse input.
        """
        # set sizes and init calibration
        self.calibration_target_dot_size = self.default_calibration_target_dot_size[self.win.units]
        self.calibration_target_disc_size = self.default_calibration_target_disc_size[self.win.units]
        self.calibration_target_dot = psychopy.visual.Circle(self.win,
            radius=self.calibration_target_dot_size, fillColor='white',
            lineColor=None,lineWidth=1, autoLog=False)
        self.calibration_target_disc = psychopy.visual.Circle(self.win,
            radius=self.calibration_target_disc_size, fillColor='lime',
            lineColor='white', lineWidth=1, autoLog=False)
        self.update_calibration = self.update_calibration_default
        if self.win.units == 'norm': # fix oval
            self.calibration_target_dot.setSize([float(self.win.size[1])/self.win.size[0], 1.0])
            self.calibration_target_disc.setSize([float(self.win.size[1])/self.win.size[0], 1.0])
        if not (2 <= len(calibration_points) <= 9):
            raise ValueError('Calibration points must be 2~9')
        if enable_mouse == False:
            mouse = psychopy.event.Mouse(visible=False, win=self.win)
        # Off-screen image used to plot sample-vs-target lines after each pass.
        img = Image.new('RGBA',tuple(self.win.size))
        img_draw = ImageDraw.Draw(img)
        result_img = psychopy.visual.SimpleImageStim(self.win, img, autoLog=False)
        result_msg = psychopy.visual.TextStim(self.win, pos=(0,-self.win.size[1]/4),
            color=text_color, units='pix', autoLog=False)
        remove_marker = psychopy.visual.Circle(
            self.win, radius=self.calibration_target_dot.radius*5,
            fillColor='black', lineColor='white', lineWidth=1, autoLog=False)
        if self.win.units == 'norm': # fix oval
            remove_marker.setSize([float(self.win.size[1])/self.win.size[0], 1.0])
            # NOTE(review): duplicated line — likely a copy-paste leftover.
            remove_marker.setSize([float(self.win.size[1])/self.win.size[0], 1.0])
        if self.eyetracker is not None:
            self.calibration.enter_calibration_mode()
        self.move_duration = move_duration
        self.original_calibration_points = calibration_points[:]
        self.retry_points = list(range(len(self.original_calibration_points))) # set all points
        in_calibration_loop = True
        while in_calibration_loop:
            # Rebuild the working point list from the retry selection.
            self.calibration_points = []
            for i in range(len(self.original_calibration_points)):
                if i in self.retry_points:
                    self.calibration_points.append(self.original_calibration_points[i])
            if shuffle:
                np.random.shuffle(self.calibration_points)
            if start_key is not None or enable_mouse:
                # NOTE(review): waitkey is initialised to False, so the
                # wait loop below never runs and calibration starts
                # immediately — presumably this was meant to be True.
                waitkey = False
                if start_key is not None:
                    if enable_mouse == True:
                        result_msg.setText('Press {} or click left button to start calibration'.format(start_key))
                    else:
                        result_msg.setText('Press {} to start calibration'.format(start_key))
                else: # enable_mouse==True
                    result_msg.setText('Click left button to start calibration')
                while waitkey:
                    for key in psychopy.event.getKeys():
                        if key==start_key:
                            waitkey = False
                    if enable_mouse and mouse.getPressed()[0]:
                        waitkey = False
                    result_msg.draw()
                    self.win.flip()
            else:
                self.win.flip()
            # Animate the target over the points and collect samples.
            if stim_type == "default":
                self.update_calibration()
            elif stim_type == "img":
                self.update_calibration_img(stimuli_path)
            calibration_result = None
            if self.eyetracker is not None:
                calibration_result = self.calibration.compute_and_apply()
            self.win.flip()
            img_draw.rectangle(((0,0),tuple(self.win.size)),fill=(0,0,0,0))
            if calibration_result is None or calibration_result.status == tobii_research.CALIBRATION_STATUS_FAILURE:
                #computeCalibration failed.
                pass
            else:
                if len(calibration_result.calibration_points) == 0:
                    pass
                else:
                    # Draw a line from each target point to every valid
                    # sample: green = left eye, red = right eye.
                    for calibration_point in calibration_result.calibration_points:
                        p = calibration_point.position_on_display_area
                        for calibration_sample in calibration_point.calibration_samples:
                            lp = calibration_sample.left_eye.position_on_display_area
                            rp = calibration_sample.right_eye.position_on_display_area
                            if calibration_sample.left_eye.validity == tobii_research.VALIDITY_VALID_AND_USED:
                                img_draw.line(((p[0]*self.win.size[0], p[1]*self.win.size[1]),
                                    (lp[0]*self.win.size[0], lp[1]*self.win.size[1])), fill=(0,255,0,255))
                            if calibration_sample.right_eye.validity == tobii_research.VALIDITY_VALID_AND_USED:
                                img_draw.line(((p[0]*self.win.size[0], p[1]*self.win.size[1]),
                                    (rp[0]*self.win.size[0], rp[1]*self.win.size[1])), fill=(255,0,0,255))
                        img_draw.ellipse(((p[0]*self.win.size[0]-3, p[1]*self.win.size[1]-3),
                            (p[0]*self.win.size[0]+3, p[1]*self.win.size[1]+3)), outline=(0,0,0,255))
            if enable_mouse == False:
                result_msg.setText('Accept/Retry: {} or right-click\nSelect recalibration points: 0-9 key or left-click'.format(decision_key))
            else:
                result_msg.setText('Accept/Retry: {}\nSelect recalibration points: 0-9 key'.format(decision_key))
            result_img.setImage(img)
            waitkey = True
            self.retry_points = []
            if enable_mouse == False:
                mouse.setVisible(True)
            # Decision loop: accept, abort, or toggle points for retry.
            while waitkey:
                for key in psychopy.event.getKeys():
                    if key in [decision_key, 'escape']:
                        waitkey = False
                    elif key in ['0', 'num_0']:
                        # '0' toggles between "retry everything" and "retry nothing".
                        if len(self.retry_points) == 0:
                            self.retry_points = list(range(len(self.original_calibration_points)))
                        else:
                            self.retry_points = []
                    elif key in self.key_index_dict:
                        key_index = self.key_index_dict[key]
                        if key_index<len(self.original_calibration_points):
                            if key_index in self.retry_points:
                                self.retry_points.remove(key_index)
                            else:
                                self.retry_points.append(key_index)
                if enable_mouse == False:
                    pressed = mouse.getPressed()
                    if pressed[2]: # right click
                        key = decision_key
                        waitkey = False
                    elif pressed[0]: # left click
                        # Toggle the nearest calibration point under the cursor.
                        mouse_pos = mouse.getPos()
                        for key_index in range(len(self.original_calibration_points)):
                            p = self.original_calibration_points[key_index]
                            if np.linalg.norm([mouse_pos[0]-p[0], mouse_pos[1]-p[1]]) < self.calibration_target_dot.radius*5:
                                if key_index in self.retry_points:
                                    self.retry_points.remove(key_index)
                                else:
                                    self.retry_points.append(key_index)
                                time.sleep(0.2)
                                break
                result_img.draw()
                if len(self.retry_points)>0:
                    for index in self.retry_points:
                        # NOTE(review): mutates the list while iterating and
                        # uses '>' where '>=' looks intended (index == len
                        # would still raise IndexError below) — confirm.
                        if index > len(self.original_calibration_points):
                            self.retry_points.remove(index)
                        remove_marker.setPos(self.original_calibration_points[index])
                        remove_marker.draw()
                result_msg.draw()
                self.win.flip()
            if key == decision_key:
                if len(self.retry_points) == 0:
                    # retval = 'accept'
                    in_calibration_loop = False
                else: #retry
                    for point_index in self.retry_points:
                        x, y = self.get_tobii_pos(self.original_calibration_points[point_index])
                        if self.eyetracker is not None:
                            self.calibration.discard_data(x, y)
            elif key == 'escape':
                # retval = 'abort'
                in_calibration_loop = False
            else:
                raise RuntimeError('Calibration: Invalid key')
            if enable_mouse == False:
                mouse.setVisible(False)
        if self.eyetracker is not None:
            self.calibration.leave_calibration_mode()
        if enable_mouse == False:
            mouse.setVisible(False)
def flash_screen(self):
r = self.win.color[0]
g = self.win.color[1]
b = self.win.color[2]
while r <= 1:
r += 0.05
g += 0.05
b += 0.05
self.win.setColor((r,g,b), colorSpace='rgb')
psychopy.core.wait(0.05)
self.win.flip()
while r >= -1:
r -= 0.05
g -= 0.05
b -= 0.05
self.win.setColor((r,g,b), colorSpace='rgb')
psychopy.core.wait(0.05)
self.win.flip()
def animate_test(self, gaze_data_left, gaze_data_right, gaze_data_left_corrected, gaze_data_right_corrected, target_points, stimuli_paths=["stimuli/smiley_yellow.png"], frame_delay=0.015):
self.make_psycho_window()
img_stims = []
for stimuli_path in stimuli_paths:
img = Image.open(stimuli_path)
img_stim = psychopy.visual.ImageStim(self.win, image=img, autoLog=False)
img_stim.size = (0.15, 0.15)
img_stims.append(img_stim)
for i, (gaze_point_left, gaze_point_right, gaze_point_left_corrected, gaze_point_right_corrected, target_point) in enumerate(zip(gaze_data_left.T, gaze_data_right.T, gaze_data_left_corrected.T, gaze_data_right_corrected.T, target_points.T)):
target_point = self.get_psychopy_pos(target_point)
gaze_point_left = self.get_psychopy_pos(gaze_point_left)
gaze_point_right = self.get_psychopy_pos(gaze_point_right)
gaze_point_left_corrected = self.get_psychopy_pos(gaze_point_left_corrected)
gaze_point_right_corrected = self.get_psychopy_pos(gaze_point_right_corrected)
img_stim = img_stims[(i - 1) % len(img_stims)]
img_stim.setPos(target_point)
img_stim.ori = i * self.rot_deg_per_frame
img_stim.draw()
stim_left = psychopy.visual.Circle(self.win, radius=0.05, fillColor='red', autoLog=False)
stim_left.setPos(gaze_point_left)
stim_left.draw()
stim_right = psychopy.visual.Circle(self.win, radius=0.05, fillColor='green', autoLog=False)
stim_right.setPos(gaze_point_right)
stim_right.draw()
stim_left_corrected= psychopy.visual.Circle(self.win, radius=0.05, fillColor='blue', autoLog=False)
stim_left_corrected.setPos(gaze_point_left_corrected)
stim_left_corrected.draw()
stim_right_corrected = psychopy.visual.Circle(self.win, radius=0.05, fillColor='purple', autoLog=False)
stim_right_corrected.setPos(gaze_point_right_corrected)
stim_right_corrected.draw()
self.win.flip()
psychopy.core.wait(frame_delay)
self.close_psycho_window(screen=1)
def animate_test_2(self, gaze_data_left, gaze_data_right, target_points, stimuli_paths=["stimuli/smiley_yellow.png"], frame_delay=0.015):
self.make_psycho_window()
time.sleep(5)
img_stims = []
for stimuli_path in stimuli_paths:
img = Image.open(stimuli_path)
img_stim = psychopy.visual.ImageStim(self.win, image=img, autoLog=False)
img_stim.size = (0.15, 0.15)
img_stims.append(img_stim)
b_left = (0,0)
b_right = (0,0)
((4+5+6+7)/4)+((8-((4+5+6+7)/4))/5)
for i, (gaze_point_left, gaze_point_right, target_point) in enumerate(zip(gaze_data_left.T, gaze_data_right.T, target_points.T)):
b_left = ((b_left[0] + ((target_point[0] - gaze_point_left[0]) - (b_left[0])) / (i+1)), (b_left[1] + ((target_point[1] - gaze_point_left[1]) - (b_left[1])) / (i+1)))
b_right = ((b_right[0] + ((target_point[0] - gaze_point_right[0]) - (b_right[0])) / (i+1)), (b_right[1] + ((target_point[1] - gaze_point_right[1]) - (b_right[1])) / (i+1)))
gaze_point_left_corrected = (gaze_point_left[0] + b_left[0], gaze_point_left[1] + b_left[1])
gaze_point_right_corrected = (gaze_point_right[0] + b_right[0], gaze_point_right[1] + b_right[1])
target_point = self.get_psychopy_pos(target_point)
gaze_point_left = self.get_psychopy_pos(gaze_point_left)
gaze_point_right = self.get_psychopy_pos(gaze_point_right)
gaze_point_left_corrected = self.get_psychopy_pos(gaze_point_left_corrected)
gaze_point_right_corrected = self.get_psychopy_pos(gaze_point_right_corrected)
img_stim = img_stims[(i - 1) % len(img_stims)]
img_stim.setPos(target_point)
img_stim.ori = i * self.rot_deg_per_frame
img_stim.draw()
stim_left = psychopy.visual.Circle(self.win, radius=0.05, fillColor='red', autoLog=False)
stim_left.setPos(gaze_point_left)
stim_left.draw()
stim_right = psychopy.visual.Circle(self.win, radius=0.05, fillColor='green', autoLog=False)
stim_right.setPos(gaze_point_right)
stim_right.draw()
stim_left_corrected= psychopy.visual.Circle(self.win, radius=0.05, fillColor='blue', autoLog=False)
stim_left_corrected.setPos(gaze_point_left_corrected)
stim_left_corrected.draw()
stim_right_corrected = psychopy.visual.Circle(self.win, radius=0.05, fillColor='purple', autoLog=False)
stim_right_corrected.setPos(gaze_point_right_corrected)
stim_right_corrected.draw()
self.win.flip()
psychopy.core.wait(frame_delay)
self.close_psycho_window(screen=1)
# def make_transformation(self, stimuli_path="stimuli/smiley_yellow.png", enable_mouse=False):
#
# img = Image.open(stimuli_path)
# img_stim = psychopy.visual.ImageStim(self.win, image=img, autoLog=False)
# img_stim.size = (0.15,0.15)
#
# img_positions = [(-0.5,-0.5), (0.5,-0.5), (-0.5, 0.5), (0.5, 0.5), (0.0, 0.0)]
# np.random.shuffle(img_positions)
#
# self.subscribe_dict()
# clock = psychopy.core.Clock()
#
# for img_pos in img_positions:
# self.current_target = self.get_tobii_pos(img_pos)
#
# i = 0
# clock.reset()
# current_time = clock.getTime()
# while current_time < 3:
# img_stim.setPos(img_pos)
# img_stim.ori = i * self.rot_deg_per_frame
# img_stim.draw()
# self.win.flip()
#
# i += 1
# psychopy.core.wait(0.015)
# current_time = clock.getTime()
#
# self.unsubscribe_dict()
def start_fixation_exercise(self, positions=[(-0.5,-0.5), (0.5,-0.5), (-0.5, 0.5), (0.5, 0.5), (0.0, 0.0)], stimuli_paths=["stimuli/smiley_yellow.png"], frame_delay=0.015, fixation_duration = 3):
img_stims = []
for stimuli_path in stimuli_paths:
img = Image.open(stimuli_path)
img_stim = psychopy.visual.ImageStim(self.win, image=img, autoLog=False)
img_stim.size = (0.15, 0.15)
img_stims.append(img_stim)
np.random.shuffle(positions)
# self.subscribe_dict()
self.start_recording()
clock = psychopy.core.Clock()
pos_index = 0
for pos in positions:
self.current_target = self.get_tobii_pos(pos)
i = 0
clock.reset()
current_time = clock.getTime()
while current_time < fixation_duration:
img_stim = img_stims[(pos_index - 1) % len(img_stims)]
img_stim.setPos(pos)
img_stim.ori = i * self.rot_deg_per_frame
img_stim.draw()
self.win.flip()
i += 1
psychopy.core.wait(frame_delay)
current_time = clock.getTime()
pos_index += 1
# self.unsubscribe_dict()
self.stop_recording()
def start_fixation_exercise_animate_transition(self, positions=[(-0.5,-0.5), (0.5,-0.5), (-0.5, 0.5), (0.5, 0.5), (0.0, 0.0)], stimuli_paths=["stimuli/smiley_yellow.png"], frame_delay=0.015, move_duration=1, fixation_duration = 3):
img_stims = []
for stimuli_path in stimuli_paths:
img = Image.open(stimuli_path)
img_stim = psychopy.visual.ImageStim(self.win, image=img, autoLog=False)
img_stim.size = (0.15, 0.15)
img_stims.append(img_stim)
np.random.shuffle(positions)
position_pairs = [[positions[i], positions[i+1]] for i in range(len(positions)-1)]
# self.subscribe_dict()
self.start_recording()
clock = psychopy.core.Clock()
pos_index = 0
for pos in positions:
self.current_target = self.get_tobii_pos(pos)
i = 0
clock.reset()
current_time = clock.getTime()
while current_time < fixation_duration:
img_stim = img_stims[(pos_index - 1) % len(img_stims)]
img_stim.setPos(pos)
img_stim.ori = i * self.rot_deg_per_frame
img_stim.draw()
self.win.flip()
i += 1
psychopy.core.wait(frame_delay)
current_time = clock.getTime()
if pos_index < len(position_pairs):
# self.subscribe_to_data = False
self.do_reset_recording = False
self.start_pursuit_exercise(pathing="linear", positions=position_pairs[pos_index], stimuli_paths=stimuli_paths, frame_delay=frame_delay, move_duration=move_duration)
# self.subscribe_to_data = True
self.do_reset_recording = True
pos_index += 1
# self.unsubscribe_dict()
self.stop_recording()
def calc_pursuit_route(self, pathing, positions, frame_delay=0.015, move_duration=5, reverse=False):
# Normal coordinate system
intermediate_positions = []
move_steps = move_duration / frame_delay
if pathing == "linear":
total_dist = 0
for i in range(len(positions) - 1):
total_dist += self.get_euclidean_distance(positions[i], positions[i + 1])
# intermediate points
for i in range(len(positions)):
if i+1 < len(positions):
start_pos = positions[i]
end_pos = positions[i+1]
euc_dist = self.get_euclidean_distance(start_pos, end_pos)
amount_of_path = euc_dist / total_dist
move_steps_for_path = amount_of_path * move_steps
intermediate_positions.extend(self.get_equidistant_points(start_pos, end_pos, move_steps_for_path))
elif pathing == "circle" and len(positions) == 2:
start_pos = positions[0]
center_pos = positions[1]
intermediate_positions.append(start_pos)
r = ((start_pos[0] - center_pos[0]) ** 2 + (start_pos[1] - center_pos[1]) ** 2) ** 0.5
theta_x = math.acos(start_pos[0] / r)
theta_y = math.asin(start_pos[1] / r)
theta = theta_x if theta_y >= 0 else -theta_x
delta_theta = 2*math.pi / move_steps
step = 0
while move_steps > step:
step = step + 1
theta = theta + delta_theta
pos = (r*math.cos(theta), r*math.sin(theta))
intermediate_positions.append(pos)
elif pathing == "spiral" and len(positions) == 2:
start_pos = positions[0]
end_pos = positions[1]
intermediate_positions.append(start_pos)
r = ((start_pos[0] - end_pos[0]) ** 2 + (start_pos[1] - end_pos[1]) ** 2) ** 0.5
theta_x = math.acos(start_pos[0] / r)
theta_y = math.asin(start_pos[1] / r)
theta = theta_x if theta_y >= 0 else -theta_x
dr = r / move_steps
while r >= 0:
r -= dr
theta = theta + (0.05 * math.pi) / (r * (move_duration + 1/r))
pos = (r*math.cos(theta), r*math.sin(theta))
intermediate_positions.append(pos)
if reverse:
intermediate_positions.reverse()
return intermediate_positions
    def start_pursuit_exercise(self, pathing="linear", positions=[(-0.7,0.0),(0.0,0.0)], stimuli_paths=["stimuli/smiley_yellow.png"], reverse=False, frame_delay=0.011, move_duration=5):
        """
        Animate a smooth-pursuit target along a precomputed route
        (see calc_pursuit_route) while recording gaze data.

        :param str pathing: "linear", "circle" or "spiral" route shape.
        :param positions: Way points in PsychoPy 'norm' coordinates.
        :param stimuli_paths: Images cycled per route segment; for
            "spiral" the next image is cross-faded in via opacity.
        :param bool reverse: Traverse the route backwards.
        :param float frame_delay: Seconds per animation frame.
        :param move_duration: Total movement time in seconds.
        """
        img_stims = []
        for stimuli_path in stimuli_paths:
            img = Image.open(stimuli_path)
            img_stim = psychopy.visual.ImageStim(self.win, image=img, autoLog=False)
            img_stim.size = (0.15, 0.15)
            img_stims.append(img_stim)
        # frame_delay = 0.015
        intermediate_positions = self.calc_pursuit_route(pathing, positions=positions, frame_delay=frame_delay, move_duration=move_duration, reverse=reverse)
        # do_reset_recording is False when a caller (e.g. the fixation
        # exercises) wants gaze samples appended to its own recording.
        if self.do_reset_recording:
            # self.subscribe_dict()
            self.start_recording()
        pos_index = 0
        for i, pos in enumerate(intermediate_positions):
            img_stim = img_stims[(pos_index) % len(img_stims)]
            img_stim.setPos(pos)
            img_stim.ori = i * self.rot_deg_per_frame
            img_stim.opacity = 1.0
            img_stim.draw()
            if pathing == "spiral":
                # Cross-fade the next stimulus in as the segment progresses.
                img_stim = img_stims[(pos_index + 1) % len(img_stims)]
                img_stim.setPos(pos)
                img_stim.ori = i * self.rot_deg_per_frame
                img_stim.opacity = (i % int(len(intermediate_positions) / len(img_stims) + 1)) / int(len(intermediate_positions) / len(img_stims))
                img_stim.draw()
            self.win.flip() # await back buffer flip
            self.current_target = self.get_tobii_pos(pos) # update target only after rendering has completed
            # Advance to the next segment when the current way point is reached.
            if pathing == "linear" and pos[0] == positions[pos_index + 1][0] and pos[1] == positions[pos_index + 1][1]:
                pos_index += 1
            if pathing == "spiral" and i % int(len(intermediate_positions) / len(img_stims)) == 0 and i > 0:
                pos_index += 1
            psychopy.core.wait(frame_delay)
        if self.do_reset_recording:
            # self.unsubscribe_dict()
            self.stop_recording()
def get_euclidean_distance(self, p1, p2):
return ((p1[0] - p2[0])**2+(p1[1] - p2[1])**2)**0.5
def get_equidistant_points(self, p1, p2, parts):
return zip(np.linspace(p1[0], p2[0], parts), np.linspace(p1[1], p2[1], parts))
def collect_calibration_data(self, p, cood='PsychoPy'):
"""
Callback function used by
:func:`~psychopy_tobii_controller.tobii_controller.run_calibration`
Usually, users don't have to call this method.
"""
if cood=='PsychoPy':
self.calibration.collect_data(*self.get_tobii_pos(p))
elif cood =='Tobii':
self.calibration.collect_data(*p)
else:
raise ValueError('cood must be \'PsychoPy\' or \'Tobii\'')
    def update_calibration_default(self):
        """
        Updating calibration target and correcting calibration data.
        This method is called by
        :func:`~psychopy_tobii_controller.tobii_controller.run_calibration`
        Usually, users don't have to call this method.

        For each calibration point: animate the disc shrinking onto the
        dot over self.move_duration seconds, then collect one sample.
        """
        clock = psychopy.core.Clock()
        for point_index in range(len(self.calibration_points)):
            x, y = self.get_tobii_pos(self.calibration_points[point_index])
            self.calibration_target_dot.setPos(self.calibration_points[point_index])
            self.calibration_target_disc.setPos(self.calibration_points[point_index])
            clock.reset()
            current_time = clock.getTime()
            while current_time < self.move_duration:
                # Linearly interpolate the disc radius from disc_size down
                # toward twice the dot size over the animation.
                self.calibration_target_disc.setRadius(
                    (self.calibration_target_dot_size*2.0-self.calibration_target_disc_size)/ \
                     self.move_duration*current_time+self.calibration_target_disc_size
                    )
                psychopy.event.getKeys()  # drain the key buffer
                self.calibration_target_disc.draw()
                self.calibration_target_dot.draw()
                self.win.flip()
                current_time = clock.getTime()
            if self.eyetracker is not None:
                self.calibration.collect_data(x, y)
    def update_calibration_img(self, stimuli_path):
        """
        Image-based calibration animation, used by run_calibration when
        stim_type == "img": spin the image at each calibration point for
        self.move_duration seconds, collect a sample, then glide linearly
        to the next point via start_pursuit_exercise.

        :param str stimuli_path: Image file shown as the calibration target.
        """
        stim_img = Image.open(stimuli_path)
        stimuli = psychopy.visual.ImageStim(self.win, image=stim_img, autoLog=False)
        stimuli.size = (0.15,0.15)
        # Consecutive point pairs used for the transition animations.
        position_pairs = [[self.calibration_points[i], self.calibration_points[i+1]] for i in range(len(self.calibration_points)-1)]
        clock = psychopy.core.Clock()
        for point_index in range(len(self.calibration_points)):
            x, y = self.get_tobii_pos(self.calibration_points[point_index])
            i = 0
            clock.reset()
            current_time = clock.getTime()
            while current_time < self.move_duration:
                psychopy.event.getKeys()  # drain the key buffer
                stimuli.setPos(self.calibration_points[point_index])
                stimuli.ori = i * self.rot_deg_per_frame
                stimuli.draw()
                self.win.flip()
                i += 1
                psychopy.core.wait(0.015)
                current_time = clock.getTime()
            if self.eyetracker is not None:
                self.calibration.collect_data(x, y)
            if point_index < len(position_pairs):
                # Animate to the next point without resetting recording state.
                self.do_reset_recording = False
                self.start_pursuit_exercise(pathing="linear", positions=position_pairs[point_index], stimuli_paths=[stimuli_path], move_duration=1)
                self.do_reset_recording = True
def set_custom_calibration(self, func):
"""
Set custom calibration function.
:param func: custom calibration function.
"""
self.update_calibration = types.MethodType(func, self, tobii_controller)
    def use_default_calibration(self):
        """
        Revert calibration function to default one
        (:meth:`update_calibration_default`), undoing any
        :meth:`set_custom_calibration`.
        """
        self.update_calibration = self.update_calibration_default
def get_calibration_keymap(self):
"""
Get current key mapping for selecting calibration points as a dict object.
"""
return self.key_index_dict.copy()
def set_calibration_keymap(self, keymap):
"""
Set key mapping for selecting calibration points.
:param dict keymap: Dict object that holds calibration keymap.
Key of the dict object correspond to PsychoPy key name.
Value is index of the list of calibration points.
For example, if you have only two calibration points and
want to select these points by 'z' and 'x' key, set keymap
{'z':0, 'x':1}.
"""
self.key_index_dict = keymap.copy()
    def use_default_calibration_keymap(self):
        """
        Set default key mapping for selecting calibration points
        (a copy of self.default_key_index_dict).
        """
        self.key_index_dict = self.default_key_index_dict.copy()
def set_calibration_param(self, param_dict):
"""
Set calibration parameters.
:param dict param_dict: Dict object that holds calibration parameters.
Use :func:`~psychopy_tobii_controller.tobii_controller.get_calibration_param`
to get dict object.
"""
self.calibration_target_dot_size = param_dict['dot_size']
self.calibration_target_dot.lineColor = param_dict['dot_line_color']
self.calibration_target_dot.fillColor = param_dict['dot_fill_color']
self.calibration_target_dot.lineWidth = param_dict['dot_line_width']
self.calibration_target_disc_size = param_dict['disc_size']
self.calibration_target_disc.lineColor = param_dict['disc_line_color']
self.calibration_target_disc.fillColor = param_dict['disc_fill_color']
self.calibration_target_disc.lineWidth = param_dict['disc_line_width']
def get_calibration_param(self):
"""
Get calibration parameters as a dict object.
The dict object has following keys.
- 'dot_size': size of the center dot of calibration target.
- 'dot_line_color': line color of the center dot of calibration target.
- 'dot_fill_color': fill color of the center dot of calibration target.
- 'dot_line_width': line width of the center dot of calibration target.
- 'disc_size': size of the surrounding disc of calibration target.
- 'disc_line_color': line color of the surrounding disc of calibration target
- 'disc_fill_color': fill color of the surrounding disc of calibration target
- 'disc_line_width': line width of the surrounding disc of calibration target
- 'text_color': color of text
"""
param_dict = {'dot_size':self.calibration_target_dot_size,
'dot_line_color':self.calibration_target_dot.lineColor,
'dot_fill_color':self.calibration_target_dot.fillColor,
'dot_line_width':self.calibration_target_dot.lineWidth,
'disc_size':self.calibration_target_disc_size,
'disc_line_color':self.calibration_target_disc.lineColor,
'disc_fill_color':self.calibration_target_disc.fillColor,
'disc_line_width':self.calibration_target_disc.lineWidth}
return param_dict
    def subscribe(self):
        """
        Start recording gaze data.

        Clears previously recorded gaze/event buffers and subscribes
        :func:`on_gaze_data` to the tracker's gaze data stream.
        Does nothing if no eye tracker is connected.
        """
        if self.eyetracker is not None:
            self.gaze_data = []
            self.event_data = []
            self.recording = True
            self.eyetracker.subscribe_to(tobii_research.EYETRACKER_GAZE_DATA, self.on_gaze_data)
    def unsubscribe(self):
        """
        Stop recording gaze data.

        Unsubscribes from the tracker's gaze data stream, flushes the
        recorded data to the data file (if one is open) and clears the
        in-memory buffers.  Does nothing if no eye tracker is connected.
        """
        if self.eyetracker is not None:
            self.eyetracker.unsubscribe_from(tobii_research.EYETRACKER_GAZE_DATA)
            self.recording = False
            # flush_data() is a no-op while self.recording is True, so the
            # flag must be cleared first.
            self.flush_data()
            self.gaze_data = []
            self.event_data = []
    def start_recording(self):
        """
        Begin collecting dict-style gaze samples delivered by
        :func:`gaze_data_callback` (requires :func:`subscribe_dict`).
        """
        # Clear the buffer *before* enabling collection so samples arriving
        # from the callback thread land in the fresh list.
        self.global_gaze_data = []
        self.subscribe_to_data = True
    def stop_recording(self):
        """Stop collecting dict-style gaze samples (counterpart of start_recording)."""
        self.subscribe_to_data = False
    def subscribe_dict(self):
        """
        Subscribe :func:`gaze_data_callback` to the gaze data stream,
        receiving each sample as a dict.  Does nothing if no eye tracker
        is connected.
        """
        if self.eyetracker is not None:
            self.global_gaze_data = []
            self.eyetracker.subscribe_to(tobii_research.EYETRACKER_GAZE_DATA, self.gaze_data_callback, as_dictionary=True)
    def unsubscribe_dict(self):
        """
        Detach :func:`gaze_data_callback` from the gaze data stream.
        Does nothing if no eye tracker is connected.
        """
        if self.eyetracker is not None:
            self.eyetracker.unsubscribe_from(tobii_research.EYETRACKER_GAZE_DATA, self.gaze_data_callback)
def on_gaze_data(self, gaze_data):
"""
Callback function used by
:func:`~psychopy_tobii_controller.tobii_controller.subscribe`
Usually, users don't have to call this method.
"""
t = gaze_data.system_time_stamp
lx = gaze_data.left_eye.gaze_point.position_on_display_area[0]
ly = gaze_data.left_eye.gaze_point.position_on_display_area[1]
lp = gaze_data.left_eye.pupil.diameter
lv = gaze_data.left_eye.gaze_point.validity
rx = gaze_data.right_eye.gaze_point.position_on_display_area[0]
ry = gaze_data.right_eye.gaze_point.position_on_display_area[1]
rp = gaze_data.right_eye.pupil.diameter
rv = gaze_data.right_eye.gaze_point.validity
self.gaze_data.append((t,lx,ly,lp,lv,rx,ry,rp,rv))
def gaze_data_callback(self, gaze_data):
try:
lp = gaze_data['left_gaze_origin_in_trackbox_coordinate_system']
lv = gaze_data['left_gaze_origin_validity']
rp = gaze_data['right_gaze_origin_in_trackbox_coordinate_system']
rv = gaze_data['right_gaze_origin_validity']
self.gaze_data_status = (lp, lv, rp, rv)
gaze_data['current_target_point_on_display_area'] = self.current_target
if self.subscribe_to_data:
self.global_gaze_data.append(gaze_data)
except:
print("Error in callback (dict)")
def get_current_gaze_position(self):
"""
Get current (i.e. the latest) gaze position as a tuple of
(left_x, left_y, right_x, right_y).
Values are numpy.nan if Tobii fails to get gaze position.
"""
if len(self.gaze_data)==0:
return (np.nan, np.nan, np.nan, np.nan)
else:
lxy = self.get_psychopy_pos(self.gaze_data[-1][1:3])
rxy = self.get_psychopy_pos(self.gaze_data[-1][5:7])
return (lxy[0],lxy[1],rxy[0],rxy[1])
def get_current_pupil_size(self):
"""
Get current (i.e. the latest) pupil size as a tuple of
(left, right).
Values are numpy.nan if Tobii fails to get pupil size.
"""
if len(self.gaze_data)==0:
return (None,None)
else:
return (self.gaze_data[-1][3], #lp
self.gaze_data[-1][7]) #rp
    def open_datafile(self, filename, embed_events=False):
        """
        Open data file.

        :param str filename: Name of data file to be opened.
        :param bool embed_events: If True, event data is
            embeded in gaze data. Otherwise, event data is
            separately output after gaze data.
        """
        # Only one data file can be open at a time: flush and close the
        # previous one first.
        if self.datafile is not None:
            self.close_datafile()
        self.embed_events = embed_events
        self.datafile = open(filename,'w')
        # File header: recording date/time and window resolution in pixels.
        self.datafile.write('Recording date:\t'+datetime.datetime.now().strftime('%Y/%m/%d')+'\n')
        self.datafile.write('Recording time:\t'+datetime.datetime.now().strftime('%H:%M:%S')+'\n')
        self.datafile.write('Recording resolution:\t%d x %d\n' % tuple(self.win.size))
        if embed_events:
            self.datafile.write('Event recording mode:\tEmbedded\n\n')
        else:
            self.datafile.write('Event recording mode:\tSeparated\n\n')
    def close_datafile(self):
        """
        Write any buffered data to the data file and close it.
        Safe to call when no data file is open.
        """
        if self.datafile != None:
            self.flush_data()
            self.datafile.close()
        self.datafile = None
    def record_event(self,event):
        """
        Record events with timestamp.

        Note: This method works only during recording.

        :param str event: Any string.
        """
        if not self.recording:
            return
        # Timestamp comes from the tracker's clock so events align with
        # the gaze samples collected by on_gaze_data().
        self.event_data.append((tobii_research.get_system_time_stamp(), event))
    def flush_data(self):
        """
        Write buffered gaze samples and events to the data file.

        Note: This method does nothing during recording, when no data
        file is open, or when no gaze data has been collected.
        """
        if self.datafile == None:
            warnings.warn('data file is not set.')
            return
        if len(self.gaze_data)==0:
            return
        if self.recording:
            return
        self.datafile.write('Session Start\n')
        # Column headers; the embedded mode appends an extra 'Event' column.
        if self.embed_events:
            self.datafile.write('\t'.join(['TimeStamp',
                                           'GazePointXLeft',
                                           'GazePointYLeft',
                                           'PupilLeft',
                                           'ValidityLeft',
                                           'GazePointXRight',
                                           'GazePointYRight',
                                           'PupilRight',
                                           'ValidityRight',
                                           'GazePointX',
                                           'GazePointY',
                                           'Event'])+'\n')
        else:
            self.datafile.write('\t'.join(['TimeStamp',
                                           'GazePointXLeft',
                                           'GazePointYLeft',
                                           'PupilLeft',
                                           'ValidityLeft',
                                           'GazePointXRight',
                                           'GazePointYRight',
                                           'PupilRight',
                                           'ValidityRight',
                                           'GazePointX',
                                           'GazePointY'])+'\n')
        format_string = '%.1f\t%.4f\t%.4f\t%.4f\t%d\t%.4f\t%.4f\t%.4f\t%d\t%.4f\t%.4f'
        # Timestamps are written relative to the first sample, divided by
        # 1000 (convert_tobii_record does the same scaling).
        timestamp_start = self.gaze_data[0][0]
        num_output_events = 0
        if self.embed_events:
            for i in range(len(self.gaze_data)):
                # Emit an event that happened before this gaze sample,
                # interpolating a gaze record at the event's timestamp.
                # NOTE(review): only ONE pending event is emitted per gaze
                # sample (``if``, not ``while``) — events arriving faster
                # than samples are deferred to later rows; confirm intended.
                if num_output_events < len(self.event_data) and self.event_data[num_output_events][0] < self.gaze_data[i][0]:
                    event_t = self.event_data[num_output_events][0]
                    event_text = self.event_data[num_output_events][1]
                    if i>0:
                        output_data = self.convert_tobii_record(
                            self.interpolate_gaze_data(self.gaze_data[i-1], self.gaze_data[i], event_t),
                            timestamp_start)
                    else:
                        # No earlier sample to interpolate from: output NaNs.
                        output_data = ((event_t-timestamp_start)/1000.0, np.nan, np.nan, np.nan, 0,
                                       np.nan, np.nan, np.nan, 0, np.nan, np.nan)
                    self.datafile.write(format_string % output_data)
                    self.datafile.write('\t%s\n' % (event_text))
                    num_output_events += 1
                self.datafile.write(format_string % self.convert_tobii_record(self.gaze_data[i], timestamp_start))
                self.datafile.write('\t\n')
            # flush remaining events (those later than the last gaze sample)
            if num_output_events < len(self.event_data):
                for e_i in range(num_output_events, len(self.event_data)):
                    event_t = self.event_data[e_i][0]
                    event_text = self.event_data[e_i][1]
                    output_data = ((event_t-timestamp_start)/1000.0, np.nan, np.nan, np.nan, 0,
                                   np.nan, np.nan, np.nan, 0, np.nan, np.nan)
                    self.datafile.write(format_string % output_data)
                    self.datafile.write('\t%s\n' % (event_text))
        else:
            for i in range(len(self.gaze_data)):
                self.datafile.write(format_string % self.convert_tobii_record(self.gaze_data[i], timestamp_start))
                self.datafile.write('\n')
            # Separated mode: events form their own two-column table after
            # the gaze data.
            self.datafile.write('TimeStamp\tEvent\n')
            for e in self.event_data:
                self.datafile.write('%.1f\t%s\n' % ((e[0]-timestamp_start)/1000.0, e[1]))
        self.datafile.write('Session End\n\n')
        self.datafile.flush()
    def get_psychopy_pos(self, p):
        """
        Convert a Tobii position (0.0-1.0 range, y-axis pointing down)
        to PsychoPy coordinates in the window's current units.

        (The previous docstring described the opposite conversion; see
        get_tobii_pos for PsychoPy -> Tobii.)

        :param p: Position (x, y)
        """
        p = (p[0], 1-p[1]) #flip vert
        if self.win.units == 'norm':
            return (2*p[0]-1, 2*p[1]-1)
        elif self.win.units == 'height':
            return ((p[0]-0.5)*self.win.size[0]/self.win.size[1], p[1]-0.5)
        # The remaining units are all derived from the pixel position.
        p_pix = ((p[0]-0.5)*self.win.size[0], (p[1]-0.5)*self.win.size[1])
        if self.win.units == 'pix':
            return p_pix
        elif self.win.units == 'cm':
            return (self.pix2cm(p_pix[0], self.win.monitor), self.pix2cm(p_pix[1], self.win.monitor))
        elif self.win.units == 'deg':
            return (self.pix2deg(p_pix[0], self.win.monitor), self.pix2deg(p_pix[1], self.win.monitor))
        elif self.win.units in ['degFlat', 'degFlatPos']:
            return (self.pix2deg(np.array(p_pix), self.win.monitor, correctFlat=True))
        else:
            raise ValueError('unit ({}) is not supported.'.format(self.win.units))
    def get_tobii_pos(self, p):
        """
        Convert a PsychoPy position (in the window's current units) to
        the Tobii coordinate system (0.0-1.0 range, y-axis pointing down).

        (The previous docstring described the opposite conversion; see
        get_psychopy_pos for Tobii -> PsychoPy.)

        :param p: Position (x, y)
        """
        if self.win.units == 'norm':
            gp = ((p[0]+1)/2, (p[1]+1)/2)
        elif self.win.units == 'height':
            gp = (p[0]*self.win.size[1]/self.win.size[0]+0.5, p[1]+0.5)
        elif self.win.units == 'pix':
            gp = (p[0]/self.win.size[0]+0.5, p[1]/self.win.size[1]+0.5)
        elif self.win.units == 'cm':
            p_pix = (self.cm2pix(p[0], self.win.monitor), self.cm2pix(p[1], self.win.monitor))
            gp = (p_pix[0]/self.win.size[0]+0.5, p_pix[1]/self.win.size[1]+0.5)
        elif self.win.units == 'deg':
            p_pix = (self.deg2pix(p[0], self.win.monitor), self.deg2pix(p[1], self.win.monitor))
            gp = (p_pix[0]/self.win.size[0]+0.5, p_pix[1]/self.win.size[1]+0.5)
        elif self.win.units in ['degFlat', 'degFlatPos']:
            p_pix = (self.deg2pix(np.array(p), self.win.monitor, correctFlat=True))
            gp = (p_pix[0]/self.win.size[0]+0.5, p_pix[1]/self.win.size[1]+0.5)
        else:
            raise ValueError('unit ({}) is not supported'.format(self.win.units))
        return (gp[0], 1-gp[1]) # flip vert
def convert_tobii_record(self, record, start_time):
"""
Convert tobii data to output style.
Usually, users don't have to call this method.
:param record: element of self.gaze_data.
:param start_time: Tobii's timestamp when recording was started.
"""
lxy = self.get_psychopy_pos(record[1:3])
rxy = self.get_psychopy_pos(record[5:7])
if record[4] == 0 and record[8] == 0: #not detected
ave = (np.nan, np.nan)
elif record[4] == 0:
ave = rxy
elif record[8] == 0:
ave = lxy
else:
ave = ((lxy[0]+rxy[0])/2.0,(lxy[1]+rxy[1])/2.0)
return ((record[0]-start_time)/1000.0,
lxy[0], lxy[1], record[3], record[4],
rxy[0], rxy[1], record[7], record[8],
ave[0], ave[1])
def interpolate_gaze_data(self, record1, record2, t):
"""
Interpolate gaze data between record1 and record2.
Usually, users don't have to call this method.
:param record1: element of self.gaze_data.
:param record2: element of self.gaze_data.
:param t: timestamp to calculate interpolation.
"""
w1 = (record2[0]-t)/(record2[0]-record1[0])
w2 = (t-record1[0])/(record2[0]-record1[0])
#left eye
if record1[4] == 0 and record2[4] == 0:
ldata = record1[1:5]
elif record1[4] == 0:
ldata = record2[1:5]
elif record2[4] == 0:
ldata = record1[1:5]
else:
ldata = (w1*record1[1] + w2*record2[1],
w1*record1[2] + w2*record2[2],
w1*record1[3] + w2*record2[3],
1)
#right eye
if record1[8] == 0 and record2[8] == 0:
rdata = record1[5:9]
elif record1[4] == 0:
rdata = record2[5:9]
elif record2[4] == 0:
rdata = record1[5:9]
else:
rdata = (w1*record1[5] + w2*record2[5],
w1*record1[6] + w2*record2[6],
w1*record1[7] + w2*record2[7],
1)
return (t,) + ldata + rdata
| 40.729008 | 249 | 0.554244 |
f75d76b3504e0159bc48b1ec6fe8e184887f0e71 | 213 | py | Python | api/scrapers.py | evilsloth/a4kScrapers-server | 94bd90eccb13cc7fc4a5fc8999ff2f621b94344b | [
"MIT"
] | null | null | null | api/scrapers.py | evilsloth/a4kScrapers-server | 94bd90eccb13cc7fc4a5fc8999ff2f621b94344b | [
"MIT"
] | null | null | null | api/scrapers.py | evilsloth/a4kScrapers-server | 94bd90eccb13cc7fc4a5fc8999ff2f621b94344b | [
"MIT"
] | null | null | null | from providers.a4kScrapers import en as scrapers
from flask_restful import Resource, request
from flask import jsonify
class Scrapers(Resource):
def get(self):
return jsonify(scrapers.get_torrent())
| 23.666667 | 48 | 0.774648 |
bf6e7ae318ab1c53f37b84a192077871655f8195 | 6,433 | py | Python | fn/underscore.py | bmintz/fn.py | df53f5d6bf0e94a37f44f6be57d1c87c9b7a6c26 | [
"Apache-2.0"
] | 2,260 | 2015-01-01T22:32:23.000Z | 2022-03-31T10:33:13.000Z | fn/underscore.py | Digenis/fn.py | df53f5d6bf0e94a37f44f6be57d1c87c9b7a6c26 | [
"Apache-2.0"
] | 15 | 2015-04-24T04:37:38.000Z | 2022-03-25T18:13:22.000Z | fn/underscore.py | Digenis/fn.py | df53f5d6bf0e94a37f44f6be57d1c87c9b7a6c26 | [
"Apache-2.0"
] | 164 | 2015-01-10T23:32:17.000Z | 2022-03-07T02:54:43.000Z | import re
import operator
import string
import random
from sys import version_info
from itertools import repeat, count
from .op import identity, apply, flip
from .uniform import map, zip
from .func import F
# Python 2/3 compatibility aliases: the true-division operator and the
# ASCII letters constant were renamed between major versions.
div = operator.div if version_info[0] == 2 else operator.truediv
letters = string.letters if version_info[0] == 2 else string.ascii_letters
def _random_name():
    """Return a random 14-character ASCII identifier used to key format args."""
    chars = [random.choice(letters) for _ in range(14)]
    return "".join(chars)
def fmap(f, format):
    """
    Build a binary-operator method for _Callable.

    The returned method applies ``f`` to the callable's result and the
    other operand, while composing a human-readable ``format`` string
    (with "self"/"other" placeholders) describing the expression.
    """
    def applyier(self, other):
        fmt = "(%s)" % format.replace("self", self._format)
        if isinstance(other, self.__class__):
            # Combining two underscore expressions: arities add up and the
            # tuple callback defers evaluation of both operands.
            return self.__class__((f, self, other),
                                  fmt.replace("other", other._format),
                                  dict(list(self._format_args.items()) + list(other._format_args.items())),
                                  self._arity + other._arity)
        else:
            # Plain value operand: bind it now (flip makes it the second
            # argument of f) and remember it under a random name so
            # __str__/__repr__ can substitute its repr into the format.
            call = F(flip(f), other) << F(self)
            name = _random_name()
            return self.__class__(call,
                                  fmt.replace("other", "%%(%s)r" % name),
                                  dict(list(self._format_args.items()) + [(name, other)]),
                                  self._arity)
    return applyier
class ArityError(TypeError):
    """Raised when an underscore expression is called with the wrong number
    of arguments.  ``args`` holds (callable, expected, got)."""
    def __str__(self):
        template = "{0!r} expected {1} arguments, got {2}"
        return template.format(*self.args)
def unary_fmap(f, format):
    """
    Build a unary-operator method for _Callable.

    The returned method post-applies ``f`` to the callable's result and
    wraps the readable ``format`` string ("self" placeholder) accordingly;
    arity and format args are unchanged.
    """
    def applyier(self):
        fmt = "(%s)" % format.replace("self", self._format)
        return self.__class__(F(self) << f, fmt, self._format_args, self._arity)
    return applyier
class _Callable(object):
    """
    Lazily-built function object behind the ``_`` shortcut.

    Every operator applied to an instance returns a *new* instance whose
    callback performs the combined computation.  ``_format`` and
    ``_format_args`` keep a printable description of the expression and
    ``_arity`` counts how many arguments the final callable expects.
    """

    __slots__ = "_callback", "_format", "_format_args", "_arity"

    # Do not use "flipback" approach for underscore callable,
    # see https://github.com/kachayev/fn.py/issues/23
    __flipback__ = None

    def __init__(self, callback=identity, format="_", format_args=None, arity=1):
        self._callback = callback
        self._format = format
        self._format_args = format_args or {}
        self._arity = arity

    def call(self, name, *args, **kwargs):
        """Call method from _ object by given name and arguments"""
        return self.__class__(F(lambda f: apply(f, args, kwargs)) << operator.attrgetter(name) << F(self))

    def __getattr__(self, name):
        # Attribute access builds a deferred attrgetter; the attribute name
        # is stored under a random key for repr() substitution.
        attr_name = _random_name()
        return self.__class__(F(operator.attrgetter(name)) << F(self),
                              "getattr(%s, %%(%s)r)" % (self._format, attr_name),
                              dict(list(self._format_args.items()) + [(attr_name,name)]),
                              self._arity)

    def __getitem__(self, k):
        # Indexing by another underscore expression combines both arities.
        if isinstance(k, self.__class__):
            return self.__class__((operator.getitem, self, k),
                                  "%s[%s]" % (self._format, k._format),
                                  dict(list(self._format_args.items()) + list(k._format_args.items())),
                                  self._arity + k._arity)
        item_name = _random_name()
        return self.__class__(F(operator.itemgetter(k)) << F(self),
                              "%s[%%(%s)r]" % (self._format,item_name),
                              dict(list(self._format_args.items()) + [(item_name,k)]),
                              self._arity)

    def __str__(self):
        """Build readable representation for function

        (_ < 7): (x1) => (x1 < 7)
        (_ + _*10): (x1, x2) => (x1 + (x2*10))
        """
        # args iterator with produce infinite sequence
        # args -> (x1, x2, x3, ...)
        args = map("".join, zip(repeat("x"), map(str, count(1))))
        l, r = [], self._format
        # replace all "_" signs from left to right side
        while r.count("_"):
            n = next(args)
            r = r.replace("_", n, 1)
            l.append(n)
        r = r % self._format_args
        return "({left}) => {right}".format(left=", ".join(l), right=r)

    def __repr__(self):
        """Return original function notation to ensure that eval(repr(f)) == f"""
        return re.sub(r"x\d+", "_", str(self).split("=>", 1)[1].strip())

    def __call__(self, *args):
        if len(args) != self._arity:
            raise ArityError(self, self._arity, len(args))
        if not isinstance(self._callback, tuple):
            return self._callback(*args)
        # A tuple callback encodes a binary combination of two
        # sub-expressions: split the arguments between them by arity.
        f, left, right = self._callback
        return f(left(*args[:left._arity]), right(*args[left._arity:]))

    # Binary operators: each produces a new lazy expression via fmap.
    __add__ = fmap(operator.add, "self + other")
    __mul__ = fmap(operator.mul, "self * other")
    __sub__ = fmap(operator.sub, "self - other")
    __mod__ = fmap(operator.mod, "self %% other")
    __pow__ = fmap(operator.pow, "self ** other")

    __and__ = fmap(operator.and_, "self & other")
    __or__ = fmap(operator.or_, "self | other")
    __xor__ = fmap(operator.xor, "self ^ other")

    __div__ = fmap(div, "self / other")
    __divmod__ = fmap(divmod, "self / other")
    __floordiv__ = fmap(operator.floordiv, "self / other")
    __truediv__ = fmap(operator.truediv, "self / other")

    __lshift__ = fmap(operator.lshift, "self << other")
    __rshift__ = fmap(operator.rshift, "self >> other")

    __lt__ = fmap(operator.lt, "self < other")
    __le__ = fmap(operator.le, "self <= other")
    __gt__ = fmap(operator.gt, "self > other")
    __ge__ = fmap(operator.ge, "self >= other")
    __eq__ = fmap(operator.eq, "self == other")
    __ne__ = fmap(operator.ne, "self != other")

    # Unary operators.
    __neg__ = unary_fmap(operator.neg, "-self")
    __pos__ = unary_fmap(operator.pos, "+self")
    __invert__ = unary_fmap(operator.invert, "~self")

    # Reflected binary operators (operand order flipped).
    __radd__ = fmap(flip(operator.add), "other + self")
    __rmul__ = fmap(flip(operator.mul), "other * self")
    __rsub__ = fmap(flip(operator.sub), "other - self")
    __rmod__ = fmap(flip(operator.mod), "other %% self")
    __rpow__ = fmap(flip(operator.pow), "other ** self")
    __rdiv__ = fmap(flip(div), "other / self")
    __rdivmod__ = fmap(flip(divmod), "other / self")
    __rtruediv__ = fmap(flip(operator.truediv), "other / self")
    __rfloordiv__ = fmap(flip(operator.floordiv), "other / self")

    __rlshift__ = fmap(flip(operator.lshift), "other << self")
    __rrshift__ = fmap(flip(operator.rshift), "other >> self")

    __rand__ = fmap(flip(operator.and_), "other & self")
    __ror__ = fmap(flip(operator.or_), "other | self")
    __rxor__ = fmap(flip(operator.xor), "other ^ self")
# The public underscore object (exported by fn as ``_``).
shortcut = _Callable()
| 39.22561 | 107 | 0.577336 |
f797196ec33460a1088aedc68638d6a62842b147 | 3,501 | py | Python | examples/dfp/v201502/inventory_service/get_ad_unit_hierarchy.py | cmm08/googleads-python-lib | 97743df32eff92cf00cb8beaddcda42dfa0a37f4 | [
"Apache-2.0"
] | 1 | 2018-09-06T18:50:58.000Z | 2018-09-06T18:50:58.000Z | examples/dfp/v201502/inventory_service/get_ad_unit_hierarchy.py | cmm08/googleads-python-lib | 97743df32eff92cf00cb8beaddcda42dfa0a37f4 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201502/inventory_service/get_ad_unit_hierarchy.py | cmm08/googleads-python-lib | 97743df32eff92cf00cb8beaddcda42dfa0a37f4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code gets the ad unit hierarchy and displays it as a tree.
To create ad units, run create_ad_units.py
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Fetch all ad units for the network and print them as a tree."""
  # Initialize appropriate service.
  inventory_service = client.GetService('InventoryService', version='v201502')

  statement = dfp.FilterStatement()
  all_ad_units = []

  # Get ad units by statement, paging until an empty result set is returned.
  while True:
    response = inventory_service.getAdUnitsByStatement(
        statement.ToStatement())
    if 'results' in response:
      all_ad_units.extend(response['results'])
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  # Find the root ad unit. root_ad_unit can also be set to child unit to only
  # build and display a portion of the tree.
  query = 'WHERE parentId IS NULL'
  root_statement = dfp.FilterStatement(query)
  response = inventory_service.getAdUnitsByStatement(
      root_statement.ToStatement())
  # NOTE(review): response['results'] is a list, but DisplayAdUnitTree
  # indexes it like a single ad unit ('name'/'id') — presumably the API
  # returns exactly one root here; confirm.
  root_ad_unit = response['results']

  if root_ad_unit:
    BuildAndDisplayAdUnitTree(root_ad_unit, all_ad_units)
  else:
    print 'Could not build tree. No root ad unit found.'
def DisplayAdUnitTree(root_ad_unit, ad_unit_tree, depth=0):
  """Helper for displaying ad unit tree.

  Args:
    root_ad_unit: dict the root ad unit.
    ad_unit_tree: dict the tree of ad units, keyed by parent id.
    [optional]
    depth: int the depth the tree has reached.
  """
  print '%s%s (%s)' % (GenerateTab(depth), root_ad_unit['name'],
                       root_ad_unit['id'])
  # Recurse into this unit's children (looked up by this unit's id),
  # one level deeper.
  if root_ad_unit['id'] in ad_unit_tree:
    for child in ad_unit_tree[root_ad_unit['id']]:
      DisplayAdUnitTree(child, ad_unit_tree, depth+1)
def GenerateTab(depth):
  """Generate tabs to represent branching to children.

  Args:
    depth: int the depth the tree has reached.

  Returns:
    string inserted in front of the root unit.
  """
  tab_list = []
  # A leading space only for non-root levels, then one '| ' per level.
  if depth > 0:
    tab_list.append(' ')
  tab_list.append('| ' * depth)
  tab_list.append('+--')
  return ''.join(tab_list)
def BuildAndDisplayAdUnitTree(root_ad_unit, all_ad_units):
  """Create an ad unit tree and display it.

  Args:
    root_ad_unit: dict the root ad unit to build the tree under.
    all_ad_units: list the list of all ad units to build the tree with.
  """
  tree = {}
  # Group the ad units by parent id; units without a parentId (the root)
  # are deliberately left out of the mapping.
  for ad_unit in all_ad_units:
    if 'parentId' in ad_unit:
      if ad_unit['parentId'] not in tree:
        tree[ad_unit['parentId']] = []
      tree[ad_unit['parentId']].append(ad_unit)
  DisplayAdUnitTree(root_ad_unit, tree)
if __name__ == '__main__':
  # Initialize client object from the googleads.yaml credentials file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| 30.181034 | 78 | 0.716938 |
08e6e9b982b8bb60fb316830afa3dcc49dc49452 | 2,434 | py | Python | plugin.video.fanfilm/resources/lib/resolvers/googleplus.py | mrknow/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | [
"Apache-2.0"
] | 105 | 2015-11-28T00:03:11.000Z | 2021-05-05T20:47:42.000Z | plugin.video.fanfilm/resources/lib/resolvers/googleplus.py | rrosajp/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | [
"Apache-2.0"
] | 918 | 2015-11-28T14:12:40.000Z | 2022-03-23T20:24:49.000Z | plugin.video.fanfilm/resources/lib/resolvers/googleplus.py | rrosajp/filmkodi | 0162cde9ae25ddbf4a69330948714833ff2f78c9 | [
"Apache-2.0"
] | 111 | 2015-12-01T14:06:10.000Z | 2020-08-01T10:44:39.000Z | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.libraries import client
def resolve(url):
    """
    Resolve a Google Plus video page URL to a list of direct stream URLs.

    Returns a list of {'quality': ..., 'url': ...} dicts ordered 1080p,
    HD, SD (at most one per quality), or None on any failure — errors are
    deliberately swallowed, matching the other resolvers in this package.
    """
    try:
        # The last path component identifies the video on the page.
        id = (urlparse.urlparse(url).path).split('/')[-1]

        result = client.request(url)
        result = result.replace('\r','').replace('\n','').replace('\t','')
        # Take the JS array section that follows the video id and extract
        # the escaped stream URLs; reversed so higher qualities come first.
        result = result.split('"%s"' % id)[-1].split(']]')[0]
        result = re.compile('\d*,\d*,\d*,"(.+?)"').findall(result)
        result = [i.replace('\\u003d','=').replace('\\u0026','&') for i in result][::-1]
        # tag() labels each URL by quality; flatten the per-URL lists.
        result = sum([tag(i) for i in result], [])

        url = []
        try: url += [[i for i in result if i['quality'] == '1080p'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'HD'][0]]
        except: pass
        try: url += [[i for i in result if i['quality'] == 'SD'][0]]
        except: pass

        if url == []: return
        return url
    except:
        return
def tag(url):
    """
    Map a stream URL's itag (or trailing ``=m<N>`` marker) to a quality
    label.  Returns [{'quality': ..., 'url': url}] or [] when unknown.
    """
    found = re.compile('itag=(\d*)').findall(url)
    found += re.compile('=m(\d*)$').findall(url)
    if not found:
        return []
    itag = found[0]

    # itag tiers (1080p / 720p-HD / everything else known = SD).
    hd1080 = ('37', '137', '299', '96', '248', '303', '46')
    hd720 = ('22', '84', '136', '298', '120', '95', '247', '302', '45', '102')
    sd = ('35', '44', '135', '244', '94',
          '18', '34', '43', '82', '100', '101', '134', '243', '93',
          '5', '6', '36', '83', '133', '242', '92', '132')

    if itag in hd1080:
        return [{'quality': '1080p', 'url': url}]
    elif itag in hd720:
        return [{'quality': 'HD', 'url': url}]
    elif itag in sd:
        return [{'quality': 'SD', 'url': url}]
    return []
| 34.28169 | 88 | 0.555053 |
99ec2574b66c5f3e31a02a27bcff960d7755137e | 4,124 | py | Python | champ/champ_base/scripts/imu_relay.py | billynugrahas/oped_quadruped | ab4bd558f30b76da385b4ef8a5d13c0e0a188d56 | [
"BSD-3-Clause"
] | 2 | 2020-12-21T12:17:01.000Z | 2021-12-06T04:43:58.000Z | champ/champ_base/scripts/imu_relay.py | billynugrahas/oped_quadruped | ab4bd558f30b76da385b4ef8a5d13c0e0a188d56 | [
"BSD-3-Clause"
] | null | null | null | champ/champ_base/scripts/imu_relay.py | billynugrahas/oped_quadruped | ab4bd558f30b76da385b4ef8a5d13c0e0a188d56 | [
"BSD-3-Clause"
] | 1 | 2021-04-29T14:52:11.000Z | 2021-04-29T14:52:11.000Z | #!/usr/bin/env python
'''
Copyright (c) 2019-2020, Juan Miguel Jimeno
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import rospy
import champ_msgs.msg
import sensor_msgs.msg
class IMURelay:
    """
    Relays raw IMU readings (champ_msgs/Imu on 'imu/raw') to the
    standard ROS sensor_msgs/Imu ('imu/data') and
    sensor_msgs/MagneticField ('imu/mag') topics, attaching fixed
    covariance values.
    """
    def __init__(self):
        rospy.Subscriber("imu/raw", champ_msgs.msg.Imu, self.imu_callback)
        self.imu_pub = rospy.Publisher('imu/data', sensor_msgs.msg.Imu, queue_size = 100)
        self.mag_pub = rospy.Publisher('imu/mag', sensor_msgs.msg.MagneticField, queue_size = 100)
        # When no physical IMU is present, incoming messages are ignored.
        self.has_imu = rospy.get_param("pose_relay/has_imu", True)

    def imu_callback(self, imu):
        if not self.has_imu:
            return

        imu_data_msg = sensor_msgs.msg.Imu()
        imu_mag_msg = sensor_msgs.msg.MagneticField()

        imu_data_msg.header.stamp = rospy.Time.now()
        imu_data_msg.header.frame_id = "imu_link"

        # Orientation, linear acceleration and angular velocity are copied
        # verbatim from the raw message.
        imu_data_msg.orientation.w = imu.orientation.w
        imu_data_msg.orientation.x = imu.orientation.x
        imu_data_msg.orientation.y = imu.orientation.y
        imu_data_msg.orientation.z = imu.orientation.z

        imu_data_msg.linear_acceleration.x = imu.linear_acceleration.x
        imu_data_msg.linear_acceleration.y = imu.linear_acceleration.y
        imu_data_msg.linear_acceleration.z = imu.linear_acceleration.z

        imu_data_msg.angular_velocity.x = imu.angular_velocity.x
        imu_data_msg.angular_velocity.y = imu.angular_velocity.y
        imu_data_msg.angular_velocity.z = imu.angular_velocity.z

        # Fixed diagonal covariances (x, y, z variances).
        imu_data_msg.orientation_covariance[0] = 0.0025
        imu_data_msg.orientation_covariance[4] = 0.0025
        imu_data_msg.orientation_covariance[8] = 0.0025

        imu_data_msg.angular_velocity_covariance[0] = 0.000001
        imu_data_msg.angular_velocity_covariance[4] = 0.000001
        imu_data_msg.angular_velocity_covariance[8] = 0.000001

        imu_data_msg.linear_acceleration_covariance[0] = 0.0001
        imu_data_msg.linear_acceleration_covariance[4] = 0.0001
        imu_data_msg.linear_acceleration_covariance[8] = 0.0001

        self.imu_pub.publish(imu_data_msg)

        imu_mag_msg.header.stamp = rospy.Time.now()
        imu_mag_msg.header.frame_id = "imu_link"

        imu_mag_msg.magnetic_field.x = imu.magnetic_field.x
        imu_mag_msg.magnetic_field.y = imu.magnetic_field.y
        imu_mag_msg.magnetic_field.z = imu.magnetic_field.z

        imu_mag_msg.magnetic_field_covariance[0] = 0.000001
        imu_mag_msg.magnetic_field_covariance[4] = 0.000001
        imu_mag_msg.magnetic_field_covariance[8] = 0.000001

        self.mag_pub.publish(imu_mag_msg)
if __name__ == "__main__":
    # Start the relay node and spin until shutdown; the subscriber
    # callback does all the work.
    rospy.init_node('champ_imu_relay', anonymous=True)
    i = IMURelay()
    rospy.spin()
d51dec1c5ffce3376c45af53c8d57bc891610c58 | 10,348 | py | Python | modin/core/execution/ray/generic/io/io.py | novichkovg/modin | 0e36e22624ac1f0849f390a7705aa98f71e00d5d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/execution/ray/generic/io/io.py | novichkovg/modin | 0e36e22624ac1f0849f390a7705aa98f71e00d5d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | modin/core/execution/ray/generic/io/io.py | novichkovg/modin | 0e36e22624ac1f0849f390a7705aa98f71e00d5d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""The module holds base class implementing required I/O over Ray."""
import io
import os
import pandas
from modin.core.io import BaseIO
from modin.core.execution.ray.common.utils import SignalActor
import ray
class RayIO(BaseIO):
"""Base class for doing I/O operations over Ray."""
    @classmethod
    def to_sql(cls, qc, **kwargs):
        """
        Write records stored in the `qc` to a SQL database.

        Parameters
        ----------
        qc : BaseQueryCompiler
            The query compiler of the Modin dataframe that we want to run ``to_sql`` on.
        **kwargs : dict
            Parameters for ``pandas.to_sql(**kwargs)``.
        """
        # we first insert an empty DF in order to create the full table in the database
        # This also helps to validate the input against pandas
        # we would like to_sql() to complete only when all rows have been inserted into the database
        # since the mapping operation is non-blocking, each partition will return an empty DF
        # so at the end, the blocking operation will be this empty DF to_pandas
        empty_df = qc.getitem_row_array([0]).to_pandas().head(0)
        empty_df.to_sql(**kwargs)
        # so each partition will append its respective DF
        kwargs["if_exists"] = "append"
        columns = qc.columns

        def func(df):
            """
            Override column names in the wrapped dataframe and convert it to SQL.

            Notes
            -----
            This function returns an empty ``pandas.DataFrame`` because ``apply_full_axis``
            expects a Frame object as a result of operation (and ``to_sql`` has no dataframe result).
            """
            df.columns = columns
            df.to_sql(**kwargs)
            return pandas.DataFrame()

        # func runs once per row-partition (axis=1) and appends that
        # partition's rows to the table created above.
        result = qc._modin_frame.apply_full_axis(1, func, new_index=[], new_columns=[])
        # FIXME: we should be waiting for completion less expensievely, maybe use _modin_frame.materialize()?
        result.to_pandas()  # blocking operation
@staticmethod
def _to_csv_check_support(kwargs):
"""
Check if parallel version of ``to_csv`` could be used.
Parameters
----------
kwargs : dict
Keyword arguments passed to ``.to_csv()``.
Returns
-------
bool
Whether parallel version of ``to_csv`` is applicable.
"""
path_or_buf = kwargs["path_or_buf"]
compression = kwargs["compression"]
if not isinstance(path_or_buf, str):
return False
# case when the pointer is placed at the beginning of the file.
if "r" in kwargs["mode"] and "+" in kwargs["mode"]:
return False
# encodings with BOM don't support;
# instead of one mark in result bytes we will have them by the number of partitions
# so we should fallback in pandas for `utf-16`, `utf-32` with all aliases, in instance
# (`utf_32_be`, `utf_16_le` and so on)
if kwargs["encoding"] is not None:
encoding = kwargs["encoding"].lower()
if "u" in encoding or "utf" in encoding:
if "16" in encoding or "32" in encoding:
return False
if compression is None or not compression == "infer":
return False
if any((path_or_buf.endswith(ext) for ext in [".gz", ".bz2", ".zip", ".xz"])):
return False
return True
    @classmethod
    def to_csv(cls, qc, **kwargs):
        """
        Write records stored in the `qc` to a CSV file.

        Each row partition serializes its chunk to an in-memory buffer in
        parallel; the buffers are then flushed to the target file one at a
        time, in partition order, coordinated by a ``SignalActor``.

        Parameters
        ----------
        qc : BaseQueryCompiler
            The query compiler of the Modin dataframe that we want to run ``to_csv`` on.
        **kwargs : dict
            Parameters for ``pandas.to_csv(**kwargs)``.
        """
        # unsupported target/mode/encoding/compression combinations are
        # handled serially by the base (pandas) implementation
        if not cls._to_csv_check_support(kwargs):
            return BaseIO.to_csv(qc, **kwargs)
        # one signal slot per row partition plus one for the initial
        # "partition 0 may start writing" token sent below
        signals = SignalActor.remote(len(qc._modin_frame._partitions) + 1)

        def func(df, **kw):
            """
            Dump a chunk of rows as csv, then save them to target maintaining order.

            Parameters
            ----------
            df : pandas.DataFrame
                A chunk of rows to write to a CSV file.
            **kw : dict
                Arguments to pass to ``pandas.to_csv(**kw)`` plus an extra argument
                `partition_idx` serving as chunk index to maintain rows order.
            """
            partition_idx = kw["partition_idx"]
            # the copy is made to not implicitly change the input parameters;
            # to write to an intermediate buffer, we need to change `path_or_buf` in kwargs
            csv_kwargs = kwargs.copy()
            if partition_idx != 0:
                # we need to create a new file only for first recording
                # all the rest should be recorded in appending mode
                if "w" in csv_kwargs["mode"]:
                    csv_kwargs["mode"] = csv_kwargs["mode"].replace("w", "a")
                # It is enough to write the header for the first partition
                csv_kwargs["header"] = False
            # for parallelization purposes, each partition is written to an intermediate buffer
            path_or_buf = csv_kwargs["path_or_buf"]
            is_binary = "b" in csv_kwargs["mode"]
            csv_kwargs["path_or_buf"] = io.BytesIO() if is_binary else io.StringIO()
            df.to_csv(**csv_kwargs)
            content = csv_kwargs["path_or_buf"].getvalue()
            csv_kwargs["path_or_buf"].close()
            # each process waits for its turn to write to a file
            ray.get(signals.wait.remote(partition_idx))
            # preparing to write data from the buffer to a file
            with pandas.io.common.get_handle(
                path_or_buf,
                # in case when using URL in implicit text mode
                # pandas try to open `path_or_buf` in binary mode
                csv_kwargs["mode"] if is_binary else csv_kwargs["mode"] + "t",
                encoding=kwargs["encoding"],
                errors=kwargs["errors"],
                compression=kwargs["compression"],
                storage_options=kwargs["storage_options"],
                is_text=False,
            ) as handles:
                handles.handle.write(content)
            # signal that the next process can start writing to the file
            ray.get(signals.send.remote(partition_idx + 1))
            # used for synchronization purposes
            return pandas.DataFrame()

        # signaling that the partition with id==0 can be written to the file
        ray.get(signals.send.remote(0))
        result = qc._modin_frame._partition_mgr_cls.map_axis_partitions(
            axis=1,
            partitions=qc._modin_frame._partitions,
            map_func=func,
            keep_partitioning=True,
            lengths=None,
            enumerate_partitions=True,
            # NOTE(review): presumably retries are disabled because a retried
            # task would append its chunk a second time -- confirm
            max_retries=0,
        )
        # pending completion
        ray.get([partition.oid for partition in result.flatten()])
@staticmethod
def _to_parquet_check_support(kwargs):
"""
Check if parallel version of `to_parquet` could be used.
Parameters
----------
kwargs : dict
Keyword arguments passed to `.to_parquet()`.
Returns
-------
bool
Whether parallel version of `to_parquet` is applicable.
"""
path = kwargs["path"]
compression = kwargs["compression"]
if not isinstance(path, str):
return False
if any((path.endswith(ext) for ext in [".gz", ".bz2", ".zip", ".xz"])):
return False
if compression is None or not compression == "snappy":
return False
return True
@classmethod
def to_parquet(cls, qc, **kwargs):
"""
Write a ``DataFrame`` to the binary parquet format.
Parameters
----------
qc : BaseQueryCompiler
The query compiler of the Modin dataframe that we want to run `to_parquet` on.
**kwargs : dict
Parameters for `pandas.to_parquet(**kwargs)`.
"""
if not cls._to_parquet_check_support(kwargs):
return BaseIO.to_parquet(qc, **kwargs)
def func(df, **kw):
"""
Dump a chunk of rows as parquet, then save them to target maintaining order.
Parameters
----------
df : pandas.DataFrame
A chunk of rows to write to a parquet file.
**kw : dict
Arguments to pass to ``pandas.to_parquet(**kwargs)`` plus an extra argument
`partition_idx` serving as chunk index to maintain rows order.
"""
output_path = kwargs["path"]
compression = kwargs["compression"]
partition_idx = kw["partition_idx"]
if not os.path.exists(output_path):
os.makedirs(output_path)
kwargs[
"path"
] = f"{output_path}/part-{partition_idx:04d}.{compression}.parquet"
df.to_parquet(**kwargs)
return pandas.DataFrame()
result = qc._modin_frame._partition_mgr_cls.map_axis_partitions(
axis=1,
partitions=qc._modin_frame._partitions,
map_func=func,
keep_partitioning=True,
lengths=None,
enumerate_partitions=True,
)
ray.get([part.oid for row in result for part in row])
| 39.346008 | 109 | 0.589196 |
892e71cbef0967bcc29e4f58d90e2915f6f56572 | 1,087 | py | Python | plog/working5.py | Strangemother/PlogBlock | e791215b197e1e06daa569742a118b88dc8a25ac | [
"MIT"
] | null | null | null | plog/working5.py | Strangemother/PlogBlock | e791215b197e1e06daa569742a118b88dc8a25ac | [
"MIT"
] | null | null | null | plog/working5.py | Strangemother/PlogBlock | e791215b197e1e06daa569742a118b88dc8a25ac | [
"MIT"
] | null | null | null | from api import Plog
from patterns import PlogLine, PlogBlock
block = PlogBlock('Device ID:', ref='Device')
block.header.ref='device_id'
block.footer = PlogLine('----------', ref='footer').anything()
lines = {}
lines['entry_address'] = PlogLine('IP address:')
lines['platform'] = PlogLine('Platform:')
lines['interface'] = PlogLine('Interface:')
lines['hold_time'] = PlogLine('Holdtime').maybe(' ').then(':')
lines['version'] = PlogLine('Version').maybe(' ').then(':').multiline()
lines['version'] = PlogLine('advertisement version:')
lines['duplex'] = PlogLine('Duplex:')
lines['power_drawn'] = PlogLine('Power drawn:')
lines['power_request_id'] = PlogLine('Power request id:')
lines['power_management_id'] = PlogLine('Power management id:')
lines['power_request_levels'] = PlogLine('Power request levels are:')
block.add_lines(**lines)
# new parser
f = open('test_data2.txt', 'r')
# plog = Plog(f, whitespace='|')
plog = Plog(f, whitespace='|', terminator=',')
# run it
plog.add_block(block)
blocks = plog.run()
for block in blocks:
if block.valid():
print block.as_dict()
| 28.605263 | 71 | 0.687213 |
ebff409da4b845c88f9cc79e3c84a97a8613ea84 | 28,740 | py | Python | test/orm/test_utils.py | lambdanis/sqlalchemy | f94648bb922a73423d73f17c2148253fcc5893b9 | [
"MIT"
] | null | null | null | test/orm/test_utils.py | lambdanis/sqlalchemy | f94648bb922a73423d73f17c2148253fcc5893b9 | [
"MIT"
] | null | null | null | test/orm/test_utils.py | lambdanis/sqlalchemy | f94648bb922a73423d73f17c2148253fcc5893b9 | [
"MIT"
] | null | null | null | from sqlalchemy import Column
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.ext.hybrid import hybrid_method
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import aliased
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy.orm import synonym
from sqlalchemy.orm import util as orm_util
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.orm.path_registry import PathRegistry
from sqlalchemy.orm.path_registry import RootRegistry
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.util import compat
from test.orm import _fixtures
from .inheritance import _poly_fixtures
class AliasedClassTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for ``aliased()`` applied to plain classes: attribute proxying
    of instance/class methods, properties, descriptors, hybrids, synonyms,
    and metaclass-provided attributes."""

    __dialect__ = "default"

    def _fixture(self, cls, properties={}):
        # Map `cls` against a fresh single-table "point" schema and return
        # the Table for identity assertions in the tests.
        table = Table(
            "point",
            MetaData(),
            Column("id", Integer(), primary_key=True),
            Column("x", Integer),
            Column("y", Integer),
        )
        mapper(cls, table, properties=properties)
        return table

    def test_simple(self):
        class Point(object):
            pass

        table = self._fixture(Point)
        alias = aliased(Point)
        assert alias.id
        assert alias.x
        assert alias.y
        # columns on the alias must resolve to the aliased selectable,
        # not the original table
        assert Point.id.__clause_element__().table is table
        assert alias.id.__clause_element__().table is not table

    def test_not_instantiatable(self):
        class Point(object):
            pass

        # NOTE(review): `table` is unused in this test; kept as-is since
        # _fixture() is still needed for its mapping side effect.
        table = self._fixture(Point)
        alias = aliased(Point)
        assert_raises(TypeError, alias)

    def test_instancemethod(self):
        class Point(object):
            def zero(self):
                self.x, self.y = 0, 0

        # NOTE(review): `table` is unused here as well.
        table = self._fixture(Point)
        alias = aliased(Point)
        assert Point.zero
        assert getattr(alias, "zero")

    def test_classmethod(self):
        class Point(object):
            @classmethod
            def max_x(cls):
                return 100

        table = self._fixture(Point)
        alias = aliased(Point)
        assert Point.max_x
        assert alias.max_x
        # classmethods are passed through unchanged by the alias
        assert Point.max_x() == alias.max_x() == 100

    def test_simple_property(self):
        class Point(object):
            @property
            def max_x(self):
                return 100

        table = self._fixture(Point)
        alias = aliased(Point)
        assert Point.max_x
        assert Point.max_x != 100
        assert alias.max_x
        # plain properties are shared, not re-wrapped, by the alias
        assert Point.max_x is alias.max_x

    def test_descriptors(self):
        class descriptor(object):
            def __init__(self, fn):
                self.fn = fn

            def __get__(self, obj, owner):
                if obj is not None:
                    return self.fn(obj, obj)
                else:
                    return self

            def method(self):
                return "method"

        class Point(object):
            center = (0, 0)

            @descriptor
            def thing(self, arg):
                return arg.center

        table = self._fixture(Point)
        alias = aliased(Point)
        # class-level access returns the descriptor itself on both the
        # class and the alias
        assert Point.thing != (0, 0)
        assert Point().thing == (0, 0)
        assert Point.thing.method() == "method"
        assert alias.thing != (0, 0)
        assert alias.thing.method() == "method"

    def _assert_has_table(self, expr, table):
        from sqlalchemy import Column  # override testlib's override

        for child in expr.get_children():
            if isinstance(child, Column):
                assert child.table is table

    def test_hybrid_descriptor_one(self):
        class Point(object):
            def __init__(self, x, y):
                self.x, self.y = x, y

            @hybrid_method
            def left_of(self, other):
                return self.x < other.x

        self._fixture(Point)
        alias = aliased(Point)
        sess = Session()
        self.assert_compile(
            sess.query(alias).filter(alias.left_of(Point)),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x < point.x",
        )

    def test_hybrid_descriptor_two(self):
        class Point(object):
            def __init__(self, x, y):
                self.x, self.y = x, y

            @hybrid_property
            def double_x(self):
                return self.x * 2

        self._fixture(Point)
        alias = aliased(Point)
        eq_(str(Point.double_x), "Point.double_x")
        eq_(str(alias.double_x), "AliasedClass_Point.double_x")
        eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
        eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")
        sess = Session()
        self.assert_compile(
            sess.query(alias).filter(alias.double_x > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x * :x_1 > point.x",
        )

    def test_hybrid_descriptor_three(self):
        class Point(object):
            def __init__(self, x, y):
                self.x, self.y = x, y

            @hybrid_property
            def x_alone(self):
                return self.x

        self._fixture(Point)
        alias = aliased(Point)
        eq_(str(Point.x_alone), "Point.x_alone")
        eq_(str(alias.x_alone), "AliasedClass_Point.x_alone")
        # from __clause_element__() perspective, Point.x_alone
        # and Point.x return the same thing, so that's good
        eq_(str(Point.x.__clause_element__()), "point.x")
        eq_(str(Point.x_alone.__clause_element__()), "point.x")
        # same for the alias
        eq_(str(alias.x + 1), "point_1.x + :x_1")
        eq_(str(alias.x_alone + 1), "point_1.x + :x_1")
        is_(Point.x_alone.__clause_element__(), Point.x.__clause_element__())
        eq_(str(alias.x_alone == alias.x), "point_1.x = point_1.x")
        # a second alias must render as a distinct table alias
        a2 = aliased(Point)
        eq_(str(a2.x_alone == alias.x), "point_1.x = point_2.x")
        sess = Session()
        self.assert_compile(
            sess.query(alias).filter(alias.x_alone > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x > point.x",
        )

    def test_proxy_descriptor_one(self):
        class Point(object):
            def __init__(self, x, y):
                self.x, self.y = x, y

        self._fixture(Point, properties={"x_syn": synonym("x")})
        alias = aliased(Point)
        eq_(str(Point.x_syn), "Point.x_syn")
        eq_(str(alias.x_syn), "AliasedClass_Point.x_syn")
        sess = Session()
        self.assert_compile(
            sess.query(alias.x_syn).filter(alias.x_syn > Point.x_syn),
            "SELECT point_1.x AS point_1_x FROM point AS point_1, point "
            "WHERE point_1.x > point.x",
        )

    def test_meta_getattr_one(self):
        # attribute supplied dynamically via metaclass __getattr__
        class MetaPoint(type):
            def __getattr__(cls, key):
                if key == "x_syn":
                    return cls.x
                raise AttributeError(key)

        class Point(compat.with_metaclass(MetaPoint)):
            pass

        self._fixture(Point)
        alias = aliased(Point)
        eq_(str(Point.x_syn), "Point.x")
        eq_(str(alias.x_syn), "AliasedClass_Point.x")
        # from __clause_element__() perspective, Point.x_syn
        # and Point.x return the same thing, so that's good
        eq_(str(Point.x.__clause_element__()), "point.x")
        eq_(str(Point.x_syn.__clause_element__()), "point.x")
        # same for the alias
        eq_(str(alias.x + 1), "point_1.x + :x_1")
        eq_(str(alias.x_syn + 1), "point_1.x + :x_1")
        is_(Point.x_syn.__clause_element__(), Point.x.__clause_element__())
        eq_(str(alias.x_syn == alias.x), "point_1.x = point_1.x")
        a2 = aliased(Point)
        eq_(str(a2.x_syn == alias.x), "point_1.x = point_2.x")
        sess = Session()
        self.assert_compile(
            sess.query(alias).filter(alias.x_syn > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x > point.x",
        )

    def test_meta_getattr_two(self):
        # metaclass __getattr__ redirecting to a hybrid defined on the class
        class MetaPoint(type):
            def __getattr__(cls, key):
                if key == "double_x":
                    return cls._impl_double_x
                raise AttributeError(key)

        class Point(compat.with_metaclass(MetaPoint)):
            @hybrid_property
            def _impl_double_x(self):
                return self.x * 2

        self._fixture(Point)
        alias = aliased(Point)
        eq_(str(Point.double_x), "Point._impl_double_x")
        eq_(str(alias.double_x), "AliasedClass_Point._impl_double_x")
        eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
        eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")
        sess = Session()
        self.assert_compile(
            sess.query(alias).filter(alias.double_x > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x * :x_1 > point.x",
        )

    def test_meta_getattr_three(self):
        # metaclass __getattr__ constructing a fresh hybrid on every access
        class MetaPoint(type):
            def __getattr__(cls, key):
                @hybrid_property
                def double_x(me):
                    return me.x * 2

                if key == "double_x":
                    return double_x.__get__(None, cls)
                raise AttributeError(key)

        class Point(compat.with_metaclass(MetaPoint)):
            pass

        self._fixture(Point)
        alias = aliased(Point)
        eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
        eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")
        sess = Session()
        self.assert_compile(
            sess.query(alias).filter(alias.double_x > Point.x),
            "SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
            "point_1.y AS point_1_y FROM point AS point_1, point "
            "WHERE point_1.x * :x_1 > point.x",
        )

    def test_parententity_vs_parentmapper(self):
        class Point(object):
            pass

        self._fixture(Point, properties={"x_syn": synonym("x")})
        # NOTE(review): this first `pa` is unused -- it is immediately
        # re-assigned after the plain-class assertions below.
        pa = aliased(Point)
        is_(Point.x_syn._parententity, inspect(Point))
        is_(Point.x._parententity, inspect(Point))
        is_(Point.x_syn._parentmapper, inspect(Point))
        is_(Point.x._parentmapper, inspect(Point))
        is_(
            Point.x_syn.__clause_element__()._annotations["parententity"],
            inspect(Point),
        )
        is_(
            Point.x.__clause_element__()._annotations["parententity"],
            inspect(Point),
        )
        is_(
            Point.x_syn.__clause_element__()._annotations["parentmapper"],
            inspect(Point),
        )
        is_(
            Point.x.__clause_element__()._annotations["parentmapper"],
            inspect(Point),
        )
        pa = aliased(Point)
        # on an alias, parententity is the alias but parentmapper remains
        # the underlying mapper
        is_(pa.x_syn._parententity, inspect(pa))
        is_(pa.x._parententity, inspect(pa))
        is_(pa.x_syn._parentmapper, inspect(Point))
        is_(pa.x._parentmapper, inspect(Point))
        is_(
            pa.x_syn.__clause_element__()._annotations["parententity"],
            inspect(pa),
        )
        is_(
            pa.x.__clause_element__()._annotations["parententity"], inspect(pa)
        )
        is_(
            pa.x_syn.__clause_element__()._annotations["parentmapper"],
            inspect(Point),
        )
        is_(
            pa.x.__clause_element__()._annotations["parentmapper"],
            inspect(Point),
        )
class IdentityKeyTest(_fixtures.FixtureTest):
    """Tests for ``orm_util.identity_key()`` called with a class + ident,
    an instance, a result row, and an identity token."""

    run_inserts = None

    def test_identity_key_1(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        # positional and keyword `ident` forms are equivalent
        key = orm_util.identity_key(User, [1])
        eq_(key, (User, (1,), None))
        key = orm_util.identity_key(User, ident=[1])
        eq_(key, (User, (1,), None))

    def test_identity_key_scalar(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        # a scalar ident is normalized to a one-element tuple
        key = orm_util.identity_key(User, 1)
        eq_(key, (User, (1,), None))
        key = orm_util.identity_key(User, ident=1)
        eq_(key, (User, (1,), None))

    def test_identity_key_2(self):
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        s = create_session()
        u = User(name="u1")
        s.add(u)
        # flush so the instance gets a database identity
        s.flush()
        key = orm_util.identity_key(instance=u)
        eq_(key, (User, (u.id,), None))

    def test_identity_key_3(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        # key extracted from a row mapping keyed by Column objects
        row = {users.c.id: 1, users.c.name: "Frank"}
        key = orm_util.identity_key(User, row=row)
        eq_(key, (User, (1,), None))

    def test_identity_key_token(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        # identity_token occupies the third element of the key
        key = orm_util.identity_key(User, [1], identity_token="token")
        eq_(key, (User, (1,), "token"))
        key = orm_util.identity_key(User, ident=[1], identity_token="token")
        eq_(key, (User, (1,), "token"))
class PathRegistryTest(_fixtures.FixtureTest):
    """Tests for ``PathRegistry``: coercion, indexing/slicing, equality,
    registry get/set/contains semantics, and (de)serialization."""

    run_setup_mappers = "once"
    run_inserts = None
    run_deletes = None

    @classmethod
    def setup_mappers(cls):
        cls._setup_stock_mapping()

    def test_root_registry(self):
        umapper = inspect(self.classes.User)
        is_(RootRegistry()[umapper], umapper._path_registry)
        eq_(RootRegistry()[umapper], PathRegistry.coerce((umapper,)))

    def test_expand(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce((umapper,))
        # indexing with attribute/entity extends the path
        eq_(
            path[umapper.attrs.addresses][amapper][
                amapper.attrs.email_address
            ],
            PathRegistry.coerce(
                (
                    umapper,
                    umapper.attrs.addresses,
                    amapper,
                    amapper.attrs.email_address,
                )
            ),
        )

    def test_entity_boolean(self):
        umapper = inspect(self.classes.User)
        path = PathRegistry.coerce((umapper,))
        is_(bool(path), True)

    def test_key_boolean(self):
        umapper = inspect(self.classes.User)
        path = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        is_(bool(path), True)

    def test_aliased_class(self):
        User = self.classes.User
        ua = aliased(User)
        ua_insp = inspect(ua)
        path = PathRegistry.coerce((ua_insp, ua_insp.mapper.attrs.addresses))
        assert path.parent.is_aliased_class

    def test_indexed_entity(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        # even indexes hold entities
        is_(path[0], umapper)
        is_(path[2], amapper)

    def test_indexed_key(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        # odd indexes hold attribute keys
        eq_(path[1], umapper.attrs.addresses)
        eq_(path[3], amapper.attrs.email_address)

    def test_slice(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        eq_(path[1:3], (umapper.attrs.addresses, amapper))

    def test_addition(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        eq_(
            p1 + p2,
            PathRegistry.coerce(
                (
                    umapper,
                    umapper.attrs.addresses,
                    amapper,
                    amapper.attrs.email_address,
                )
            ),
        )

    def test_length(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        pneg1 = PathRegistry.coerce(())
        p0 = PathRegistry.coerce((umapper,))
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        # len() and the .length attribute must agree
        eq_(len(pneg1), 0)
        eq_(len(p0), 1)
        eq_(len(p1), 2)
        eq_(len(p2), 3)
        eq_(len(p3), 4)
        eq_(pneg1.length, 0)
        eq_(p0.length, 1)
        eq_(p1.length, 2)
        eq_(p2.length, 3)
        eq_(p3.length, 4)

    def test_eq(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        u_alias = inspect(aliased(self.classes.User))
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.name))
        p4 = PathRegistry.coerce((u_alias, umapper.attrs.addresses))
        p5 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p6 = PathRegistry.coerce(
            (amapper, amapper.attrs.user, umapper, umapper.attrs.addresses)
        )
        p7 = PathRegistry.coerce(
            (
                amapper,
                amapper.attrs.user,
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        is_(p1 == p2, True)
        is_(p1 == p3, False)
        is_(p1 == p4, False)
        is_(p1 == p5, False)
        is_(p6 == p7, False)
        # p7 minus its last entity/key pair equals p6
        is_(p6 == p7.parent.parent, True)
        is_(p1 != p2, False)
        is_(p1 != p3, True)
        is_(p1 != p4, True)
        is_(p1 != p5, True)

    def test_contains_mapper(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        assert p1.contains_mapper(umapper)
        assert not p1.contains_mapper(amapper)

    def test_path(self):
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        eq_(p1.path, (umapper, umapper.attrs.addresses))
        eq_(p2.path, (umapper, umapper.attrs.addresses, amapper))
        eq_(p3.path, (amapper, amapper.attrs.email_address))

    def test_registry_set(self):
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        p1.set(reg, "p1key", "p1value")
        p2.set(reg, "p2key", "p2value")
        p3.set(reg, "p3key", "p3value")
        # registry entries are keyed on (key, path-tuple)
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            },
        )

    def test_registry_get(self):
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        reg.update(
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            }
        )
        eq_(p1.get(reg, "p1key"), "p1value")
        eq_(p2.get(reg, "p2key"), "p2value")
        # missing combinations return None rather than raising
        eq_(p2.get(reg, "p1key"), None)
        eq_(p3.get(reg, "p3key"), "p3value")
        eq_(p3.get(reg, "p1key"), None)

    def test_registry_contains(self):
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        reg.update(
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            }
        )
        assert p1.contains(reg, "p1key")
        assert not p1.contains(reg, "p2key")
        assert p3.contains(reg, "p3key")
        assert not p2.contains(reg, "fake")

    def test_registry_setdefault(self):
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        reg.update({("p1key", p1.path): "p1value"})
        # existing entry is preserved; missing entries are created
        p1.setdefault(reg, "p1key", "p1newvalue_a")
        p1.setdefault(reg, "p1key_new", "p1newvalue_b")
        p2.setdefault(reg, "p2key", "p2newvalue")
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p1key_new", p1.path): "p1newvalue_b",
                ("p2key", p2.path): "p2newvalue",
            },
        )

    def test_serialize(self):
        User = self.classes.User
        Address = self.classes.Address
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        # serialized form pairs each class with its attribute key
        # (None when the path ends on an entity)
        eq_(p1.serialize(), [(User, "addresses"), (Address, "email_address")])
        eq_(p2.serialize(), [(User, "addresses"), (Address, None)])
        eq_(p3.serialize(), [(User, "addresses")])

    def test_deseralize(self):
        # NOTE(review): method name has a typo ("deseralize"); kept as-is
        # to avoid changing the test suite's externally visible names.
        User = self.classes.User
        Address = self.classes.Address
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        eq_(
            PathRegistry.deserialize(
                [(User, "addresses"), (Address, "email_address")]
            ),
            p1,
        )
        eq_(
            PathRegistry.deserialize([(User, "addresses"), (Address, None)]),
            p2,
        )
        eq_(PathRegistry.deserialize([(User, "addresses")]), p3)
class PathRegistryInhTest(_poly_fixtures._Polymorphic):
    """Tests for ``PathRegistry`` coercion against inheritance hierarchies:
    plain mappers, plain aliases, and ``with_polymorphic`` entities."""

    run_setup_mappers = "once"
    run_inserts = None
    run_deletes = None

    def test_plain(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        pmapper = inspect(Person)
        emapper = inspect(Engineer)
        p1 = PathRegistry.coerce((pmapper, emapper.attrs.machines))
        # given a mapper and an attribute on a subclass,
        # the path converts what you get to be against that subclass
        eq_(p1.path, (emapper, emapper.attrs.machines))

    def test_plain_compound(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        pmapper = inspect(Person)
        emapper = inspect(Engineer)
        p1 = PathRegistry.coerce(
            (cmapper, cmapper.attrs.employees, pmapper, emapper.attrs.machines)
        )
        # given a mapper and an attribute on a subclass,
        # the path converts what you get to be against that subclass
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                emapper,
                emapper.attrs.machines,
            ),
        )

    def test_plain_aliased(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        p_alias = aliased(Person)
        p_alias = inspect(p_alias)
        p1 = PathRegistry.coerce((p_alias, emapper.attrs.machines))
        # plain AliasedClass - the path keeps that AliasedClass directly
        # as is in the path
        eq_(p1.path, (p_alias, emapper.attrs.machines))

    def test_plain_aliased_compound(self):
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        emapper = inspect(Engineer)
        c_alias = aliased(Company)
        p_alias = aliased(Person)
        c_alias = inspect(c_alias)
        p_alias = inspect(p_alias)
        p1 = PathRegistry.coerce(
            (c_alias, cmapper.attrs.employees, p_alias, emapper.attrs.machines)
        )
        # plain AliasedClass - the path keeps that AliasedClass directly
        # as is in the path
        eq_(
            p1.path,
            (
                c_alias,
                cmapper.attrs.employees,
                p_alias,
                emapper.attrs.machines,
            ),
        )

    def test_with_poly_sub(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        p_poly = with_polymorphic(Person, [Engineer])
        e_poly = inspect(p_poly.Engineer)
        p_poly = inspect(p_poly)
        p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines))
        # polymorphic AliasedClass - the path uses _entity_for_mapper()
        # to get the most specific sub-entity
        eq_(p1.path, (e_poly, emapper.attrs.machines))

    def test_with_poly_base(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        pmapper = inspect(Person)
        emapper = inspect(Engineer)
        p_poly = with_polymorphic(Person, [Engineer])
        p_poly = inspect(p_poly)
        # "name" is actually on Person, not Engineer
        p1 = PathRegistry.coerce((p_poly, emapper.attrs.name))
        # polymorphic AliasedClass - because "name" is on Person,
        # we get Person, not Engineer
        eq_(p1.path, (p_poly, pmapper.attrs.name))

    def test_with_poly_use_mapper(self):
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        p_poly = with_polymorphic(Person, [Engineer], _use_mapper_path=True)
        p_poly = inspect(p_poly)
        p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines))
        # polymorphic AliasedClass with the "use_mapper_path" flag -
        # the AliasedClass acts just like the base mapper
        eq_(p1.path, (emapper, emapper.attrs.machines))
| 31.862528 | 79 | 0.576618 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.