from dagster import check
from .system import SystemStepExecutionContext
class StepExecutionContext(object):
__slots__ = ['_system_step_execution_context', '_legacy_context']
def __init__(self, system_step_execution_context):
self._system_step_execution_context = check.inst_param(
system_step_execution_context,
'system_step_execution_context',
SystemStepExecutionContext,
)
@property
def file_manager(self):
return self._system_step_execution_context.file_manager
@property
def resources(self):
return self._system_step_execution_context.resources
@property
def run_id(self):
return self._system_step_execution_context.run_id
@property
def environment_dict(self):
return self._system_step_execution_context.environment_dict
@property
def pipeline_def(self):
return self._system_step_execution_context.pipeline_def
@property
def mode_def(self):
return self._system_step_execution_context.mode_def
@property
def log(self):
return self._system_step_execution_context.log
@property
def solid_handle(self):
return self._system_step_execution_context.solid_handle
@property
def solid(self):
return self._system_step_execution_context.pipeline_def.get_solid(self.solid_handle)
@property
def solid_def(self):
return self._system_step_execution_context.pipeline_def.get_solid(
self.solid_handle
).definition
def has_tag(self, key):
return self._system_step_execution_context.has_tag(key)
def get_tag(self, key):
return self._system_step_execution_context.get_tag(key)
def get_system_context(self):
'''
This allows advanced users (e.g. framework authors) to punch through
to the underlying system context.
'''
return self._system_step_execution_context
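# A hedged usage sketch (the compute function below is illustrative, not part
# of this module): user code works against StepExecutionContext, while
# framework authors can reach the system context when needed.
#
#   def my_compute(context, inputs):
#       context.log.info('executing in run %s' % context.run_id)
#       system_context = context.get_system_context()  # advanced use only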
|
from setuptools import setup, find_packages
with open('requirements.txt') as requirements_file:
install_requirements = requirements_file.read().splitlines()
setup(
name="yootto",
version="0.1.5",
description="yootto(ヨーッと) is tiny YouTube Music unofficial uploader",
author="yanoshi",
author_email="",
url="https://github.com/yanoshi/yootto",
packages=find_packages(),
install_requires=install_requirements,
    python_requires='>=3.6',
entry_points={
"console_scripts": [
"yootto=yootto.core:main",
]
},
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
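# Typical usage (a sketch, not part of the package metadata): install locally
# with `pip install .`, which registers the `yootto` console script mapped to
# yootto.core:main above.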
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from typing import Mapping, Union
from six import string_types
from frozendict import frozendict
from twisted.internet import defer
from synapse.api.constants import EventTypes, RelationTypes
from synapse.util.async_helpers import yieldable_gather_results
from . import EventBase
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded
# by a match for 'stuff'.
# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as
# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
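# Illustrative behaviour of the regex above (a hedged sketch, not from the
# original source):
#   SPLIT_FIELD_REGEX.split(r"content.body")       => ['content', 'body']
#   SPLIT_FIELD_REGEX.split(r"thing\.with\.dots")  => ['thing\\.with\\.dots']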
def prune_event(event):
""" Returns a pruned version of the given event, which removes all keys we
don't know about or think could potentially be dodgy.
This is used when we "redact" an event. We want to remove all fields that
the user has specified, but we do want to keep necessary information like
type, state_key etc.
Args:
event (FrozenEvent)
Returns:
FrozenEvent
"""
pruned_event_dict = prune_event_dict(event.get_dict())
from . import event_type_from_format_version
pruned_event = event_type_from_format_version(event.format_version)(
pruned_event_dict, event.internal_metadata.get_dict()
)
# Mark the event as redacted
pruned_event.internal_metadata.redacted = True
return pruned_event
def prune_event_dict(event_dict):
"""Redacts the event_dict in the same way as `prune_event`, except it
operates on dicts rather than event objects
Args:
event_dict (dict)
Returns:
dict: A copy of the pruned event dict
"""
allowed_keys = [
"event_id",
"sender",
"room_id",
"hashes",
"signatures",
"content",
"type",
"state_key",
"depth",
"prev_events",
"prev_state",
"auth_events",
"origin",
"origin_server_ts",
"membership",
]
event_type = event_dict["type"]
new_content = {}
def add_fields(*fields):
for field in fields:
if field in event_dict["content"]:
new_content[field] = event_dict["content"][field]
if event_type == EventTypes.Member:
add_fields("membership")
elif event_type == EventTypes.Create:
add_fields("creator")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
elif event_type == EventTypes.PowerLevels:
add_fields(
"users",
"users_default",
"events",
"events_default",
"state_default",
"ban",
"kick",
"redact",
)
elif event_type == EventTypes.Aliases:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
allowed_fields["content"] = new_content
unsigned = {}
allowed_fields["unsigned"] = unsigned
event_unsigned = event_dict.get("unsigned", {})
if "age_ts" in event_unsigned:
unsigned["age_ts"] = event_unsigned["age_ts"]
if "replaces_state" in event_unsigned:
unsigned["replaces_state"] = event_unsigned["replaces_state"]
return allowed_fields
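# A hedged worked example of the pruning rules above (the event dict is
# illustrative):
#
#   prune_event_dict({
#       "type": "m.room.member",
#       "event_id": "$abc", "sender": "@alice:example.org",
#       "state_key": "@alice:example.org",
#       "content": {"membership": "join", "displayname": "Alice"},
#   })
#   # => keeps event_id/sender/state_key/type, and only "membership" inside
#   #    "content"; "displayname" is stripped and "unsigned" becomes {}.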
def _copy_field(src, dst, field):
"""Copy the field in 'src' to 'dst'.
For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
then dst={"foo":{"bar":5}}.
Args:
src(dict): The dict to read from.
dst(dict): The dict to modify.
field(list<str>): List of keys to drill down to in 'src'.
"""
if len(field) == 0: # this should be impossible
return
if len(field) == 1: # common case e.g. 'origin_server_ts'
if field[0] in src:
dst[field[0]] = src[field[0]]
return
# Else is a nested field e.g. 'content.body'
# Pop the last field as that's the key to move across and we need the
# parent dict in order to access the data. Drill down to the right dict.
key_to_move = field.pop(-1)
sub_dict = src
for sub_field in field: # e.g. sub_field => "content"
if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
sub_dict = sub_dict[sub_field]
else:
return
if key_to_move not in sub_dict:
return
# Insert the key into the output dictionary, creating nested objects
# as required. We couldn't do this any earlier or else we'd need to delete
# the empty objects if the key didn't exist.
sub_out_dict = dst
for sub_field in field:
sub_out_dict = sub_out_dict.setdefault(sub_field, {})
sub_out_dict[key_to_move] = sub_dict[key_to_move]
def only_fields(dictionary, fields):
"""Return a new dict with only the fields in 'dictionary' which are present
in 'fields'.
If there are no event fields specified then all fields are included.
    The entries may include '.' characters to indicate sub-fields.
    So ['content.body'] will include the 'body' field of the 'content' object.
    A literal '.' character in a field name may be escaped using a '\\'.
Args:
dictionary(dict): The dictionary to read from.
fields(list<str>): A list of fields to copy over. Only shallow refs are
taken.
Returns:
dict: A new dictionary with only the given fields. If fields was empty,
the same dictionary is returned.
"""
if len(fields) == 0:
return dictionary
# for each field, convert it:
# ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]
# for each element of the output array of arrays:
# remove escaping so we can use the right key names.
split_fields[:] = [
[f.replace(r"\.", r".") for f in field_array] for field_array in split_fields
]
output = {}
for field_array in split_fields:
_copy_field(dictionary, output, field_array)
return output
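# A hedged worked example of the filtering above (the dict is illustrative):
#
#   only_fields({"content": {"body": "hi", "other": 1}, "type": "m.room.message"},
#               ["content.body", "type"])
#   # => {"content": {"body": "hi"}, "type": "m.room.message"}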
def format_event_raw(d):
return d
def format_event_for_client_v1(d):
d = format_event_for_client_v2(d)
sender = d.get("sender")
if sender is not None:
d["user_id"] = sender
copy_keys = (
"age",
"redacted_because",
"replaces_state",
"prev_content",
"invite_room_state",
)
for key in copy_keys:
if key in d["unsigned"]:
d[key] = d["unsigned"][key]
return d
def format_event_for_client_v2(d):
drop_keys = (
"auth_events",
"prev_events",
"hashes",
"signatures",
"depth",
"origin",
"prev_state",
)
for key in drop_keys:
d.pop(key, None)
return d
def format_event_for_client_v2_without_room_id(d):
d = format_event_for_client_v2(d)
d.pop("room_id", None)
return d
def serialize_event(
e,
time_now_ms,
as_client_event=True,
event_format=format_event_for_client_v1,
token_id=None,
only_event_fields=None,
is_invite=False,
):
"""Serialize event for clients
Args:
e (EventBase)
time_now_ms (int)
as_client_event (bool)
event_format
token_id
only_event_fields
is_invite (bool): Whether this is an invite that is being sent to the
invitee
Returns:
dict
"""
# FIXME(erikj): To handle the case of presence events and the like
if not isinstance(e, EventBase):
return e
time_now_ms = int(time_now_ms)
# Should this strip out None's?
d = {k: v for k, v in e.get_dict().items()}
d["event_id"] = e.event_id
if "age_ts" in d["unsigned"]:
d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
del d["unsigned"]["age_ts"]
if "redacted_because" in e.unsigned:
d["unsigned"]["redacted_because"] = serialize_event(
e.unsigned["redacted_because"], time_now_ms, event_format=event_format
)
if token_id is not None:
if token_id == getattr(e.internal_metadata, "token_id", None):
txn_id = getattr(e.internal_metadata, "txn_id", None)
if txn_id is not None:
d["unsigned"]["transaction_id"] = txn_id
# If this is an invite for somebody else, then we don't care about the
# invite_room_state as that's meant solely for the invitee. Other clients
# will already have the state since they're in the room.
if not is_invite:
d["unsigned"].pop("invite_room_state", None)
if as_client_event:
d = event_format(d)
if only_event_fields:
if not isinstance(only_event_fields, list) or not all(
isinstance(f, string_types) for f in only_event_fields
):
raise TypeError("only_event_fields must be a list of strings")
d = only_fields(d, only_event_fields)
return d
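# Hedged usage sketch (`event` is assumed to be an EventBase instance):
#
#   d = serialize_event(event, time_now_ms=1500000000000,
#                       only_event_fields=["content.body", "type"])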
class EventClientSerializer(object):
"""Serializes events that are to be sent to clients.
This is used for bundling extra information with any events to be sent to
clients.
"""
def __init__(self, hs):
self.store = hs.get_datastore()
self.experimental_msc1849_support_enabled = (
hs.config.experimental_msc1849_support_enabled
)
@defer.inlineCallbacks
def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
"""Serializes a single event.
Args:
event (EventBase)
time_now (int): The current time in milliseconds
bundle_aggregations (bool): Whether to bundle in related events
**kwargs: Arguments to pass to `serialize_event`
Returns:
Deferred[dict]: The serialized event
"""
# To handle the case of presence events and the like
if not isinstance(event, EventBase):
return event
event_id = event.event_id
serialized_event = serialize_event(event, time_now, **kwargs)
# If MSC1849 is enabled then we need to look if there are any relations
# we need to bundle in with the event.
# Do not bundle relations if the event has been redacted
if not event.internal_metadata.is_redacted() and (
self.experimental_msc1849_support_enabled and bundle_aggregations
):
annotations = yield self.store.get_aggregation_groups_for_event(event_id)
references = yield self.store.get_relations_for_event(
event_id, RelationTypes.REFERENCE, direction="f"
)
if annotations.chunk:
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.ANNOTATION] = annotations.to_dict()
if references.chunk:
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.REFERENCE] = references.to_dict()
edit = None
if event.type == EventTypes.Message:
edit = yield self.store.get_applicable_edit(event_id)
if edit:
# If there is an edit replace the content, preserving existing
# relations.
relations = event.content.get("m.relates_to")
serialized_event["content"] = edit.content.get("m.new_content", {})
if relations:
serialized_event["content"]["m.relates_to"] = relations
else:
serialized_event["content"].pop("m.relates_to", None)
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.REPLACE] = {
"event_id": edit.event_id,
"origin_server_ts": edit.origin_server_ts,
"sender": edit.sender,
}
return serialized_event
def serialize_events(self, events, time_now, **kwargs):
"""Serializes multiple events.
Args:
            events (iter[EventBase])
time_now (int): The current time in milliseconds
**kwargs: Arguments to pass to `serialize_event`
Returns:
Deferred[list[dict]]: The list of serialized events
"""
return yieldable_gather_results(
self.serialize_event, events, time_now=time_now, **kwargs
)
def copy_power_levels_contents(
old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
):
"""Copy the content of a power_levels event, unfreezing frozendicts along the way
Raises:
TypeError if the input does not look like a valid power levels event content
"""
if not isinstance(old_power_levels, collections.Mapping):
raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))
power_levels = {}
for k, v in old_power_levels.items():
if isinstance(v, int):
power_levels[k] = v
continue
if isinstance(v, collections.Mapping):
power_levels[k] = h = {}
for k1, v1 in v.items():
# we should only have one level of nesting
if not isinstance(v1, int):
raise TypeError(
"Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
)
h[k1] = v1
continue
raise TypeError("Invalid power_levels value for %s: %r" % (k, v))
return power_levels
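# A hedged worked example (the content values are illustrative):
#
#   copy_power_levels_contents(frozendict({"ban": 50, "users": frozendict({"@a:x": 100})}))
#   # => {"ban": 50, "users": {"@a:x": 100}}  (plain, mutable dicts)
#   copy_power_levels_contents({"users": {"@a:x": "100"}})  # raises TypeError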
|
"""
sentry.tagstore.v2.models.grouptagvalue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2017 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.db import models, router, transaction, DataError
from django.utils import timezone
from sentry.api.serializers import Serializer, register
from sentry.db.models import (
Model, BoundedPositiveIntegerField, BaseManager, FlexibleForeignKey, sane_repr
)
class GroupTagValue(Model):
"""
Stores the total number of messages seen by a group matching
the given filter.
"""
__core__ = False
project_id = BoundedPositiveIntegerField(db_index=True)
group_id = BoundedPositiveIntegerField(db_index=True)
times_seen = BoundedPositiveIntegerField(default=0)
_key = FlexibleForeignKey('tagstore.TagKey', db_column='key_id')
_value = FlexibleForeignKey('tagstore.TagValue', db_column='value_id')
last_seen = models.DateTimeField(
default=timezone.now, db_index=True, null=True)
first_seen = models.DateTimeField(
default=timezone.now, db_index=True, null=True)
objects = BaseManager()
class Meta:
app_label = 'tagstore'
unique_together = (('project_id', 'group_id', '_key', '_value'), )
index_together = (('project_id', '_key', '_value', 'last_seen'), )
__repr__ = sane_repr('project_id', 'group_id', '_key', '_value')
@property
def key(self):
return self._key.key
@property
def value(self):
return self._value.value
def save(self, *args, **kwargs):
if not self.first_seen:
self.first_seen = self.last_seen
super(GroupTagValue, self).save(*args, **kwargs)
def merge_counts(self, new_group):
try:
with transaction.atomic(using=router.db_for_write(GroupTagValue)):
new_obj = GroupTagValue.objects.get(
group_id=new_group.id,
_key_id=self._key_id,
_value_id=self._value_id,
)
new_obj.update(
first_seen=min(new_obj.first_seen, self.first_seen),
last_seen=max(new_obj.last_seen, self.last_seen),
times_seen=new_obj.times_seen + self.times_seen,
)
except DataError:
# it's possible to hit an out of range value for counters
pass
@register(GroupTagValue)
class GroupTagValueSerializer(Serializer):
def get_attrs(self, item_list, user):
from sentry import tagstore
result = {}
for item in item_list:
result[item] = {
'name': tagstore.get_tag_value_label(item.key, item.value),
}
return result
def serialize(self, obj, attrs, user):
from sentry import tagstore
return {
'id': six.text_type(obj.id),
'name': attrs['name'],
'key': tagstore.get_standardized_key(obj.key),
'value': obj.value,
'count': obj.times_seen,
'lastSeen': obj.last_seen,
'firstSeen': obj.first_seen,
}
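# Hedged usage sketch (the lookup filters are illustrative; production code
# goes through the tagstore API rather than the ORM directly):
#
#   gtv = GroupTagValue.objects.get(project_id=1, group_id=2)
#   gtv.merge_counts(new_group)  # folds times_seen/first_seen/last_seen into
#                                # the matching row for new_group, if any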
|
import os
import sys
from robotframework_ls.constants import NULL
from robocode_ls_core.robotframework_log import get_logger
log = get_logger(__name__)
def _normfile(filename):
return os.path.abspath(os.path.normpath(os.path.normcase(filename)))
def _get_libspec_mutex_name(libspec_filename):
from robocode_ls_core.system_mutex import generate_mutex_name
libspec_filename = _norm_filename(libspec_filename)
basename = os.path.basename(libspec_filename)
name = os.path.splitext(basename)[0]
return generate_mutex_name(libspec_filename, prefix="%s_" % (name,))
def _get_additional_info_filename(spec_filename):
    additional_info_filename = spec_filename + ".m"
return additional_info_filename
def _load_library_doc_and_mtime(spec_filename, obtain_mutex=True):
"""
:param obtain_mutex:
Should be False if this is part of a bigger operation that already
has the spec_filename mutex.
"""
from robotframework_ls.impl import robot_specbuilder
from robocode_ls_core.system_mutex import timed_acquire_mutex
if obtain_mutex:
ctx = timed_acquire_mutex(_get_libspec_mutex_name(spec_filename))
else:
ctx = NULL
with ctx:
# We must load it with a mutex to avoid conflicts between generating/reading.
builder = robot_specbuilder.SpecDocBuilder()
try:
mtime = os.path.getmtime(spec_filename)
libdoc = builder.build(spec_filename)
return libdoc, mtime
except Exception:
log.exception("Error when loading spec info from: %s", spec_filename)
return None
def _load_lib_info(spec_filename, can_regenerate):
libdoc_and_mtime = _load_library_doc_and_mtime(spec_filename)
if libdoc_and_mtime is None:
return None
libdoc, mtime = libdoc_and_mtime
return _LibInfo(libdoc, mtime, spec_filename, can_regenerate)
_IS_BUILTIN = "is_builtin"
_SOURCE_TO_MTIME = "source_to_mtime"
_UNABLE_TO_LOAD = "unable_to_load"
def _create_updated_source_to_mtime(library_doc):
sources = set()
source = library_doc.source
if source is not None:
sources.add(source)
for keyword in library_doc.keywords:
source = keyword.source
if source is not None:
sources.add(source)
source_to_mtime = {}
for source in sources:
try:
source = _normfile(source)
source_to_mtime[source] = os.path.getmtime(source)
except Exception:
log.exception("Unable to load source for file: %s", source)
return source_to_mtime
def _create_additional_info(spec_filename, is_builtin, obtain_mutex=True):
try:
additional_info = {_IS_BUILTIN: is_builtin}
if is_builtin:
# For builtins we don't have to check the mtime
# (on a new version we update the folder).
return additional_info
library_doc_and_mtime = _load_library_doc_and_mtime(
spec_filename, obtain_mutex=obtain_mutex
)
if library_doc_and_mtime is None:
additional_info[_UNABLE_TO_LOAD] = True
return additional_info
library_doc = library_doc_and_mtime[0]
additional_info[_SOURCE_TO_MTIME] = _create_updated_source_to_mtime(library_doc)
return additional_info
except:
log.exception(
"Error creating additional info for spec filename: %s", spec_filename
)
return {}
def _load_spec_filename_additional_info(spec_filename):
"""
Loads additional information given a spec filename.
"""
import json
try:
additional_info_filename = _get_additional_info_filename(spec_filename)
with open(additional_info_filename, "r") as stream:
source_to_mtime = json.load(stream)
return source_to_mtime
except:
log.exception("Unable to load source mtimes from: %s", additional_info_filename)
return {}
def _dump_spec_filename_additional_info(spec_filename, is_builtin, obtain_mutex=True):
"""
Creates a filename with additional information not directly available in the
spec.
"""
import json
source_to_mtime = _create_additional_info(
spec_filename, is_builtin, obtain_mutex=obtain_mutex
)
additional_info_filename = _get_additional_info_filename(spec_filename)
with open(additional_info_filename, "w") as stream:
json.dump(source_to_mtime, stream, indent=2, sort_keys=True)
class _LibInfo(object):
__slots__ = [
"library_doc",
"mtime",
"_spec_filename",
"_additional_info",
"_invalid",
"_can_regenerate",
]
def __init__(self, library_doc, mtime, spec_filename, can_regenerate):
"""
:param library_doc:
:param mtime:
:param spec_filename:
:param bool can_regenerate:
False means that the information from this file can't really be
regenerated (i.e.: this is a spec file from a library or created
by the user).
"""
assert library_doc
assert mtime
assert spec_filename
self.library_doc = library_doc
self.mtime = mtime
self._can_regenerate = can_regenerate
self._spec_filename = spec_filename
self._additional_info = None
self._invalid = False
def verify_sources_sync(self):
"""
:return bool:
True if everything is ok and this library info can be used. Otherwise,
the spec file and the _LibInfo must be recreated.
"""
if not self._can_regenerate:
            # This means that this info was generated by a library or by the
            # user, so we can't regenerate it.
return True
if self._invalid: # Once invalid, always invalid.
return False
additional_info = self._additional_info
if additional_info is None:
additional_info = _load_spec_filename_additional_info(self._spec_filename)
if additional_info.get(_IS_BUILTIN, False):
return True
source_to_mtime = additional_info.get(_SOURCE_TO_MTIME)
if source_to_mtime is None:
# Nothing to validate...
return True
updated_source_to_mtime = _create_updated_source_to_mtime(self.library_doc)
if source_to_mtime != updated_source_to_mtime:
log.info(
"Library %s is invalid. Current source to mtime:\n%s\nChanged from:\n%s"
% (self.library_doc.name, source_to_mtime, updated_source_to_mtime)
)
self._invalid = True
return False
return True
def _norm_filename(path):
return os.path.normcase(os.path.realpath(os.path.abspath(path)))
class _FolderInfo(object):
def __init__(self, folder_path, recursive):
self.folder_path = folder_path
self.recursive = recursive
self.libspec_filename_to_info = {}
self._watch = NULL
def start_watch(self, observer, notifier):
if self._watch is NULL:
if not os.path.isdir(self.folder_path):
if not os.path.exists(self.folder_path):
log.info(
"Trying to track changes in path which does not exist: %s",
self.folder_path,
)
else:
log.info(
"Trying to track changes in path which is not a folder: %s",
self.folder_path,
)
return
log.info("Tracking folder for changes: %s", self.folder_path)
from robocode_ls_core.watchdog_wrapper import PathInfo
folder_path = self.folder_path
self._watch = observer.notify_on_extensions_change(
[PathInfo(folder_path, recursive=self.recursive)],
["libspec"],
notifier.on_change,
(self._on_change_spec,),
)
def _on_change_spec(self, spec_file):
spec_file = _norm_filename(spec_file)
# Just add/remove that specific spec file from the tracked list.
libspec_filename_to_info = self.libspec_filename_to_info.copy()
if os.path.exists(spec_file):
libspec_filename_to_info[spec_file] = None
else:
libspec_filename_to_info.pop(spec_file, None)
self.libspec_filename_to_info = libspec_filename_to_info
def synchronize(self):
try:
self.libspec_filename_to_info = self._collect_libspec_info(
[self.folder_path],
self.libspec_filename_to_info,
recursive=self.recursive,
)
except Exception:
log.exception("Error when synchronizing: %s", self.folder_path)
def dispose(self):
watch = self._watch
self._watch = NULL
watch.stop_tracking()
self.libspec_filename_to_info = {}
def _collect_libspec_info(self, folders, old_libspec_filename_to_info, recursive):
seen_libspec_files = set()
if recursive:
for folder in folders:
if os.path.isdir(folder):
for root, _dirs, files in os.walk(folder):
for filename in files:
if filename.lower().endswith(".libspec"):
seen_libspec_files.add(os.path.join(root, filename))
else:
for folder in folders:
if os.path.isdir(folder):
for filename in os.listdir(folder):
if filename.lower().endswith(".libspec"):
seen_libspec_files.add(os.path.join(folder, filename))
new_libspec_filename_to_info = {}
for filename in seen_libspec_files:
filename = _norm_filename(filename)
info = old_libspec_filename_to_info.get(filename)
if info is not None:
try:
curr_mtime = os.path.getmtime(filename)
except:
# it was deleted in the meanwhile...
continue
else:
if info.mtime != curr_mtime:
# The spec filename mtime changed, so, set to None
# to reload it.
info = None
new_libspec_filename_to_info[filename] = info
return new_libspec_filename_to_info
class LibspecManager(object):
"""
Used to manage the libspec files.
.libspec files are searched in the following directories:
- PYTHONPATH folders (not recursive)
- Workspace folders (recursive -- notifications from the LSP)
- ${user}.robotframework-ls/specs/${python_hash} (not recursive)
It searches for .libspec files in the folders tracked and provides the
keywords that are available from those (properly caching data as needed).
"""
@classmethod
def get_internal_libspec_dir(cls):
from robotframework_ls import robot_config
home = robot_config.get_robotframework_ls_home()
pyexe = sys.executable
if not isinstance(pyexe, bytes):
pyexe = pyexe.encode("utf-8")
import hashlib
digest = hashlib.sha256(pyexe).hexdigest()[:8]
try:
import robot
v = str(robot.get_version())
except:
v = "unknown"
# Note: _v1: information on the mtime of the libspec sources now available.
return os.path.join(home, "specs", "%s_%s" % (digest, v))
@classmethod
def get_internal_builtins_libspec_dir(cls, internal_libspec_dir=None):
return os.path.join(
internal_libspec_dir or cls.get_internal_libspec_dir(), "builtins"
)
def __init__(self, builtin_libspec_dir=None, user_libspec_dir=None):
"""
        :param builtin_libspec_dir:
            Only to be used in tests (to regenerate the builtins)!
"""
from robocode_ls_core import watchdog_wrapper
from concurrent import futures
from multiprocessing import cpu_count
        self._thread_pool = futures.ThreadPoolExecutor(
            max_workers=int(cpu_count() * 1.2) + 1
        )
self._observer = watchdog_wrapper.create_observer()
self._spec_changes_notifier = watchdog_wrapper.create_notifier(
self._on_spec_file_changed, timeout=0.5
)
self._libspec_dir = self.get_internal_libspec_dir()
self._user_libspec_dir = user_libspec_dir or os.path.join(
self._libspec_dir, "user"
)
self._builtins_libspec_dir = (
builtin_libspec_dir
or self.get_internal_builtins_libspec_dir(self._libspec_dir)
)
log.debug("User libspec dir: %s", self._user_libspec_dir)
log.debug("Builtins libspec dir: %s", self._builtins_libspec_dir)
try:
os.makedirs(self._user_libspec_dir)
except:
# Ignore exception if it's already created.
pass
try:
os.makedirs(self._builtins_libspec_dir)
except:
# Ignore exception if it's already created.
pass
# Spec info found in the workspace
self._workspace_folder_uri_to_folder_info = {}
self._additional_pythonpath_folder_to_folder_info = {}
# Spec info found in the pythonpath
pythonpath_folder_to_folder_info = {}
for path in sys.path:
if path and os.path.isdir(path):
pythonpath_folder_to_folder_info[path] = _FolderInfo(
path, recursive=False
)
self._pythonpath_folder_to_folder_info = pythonpath_folder_to_folder_info
# Spec info found in internal dirs (autogenerated)
self._internal_folder_to_folder_info = {
self._user_libspec_dir: _FolderInfo(
self._user_libspec_dir, recursive=False
),
self._builtins_libspec_dir: _FolderInfo(
self._builtins_libspec_dir, recursive=False
),
}
# Must be set from the outside world when needed.
self.config = None
self._synchronize()
self._gen_builtin_libraries()
@property
def config(self):
return self._config
@config.setter
def config(self, config):
from robotframework_ls.impl.robot_lsp_constants import OPTION_ROBOT_PYTHONPATH
self._config = config
existing_entries = set(self._additional_pythonpath_folder_to_folder_info.keys())
if config is not None:
pythonpath_entries = set(
config.get_setting(OPTION_ROBOT_PYTHONPATH, list, [])
)
for new_pythonpath_entry in pythonpath_entries:
if new_pythonpath_entry not in existing_entries:
self.add_additional_pythonpath_folder(new_pythonpath_entry)
for old_entry in existing_entries:
if old_entry not in pythonpath_entries:
self.remove_additional_pythonpath_folder(old_entry)
self.synchronize_additional_pythonpath_folders()
@property
def user_libspec_dir(self):
return self._user_libspec_dir
def _on_spec_file_changed(self, spec_file, target):
log.debug("File change detected: %s", spec_file)
target(spec_file)
def add_workspace_folder(self, folder_uri):
from robocode_ls_core import uris
if folder_uri not in self._workspace_folder_uri_to_folder_info:
log.debug("Added workspace folder: %s", folder_uri)
cp = self._workspace_folder_uri_to_folder_info.copy()
folder_info = cp[folder_uri] = _FolderInfo(
uris.to_fs_path(folder_uri), recursive=True
)
self._workspace_folder_uri_to_folder_info = cp
folder_info.start_watch(self._observer, self._spec_changes_notifier)
folder_info.synchronize()
else:
log.debug("Workspace folder already added: %s", folder_uri)
def remove_workspace_folder(self, folder_uri):
if folder_uri in self._workspace_folder_uri_to_folder_info:
log.debug("Removed workspace folder: %s", folder_uri)
cp = self._workspace_folder_uri_to_folder_info.copy()
folder_info = cp.pop(folder_uri, NULL)
folder_info.dispose()
self._workspace_folder_uri_to_folder_info = cp
else:
log.debug("Workspace folder already removed: %s", folder_uri)
def add_additional_pythonpath_folder(self, folder_path):
if folder_path not in self._additional_pythonpath_folder_to_folder_info:
log.debug("Added additional pythonpath folder: %s", folder_path)
cp = self._additional_pythonpath_folder_to_folder_info.copy()
folder_info = cp[folder_path] = _FolderInfo(folder_path, recursive=True)
self._additional_pythonpath_folder_to_folder_info = cp
folder_info.start_watch(self._observer, self._spec_changes_notifier)
folder_info.synchronize()
else:
log.debug("Additional pythonpath folder already added: %s", folder_path)
def remove_additional_pythonpath_folder(self, folder_path):
if folder_path in self._additional_pythonpath_folder_to_folder_info:
log.debug("Removed additional pythonpath folder: %s", folder_path)
cp = self._additional_pythonpath_folder_to_folder_info.copy()
folder_info = cp.pop(folder_path, NULL)
folder_info.dispose()
self._additional_pythonpath_folder_to_folder_info = cp
else:
log.debug("Additional pythonpath folder already removed: %s", folder_path)
def _gen_builtin_libraries(self):
"""
Generates .libspec files for the libraries builtin (if needed).
"""
import time
try:
from robotframework_ls.impl import robot_constants
from robocode_ls_core.system_mutex import timed_acquire_mutex
from robocode_ls_core.system_mutex import generate_mutex_name
initial_time = time.time()
wait_for = []
with timed_acquire_mutex(
generate_mutex_name(
_norm_filename(self._builtins_libspec_dir), prefix="gen_builtins_"
),
timeout=100,
):
for libname in robot_constants.STDLIBS:
library_info = self.get_library_info(libname, create=False)
if library_info is None:
wait_for.append(
self._thread_pool.submit(
self._create_libspec, libname, is_builtin=True
)
)
for future in wait_for:
future.result()
if wait_for:
log.debug(
"Total time to generate builtins: %.2fs"
% (time.time() - initial_time)
)
self.synchronize_internal_libspec_folders()
except:
log.exception("Error creating builtin libraries.")
def synchronize_workspace_folders(self):
for folder_info in self._workspace_folder_uri_to_folder_info.values():
folder_info.start_watch(self._observer, self._spec_changes_notifier)
folder_info.synchronize()
def synchronize_pythonpath_folders(self):
for folder_info in self._pythonpath_folder_to_folder_info.values():
folder_info.start_watch(self._observer, self._spec_changes_notifier)
folder_info.synchronize()
def synchronize_additional_pythonpath_folders(self):
for folder_info in self._additional_pythonpath_folder_to_folder_info.values():
folder_info.start_watch(self._observer, self._spec_changes_notifier)
folder_info.synchronize()
def synchronize_internal_libspec_folders(self):
for folder_info in self._internal_folder_to_folder_info.values():
folder_info.start_watch(self._observer, self._spec_changes_notifier)
folder_info.synchronize()
def _synchronize(self):
"""
Updates the internal caches related to the tracked .libspec files found.
This can be a slow call as it may traverse the whole workspace folders
hierarchy, so, it should be used only during startup to fill the initial
info.
"""
self.synchronize_workspace_folders()
self.synchronize_pythonpath_folders()
self.synchronize_additional_pythonpath_folders()
self.synchronize_internal_libspec_folders()
def _iter_lib_info(self):
"""
:rtype: generator(_LibInfo)
"""
# Note: the iteration order is important (first ones are visited earlier
# and have higher priority).
iter_in = []
for (_uri, info) in self._workspace_folder_uri_to_folder_info.items():
iter_in.append((info.libspec_filename_to_info, False))
for (_uri, info) in self._pythonpath_folder_to_folder_info.items():
iter_in.append((info.libspec_filename_to_info, False))
for (_uri, info) in self._additional_pythonpath_folder_to_folder_info.items():
iter_in.append((info.libspec_filename_to_info, False))
for (_uri, info) in self._internal_folder_to_folder_info.items():
iter_in.append((info.libspec_filename_to_info, True))
for filename_to_info, can_regenerate in iter_in:
for spec_filename, info in list(filename_to_info.items()):
if info is None:
info = filename_to_info[spec_filename] = _load_lib_info(
spec_filename, can_regenerate
)
# Note: we could end up yielding a library with the same name
# multiple times due to its scope. It's up to the caller to
# validate that.
# Note: we also check if there are keywords available... in
# some cases we may create libraries for namespace packages
# (i.e.: empty folders) which don't really have anything -- in
# this case, this isn't a valid library.
if (
info is not None
and info.library_doc is not None
and info.library_doc.keywords
):
yield info
def get_library_names(self):
return sorted(
set(lib_info.library_doc.name for lib_info in self._iter_lib_info())
)
def _create_libspec(
self,
libname,
env=None,
log_time=True,
cwd=None,
additional_path=None,
is_builtin=False,
):
"""
:param str libname:
:raise Exception: if unable to create the library.
"""
import time
from robotframework_ls.impl import robot_constants
from robocode_ls_core.subprocess_wrapper import subprocess
from robocode_ls_core.system_mutex import timed_acquire_mutex
curtime = time.time()
try:
try:
call = [sys.executable]
call.extend("-m robot.libdoc --format XML:HTML".split())
if additional_path:
if os.path.exists(additional_path):
call.extend(["-P", additional_path])
additional_pythonpath_entries = list(
self._additional_pythonpath_folder_to_folder_info.keys()
)
for entry in list(additional_pythonpath_entries):
if os.path.exists(entry):
call.extend(["-P", entry])
call.append(libname)
libspec_dir = self._user_libspec_dir
if libname in robot_constants.STDLIBS:
libspec_dir = self._builtins_libspec_dir
libspec_filename = os.path.join(libspec_dir, libname + ".libspec")
with timed_acquire_mutex(
_get_libspec_mutex_name(libspec_filename)
): # Could fail.
call.append(libspec_filename)
mtime = -1
try:
mtime = os.path.getmtime(libspec_filename)
except:
pass
log.debug(
"Generating libspec for: %s.\nCwd:%s\nCommand line:\n%s",
libname,
cwd,
" ".join(call),
)
try:
try:
# Note: stdout is always subprocess.PIPE in this call.
subprocess.check_output(
call,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
env=env,
cwd=cwd,
)
except OSError as e:
log.exception("Error calling: %s", call)
                            # We may get e.g. "OSError: [WinError 6] The handle is invalid";
                            # decide the result based on whether the file changed on disk.
try:
if mtime != os.path.getmtime(libspec_filename):
_dump_spec_filename_additional_info(
libspec_filename,
is_builtin=is_builtin,
obtain_mutex=False,
)
return True
except:
pass
log.debug("Not retrying after OSError failure.")
return False
except subprocess.CalledProcessError as e:
log.exception(
"Error creating libspec: %s. Output:\n%s", libname, e.output
)
return False
_dump_spec_filename_additional_info(
libspec_filename, is_builtin=is_builtin, obtain_mutex=False
)
return True
except Exception:
log.exception("Error creating libspec: %s", libname)
return False
finally:
if log_time:
delta = time.time() - curtime
log.debug("Took: %.2fs to generate info for: %s" % (delta, libname))
def dispose(self):
self._observer.dispose()
self._spec_changes_notifier.dispose()
def _do_create_libspec_on_get(self, libname, current_doc_uri):
from robocode_ls_core import uris
additional_path = None
abspath = None
cwd = None
if current_doc_uri is not None:
cwd = os.path.dirname(uris.to_fs_path(current_doc_uri))
if not cwd or not os.path.isdir(cwd):
cwd = None
if os.path.isabs(libname):
abspath = libname
elif current_doc_uri is not None:
# relative path: let's make it absolute
fs_path = os.path.dirname(uris.to_fs_path(current_doc_uri))
abspath = os.path.abspath(os.path.join(fs_path, libname))
if abspath:
additional_path = os.path.dirname(abspath)
libname = os.path.basename(libname)
if libname.lower().endswith((".py", ".class", ".java")):
libname = os.path.splitext(libname)[0]
if self._create_libspec(libname, additional_path=additional_path, cwd=cwd):
self.synchronize_internal_libspec_folders()
return True
return False
def get_library_info(self, libname, create=True, current_doc_uri=None):
"""
:param libname:
It may be a library name, a relative path to a .py file or an
absolute path to a .py file.
:rtype: LibraryDoc
"""
libname_lower = libname.lower()
if libname_lower.endswith((".py", ".class", ".java")):
libname_lower = os.path.splitext(libname)[0]
if "/" in libname_lower or "\\" in libname_lower:
libname_lower = os.path.basename(libname_lower)
for lib_info in self._iter_lib_info():
library_doc = lib_info.library_doc
if library_doc.name and library_doc.name.lower() == libname_lower:
if not lib_info.verify_sources_sync():
if create:
# Found but it's not in sync. Try to regenerate (don't proceed
# because we don't want to match a lower priority item, so,
# regenerate and get from the cache without creating).
self._do_create_libspec_on_get(libname, current_doc_uri)
                        # Note: get even if it was not created (we may match
                        # a lower priority library).
return self.get_library_info(
libname, create=False, current_doc_uri=current_doc_uri
)
else:
# Not in sync and it should not be created, just skip it.
continue
else:
return library_doc
if create:
if self._do_create_libspec_on_get(libname, current_doc_uri):
return self.get_library_info(
libname, create=False, current_doc_uri=current_doc_uri
)
log.debug("Unable to find library named: %s", libname)
return None
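# Hedged usage sketch (the workspace URI is illustrative):
#
#   manager = LibspecManager()
#   manager.add_workspace_folder("file:///path/to/workspace")
#   lib_doc = manager.get_library_info("BuiltIn", create=False)
#   manager.dispose()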
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
if not head or not head.next:
return None
        # advance first to index n+1 (head being index 0); it ends up None
        # if the list has exactly n nodes
first = head
count = 0
while first and count <= n:
first = first.next
count += 1
        # move both first and second; they stay n nodes apart, so when
        # first runs past the end, second is one node before the nth
        # node from the end
second = head
while first:
first = first.next
second = second.next
        # count == n+1 means the first loop exited naturally
if second == head and count != n+1:
head = second.next
else:
second.next = second.next.next
return head
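# A hedged trace of the two-pointer logic above for head=[1,2,3,4,5], n=2:
#   first advances n+1 = 3 nodes (to 4); second starts at 1.
#   Both advance until first is None, leaving second at 3.
#   second.next = second.next.next unlinks 4, yielding [1,2,3,5].
# Edge case [1,2], n=2: the first loop exhausts the list with count == n,
# so the head itself is removed, yielding [2].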
|
from django.apps import AppConfig
class Apiv3Config(AppConfig):
name = "apiv3"
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateSettings
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_v2_generated_ConfigServiceV2_UpdateSettings_sync]
from google.cloud import logging_v2
def sample_update_settings():
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
request = logging_v2.UpdateSettingsRequest(
name="name_value",
)
# Make the request
response = client.update_settings(request=request)
# Handle the response
print(response)
# [END logging_v2_generated_ConfigServiceV2_UpdateSettings_sync]
|
import param
import numpy as np
from cartopy import crs as ccrs
from cartopy.img_transform import warp_array, _determine_bounds
from holoviews.core.util import cartesian_product, get_param_values
from holoviews.operation import Operation
from shapely.geometry import Polygon, LineString, MultiPolygon, MultiLineString
from ..element import (Image, Shape, Polygons, Path, Points, Contours,
RGB, Graph, Nodes, EdgePaths, QuadMesh, VectorField,
HexTiles, Labels)
from ..util import (
project_extents, geom_to_array, wrap_path_data, is_multi_geometry,
polygon_to_geom, path_to_geom
)
class _project_operation(Operation):
"""
Baseclass for projection operations, projecting elements from their
source coordinate reference system to the supplied projection.
"""
projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,
class_=ccrs.Projection,
instantiate=False, doc="""
Projection the shape type is projected to.""")
# Defines the types of elements supported by the operation
supported_types = []
def _process(self, element, key=None):
return element.map(self._process_element, self.supported_types)
class project_path(_project_operation):
"""
Projects Polygons and Path Elements from their source coordinate
reference system to the supplied projection.
"""
supported_types = [Polygons, Path, Contours, EdgePaths]
def _project_path(self, element, path, data, boundary, geom_type, multi_type):
"""
Handle case of continuously varying path
"""
xdim, ydim = path.kdims[:2]
xs, ys = (path.dimension_values(i) for i in range(2))
if not len(xs):
return []
proj_arr = self.p.projection.quick_vertices_transform(
np.column_stack([xs, ys]), element.crs)
if proj_arr is None:
vertices = np.column_stack([xs, ys])
if hasattr(element.crs, '_bbox_and_offset'):
vertices = wrap_path_data(vertices, element.crs, element.crs)
path = geom_type(vertices)
if boundary:
path = path.intersection(boundary)
if not path:
return []
proj = self.p.projection.project_geometry(path, element.crs)
proj_arr = geom_to_array(proj)
data[xdim.name] = proj_arr[:, 0]
data[ydim.name] = proj_arr[:, 1]
return [data]
def _project_contour(self, element, contour, data, boundary, geom_type, multi_type):
"""
Handle case of iso-contour
"""
xdim, ydim = contour.kdims[:2]
data = {k: vals[0] for k, vals in data.items()}
# Wrap longitudes
vertices = contour.array([0, 1])
if hasattr(element.crs, '_bbox_and_offset'):
vertices = wrap_path_data(vertices, element.crs, element.crs)
element = type(element)([vertices])
        to_geom = polygon_to_geom if isinstance(element, Polygons) else path_to_geom
# Clip path to projection boundaries
geoms = []
for g in to_geom(element, multi=False, skip_invalid=False):
if np.isinf(np.array(g.array_interface_base['data'])).sum():
# Skip if infinity in path
continue
try:
# Compute boundary intersections
if boundary:
g = g.intersection(boundary)
except:
continue
if is_multi_geometry(g):
for p in g:
try:
geoms.append(geom_type(p))
except:
continue
else:
geoms.append(g)
# Project geometry
projected = []
for g in geoms:
proj = self.p.projection.project_geometry(g, contour.crs)
proj = proj if is_multi_geometry(proj) else [proj]
for geom in proj:
vertices = np.array(geom.array_interface_base['data']).reshape(-1, 2)
xs, ys = vertices.T
if len(xs):
projected.append(dict(data, **{xdim.name: xs, ydim.name: ys}))
return projected
def _project_geodataframe(self, element):
geoms = element.split(datatype='geom')
projected = [self.p.projection.project_geometry(geom, element.crs)
for geom in geoms]
new_data = element.data.copy()
new_data['geometry'] = projected
return element.clone(new_data, crs=self.p.projection)
def _process_element(self, element):
if not len(element):
return element.clone(crs=self.p.projection)
elif element.interface.datatype == 'geodataframe':
return self._project_geodataframe(element)
boundary = element.crs.project_geometry(Polygon(self.p.projection.boundary),
self.p.projection)
if isinstance(element, Polygons):
multi_type, geom_type = MultiPolygon, Polygon
else:
multi_type, geom_type = MultiLineString, LineString
projected = []
paths = element.split()
for path in paths:
data = {vd.name: path.dimension_values(vd, expanded=False) for vd in path.vdims}
if any(len(vals) > 1 for vals in data.values()):
projected += self._project_path(element, path, data, boundary, geom_type, multi_type)
else:
projected += self._project_contour(element, path, data, boundary, geom_type, multi_type)
if len(paths) and len(projected) == 0:
self.warning('While projecting a %s element from a %s coordinate '
'reference system (crs) to a %s projection none of '
'the projected paths were contained within the bounds '
'specified by the projection. Ensure you have specified '
'the correct coordinate system for your data.' %
(type(element).__name__, type(element.crs).__name__,
type(self.p.projection).__name__))
return element.clone(projected, crs=self.p.projection)
class project_shape(_project_operation):
"""
Projects Shape Element from the source coordinate reference system
to the supplied projection.
"""
supported_types = [Shape]
def _process_element(self, element):
if not len(element):
return element.clone(crs=self.p.projection)
geom = element.geom()
vertices = geom_to_array(geom)
if isinstance(geom, (MultiPolygon, Polygon)):
obj = Polygons([vertices])
else:
obj = Path([vertices])
geom = project_path(obj, projection=self.p.projection).geom()
return element.clone(geom, crs=self.p.projection)
class project_points(_project_operation):
supported_types = [Points, Nodes, VectorField, HexTiles, Labels]
def _process_element(self, element):
if not len(element):
return element.clone(crs=self.p.projection)
xdim, ydim = element.dimensions()[:2]
xs, ys = (element.dimension_values(i) for i in range(2))
coordinates = self.p.projection.transform_points(element.crs, xs, ys)
mask = np.isfinite(coordinates[:, 0])
new_data = {k: v[mask] for k, v in element.columns().items()}
new_data[xdim.name] = coordinates[mask, 0]
new_data[ydim.name] = coordinates[mask, 1]
datatype = [element.interface.datatype]+element.datatype
if len(new_data[xdim.name]) == 0:
self.warning('While projecting a %s element from a %s coordinate '
'reference system (crs) to a %s projection none of '
'the projected paths were contained within the bounds '
'specified by the projection. Ensure you have specified '
'the correct coordinate system for your data.' %
(type(element).__name__, type(element.crs).__name__,
type(self.p.projection).__name__))
return element.clone(new_data, crs=self.p.projection,
datatype=datatype)
class project_graph(_project_operation):
supported_types = [Graph]
def _process_element(self, element):
nodes = project_points(element.nodes, projection=self.projection)
data = (element.data, nodes)
if element._edgepaths:
data = data + (project_path(element.edgepaths, projection=self.projection),)
return element.clone(data, crs=self.projection)
class project_quadmesh(_project_operation):
supported_types = [QuadMesh]
def _process_element(self, element):
proj = self.p.projection
irregular = any(element.interface.irregular(element, kd)
for kd in element.kdims)
zs = element.dimension_values(2, flat=False)
if irregular:
X, Y = [np.asarray(element.interface.coords(element, kd, expanded=True))
for kd in element.kdims]
else:
X = element.dimension_values(0, expanded=True)
Y = element.dimension_values(1, expanded=True)
zs = zs.T
coords = proj.transform_points(element.crs, X, Y)
PX, PY = coords[..., 0], coords[..., 1]
# Mask quads which are wrapping around the x-axis
wrap_proj_types = (ccrs._RectangularProjection,
ccrs._WarpedRectangularProjection,
ccrs.InterruptedGoodeHomolosine,
ccrs.Mercator)
if isinstance(proj, wrap_proj_types):
with np.errstate(invalid='ignore'):
edge_lengths = np.hypot(
                    np.diff(PX, axis=1),
np.diff(PY, axis=1)
)
to_mask = (
(edge_lengths >= abs(proj.x_limits[1] -
proj.x_limits[0]) / 2) |
np.isnan(edge_lengths)
)
if np.any(to_mask):
                mask = np.zeros(zs.shape, dtype=bool)
mask[:, 1:][to_mask] = True
mask[:, 2:][to_mask[:, :-1]] = True
mask[:, :-1][to_mask] = True
mask[:, :-2][to_mask[:, 1:]] = True
mask[1:, 1:][to_mask[:-1]] = True
mask[1:, :-1][to_mask[:-1]] = True
mask[:-1, 1:][to_mask[1:]] = True
mask[:-1, :-1][to_mask[1:]] = True
                zs[mask] = np.nan
params = get_param_values(element)
if PX.ndim < 2:
PX = PX.reshape(zs.shape)
if PY.ndim < 2:
PY = PY.reshape(zs.shape)
return QuadMesh((PX, PY, zs), crs=self.projection, **params)
class project_image(_project_operation):
"""
    Projects a GeoViews Image to the specified projection,
returning a regular HoloViews Image type. Works by
regridding the data along projected bounds. Only supports
rectangular projections.
"""
fast = param.Boolean(default=False, doc="""
Whether to enable fast reprojection with (much) better
performance but poorer handling in polar regions.""")
width = param.Integer(default=None, doc="""
        Width of the reprojected Image""")
height = param.Integer(default=None, doc="""
Height of the reprojected Image""")
link_inputs = param.Boolean(default=True, doc="""
By default, the link_inputs parameter is set to True so that
when applying project_image, backends that support linked streams
update RangeXY streams on the inputs of the operation.""")
supported_types = [Image]
def _process(self, img, key=None):
if self.p.fast:
return self._fast_process(img, key)
proj = self.p.projection
if proj == img.crs:
return img
x0, x1 = img.range(0)
y0, y1 = img.range(1)
xn, yn = img.interface.shape(img, gridded=True)[:2]
px0, py0, px1, py1 = project_extents((x0, y0, x1, y1),
img.crs, proj)
src_ext, trgt_ext = (x0, x1, y0, y1), (px0, px1, py0, py1)
arrays = []
for vd in img.vdims:
arr = img.dimension_values(vd, flat=False)
if arr.size:
projected, extents = warp_array(arr, proj, img.crs, (xn, yn),
src_ext, trgt_ext)
else:
projected, extents = arr, trgt_ext
arrays.append(projected)
projected = np.dstack(arrays) if len(arrays) > 1 else arrays[0]
data = np.flipud(projected)
bounds = (extents[0], extents[2], extents[1], extents[3])
return img.clone(data, bounds=bounds, kdims=img.kdims,
vdims=img.vdims, crs=proj, xdensity=None,
ydensity=None)
def _fast_process(self, element, key=None):
# Project coordinates
proj = self.p.projection
if proj == element.crs:
return element
h, w = element.interface.shape(element, gridded=True)[:2]
xs = element.dimension_values(0)
ys = element.dimension_values(1)
if isinstance(element, RGB):
rgb = element.rgb
array = np.dstack([np.flipud(rgb.dimension_values(d, flat=False))
for d in rgb.vdims])
else:
array = element.dimension_values(2, flat=False)
(x0, y0, x1, y1) = element.bounds.lbrt()
width = int(w) if self.p.width is None else self.p.width
height = int(h) if self.p.height is None else self.p.height
bounds = _determine_bounds(xs, ys, element.crs)
yb = bounds['y']
resampled = []
xvalues = []
for xb in bounds['x']:
px0, py0, px1, py1 = project_extents((xb[0], yb[0], xb[1], yb[1]), element.crs, proj)
if len(bounds['x']) > 1:
xfraction = (xb[1]-xb[0])/(x1-x0)
fraction_width = int(width*xfraction)
else:
fraction_width = width
xs = np.linspace(px0, px1, fraction_width)
ys = np.linspace(py0, py1, height)
cxs, cys = cartesian_product([xs, ys])
pxs, pys, _ = element.crs.transform_points(proj, np.asarray(cxs), np.asarray(cys)).T
icxs = (((pxs-x0) / (x1-x0)) * w).astype(int)
icys = (((pys-y0) / (y1-y0)) * h).astype(int)
xvalues.append(xs)
icxs[icxs<0] = 0
icys[icys<0] = 0
icxs[icxs>=w] = w-1
icys[icys>=h] = h-1
resampled_arr = array[icys, icxs]
if isinstance(element, RGB):
nvdims = len(element.vdims)
resampled_arr = resampled_arr.reshape((fraction_width, height, nvdims)).transpose([1, 0, 2])
else:
resampled_arr = resampled_arr.reshape((fraction_width, height)).T
resampled.append(resampled_arr)
xs = np.concatenate(xvalues[::-1])
resampled = np.hstack(resampled[::-1])
datatypes = [element.interface.datatype, 'xarray', 'grid']
data = (xs, ys)
for i in range(len(element.vdims)):
if resampled.ndim > 2:
data = data + (resampled[::-1, :, i],)
else:
data = data + (resampled,)
return element.clone(data, crs=proj, bounds=None, datatype=datatypes)
class project(Operation):
"""
Projects GeoViews Element types to the specified projection.
"""
projection = param.ClassSelector(default=ccrs.GOOGLE_MERCATOR,
class_=ccrs.Projection,
instantiate=False, doc="""
Projection the image type is projected to.""")
_operations = [project_path, project_image, project_shape,
project_graph, project_quadmesh, project_points]
def _process(self, element, key=None):
for op in self._operations:
element = element.map(op.instance(projection=self.p.projection),
op.supported_types)
return element
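# Hedged usage sketch (assumes geoviews is installed; the element values are
# illustrative):
#
#   import geoviews as gv
#   points = gv.Points([(0, 0), (10, 10)], crs=ccrs.PlateCarree())
#   projected = project(points, projection=ccrs.GOOGLE_MERCATOR)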
|
from typing import Any, Dict, Union
import httpx
from ...client import Client
from ...types import UNSET, Response, Unset
def _get_kwargs(
*,
client: Client,
common: Union[Unset, None, str] = UNSET,
) -> Dict[str, Any]:
url = "{}/common_parameters".format(client.base_url)
headers: Dict[str, Any] = client.get_headers()
cookies: Dict[str, Any] = client.get_cookies()
params: Dict[str, Any] = {
"common": common,
}
params = {k: v for k, v in params.items() if v is not UNSET and v is not None}
return {
"url": url,
"headers": headers,
"cookies": cookies,
"timeout": client.get_timeout(),
"params": params,
}
def _build_response(*, response: httpx.Response) -> Response[Any]:
return Response(
status_code=response.status_code,
content=response.content,
headers=response.headers,
parsed=None,
)
def sync_detailed(
*,
client: Client,
common: Union[Unset, None, str] = UNSET,
) -> Response[Any]:
"""
Args:
common (Union[Unset, None, str]):
Returns:
Response[Any]
"""
kwargs = _get_kwargs(
client=client,
common=common,
)
response = httpx.get(
verify=client.verify_ssl,
**kwargs,
)
return _build_response(response=response)
async def asyncio_detailed(
*,
client: Client,
common: Union[Unset, None, str] = UNSET,
) -> Response[Any]:
"""
Args:
common (Union[Unset, None, str]):
Returns:
Response[Any]
"""
kwargs = _get_kwargs(
client=client,
common=common,
)
async with httpx.AsyncClient(verify=client.verify_ssl) as _client:
response = await _client.get(**kwargs)
return _build_response(response=response)
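# Hedged usage sketch (the base_url is illustrative; Client comes from the
# generated package's client module imported above):
#
#   client = Client(base_url="https://api.example.com")
#   response = sync_detailed(client=client, common="value")
#   print(response.status_code)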
|
import pytest
import allure
from _pytest.nodes import Item
from _pytest.runner import CallInfo
from selene.core.exceptions import TimeoutException
from selene.support.shared import browser
@pytest.fixture(scope='function', autouse=True)
def browser_management():
"""
Here, before yield,
goes all "setup" code for each test case
aka "before test function" hook
"""
# def attach_snapshots_on_failure(error: TimeoutException) -> Exception:
# """
# An example of selene hook_wait_failure that attaches snapshots to failed test step.
# It is actually not needed and optional,
# because in the pytest_runtest_makereport hook below
# we attach screenshots to the test body itself,
# that is more handy during analysis of test report
#
# but if you need it, you can enable it by uncommenting
# together with the following ``browser.config.hook_wait_failure =`` line;)
#
# otherwise, you can remove it
# """
# last_screenshot = browser.config.last_screenshot
# if last_screenshot:
# allure.attach.file(source=last_screenshot,
# name='screenshot on failure',
# attachment_type=allure.attachment_type.PNG)
#
# last_page_source = browser.config.last_page_source
# if last_page_source:
# allure.attach.file(source=last_page_source,
# name='page source on failure',
# attachment_type=allure.attachment_type.HTML)
# return error
# browser.config.hook_wait_failure = attach_snapshots_on_failure
browser.config.timeout = 3
# todo: add your before setup here...
yield
"""
Here, after yield,
goes all "tear down" code for each test case
aka "after test function" hook
"""
# todo: add your after setup here...
browser.quit()
prev_test_screenshot = None
prev_test_page_source = None
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_setup(item):
yield
global prev_test_screenshot
prev_test_screenshot = browser.config.last_screenshot
global prev_test_page_source
prev_test_page_source = browser.config.last_page_source
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo):
"""
Attach snapshots on test failure
"""
    # All code prior to the yield statement runs before
    # any other non-wrapped hooks of the same name
outcome = yield # Run all other pytest_runtest_makereport non wrapped hooks
result = outcome.get_result()
if result.when == "call" and result.failed:
last_screenshot = browser.config.last_screenshot
        if last_screenshot and last_screenshot != prev_test_screenshot:
allure.attach.file(source=last_screenshot,
name='screenshot',
attachment_type=allure.attachment_type.PNG)
last_page_source = browser.config.last_page_source
        if last_page_source and last_page_source != prev_test_page_source:
allure.attach.file(source=last_page_source,
name='page source',
attachment_type=allure.attachment_type.HTML)
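# A minimal example test (hypothetical URL and condition, assuming selene 2.x
# style imports) that the fixtures and hooks above would cover; on failure, a
# screenshot and the page source would be attached to the Allure report:
#
#     from selene import have
#
#     def test_example():
#         browser.open('https://example.org')
#         browser.should(have.title('Example Domain'))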
|
#!/usr/bin/python
# -*- coding: utf_8 -*-
"""Access and query Twitter's API with the simplistic twitter package (`pip install twitter`).
"""
from __future__ import print_function
from __future__ import unicode_literals
import csv
import itertools
import os
import time
from twitter import OAuth
from twitter import Twitter
def setup_twitter(config_file='config.py'):
"""Setup auth keys and session with Twitter client."""
config = {}
execfile(config_file, config)
twitter_obj = Twitter(auth=OAuth(config["access_key"],
config["access_secret"],
config["consumer_key"],
config["consumer_secret"]))
return twitter_obj
def search_twitter(twitter_session, query, count=100, status='popular'):
"""Submit query to Twitter API via twitter package."""
status_options = ['mixed', 'recent', 'popular']
assert status in status_options, "'status' must be in {}.".format(status_options)
query = twitter_session.search.tweets(q=query,
lang='en',
                                          result_type=status,
count=count,
retry=True)
return query
def parse_twitter_response(twitter_response, min_rts=500, strip_non_ascii=True):
"""Extract requested variables from Twitter API response. Yield each tweet
one at a time with a generator. Available keys:
[u'contributors', u'truncated', u'text', u'is_quote_status',
u'in_reply_to_status_id', u'id', u'favorite_count', u'source',
u'retweeted', u'coordinates', u'entities', u'in_reply_to_screen_name',
u'in_reply_to_user_id', u'retweet_count', u'id_str', u'favorited',
u'retweeted_status', u'user', u'geo', u'in_reply_to_user_id_str',
u'possibly_sensitive', u'lang', u'created_at',
u'in_reply_to_status_id_str', u'place', u'metadata']
"""
for result in twitter_response['statuses']:
tweet_datetime = result['created_at']
text = result['text'].encode('utf_8')
if strip_non_ascii:
text = ''.join([i if ord(i) < 128 else ' ' for i in text])
# Strip 'RT ' from head of retweets, redundant
if text.startswith('RT '):
text = text[3:]
        # Change newlines to spaces
text = ''.join([' ' if c == '\n' else c for c in text])
rt_count = result['retweet_count']
yield {'_tweet_datetime': tweet_datetime,
'_text': text,
'_rt_count': rt_count}
def search_parse_write_tweets(query_str,
total_to_fetch,
status,
minimum_rts,
low_rt_threshold):
twitter = setup_twitter()
query_response = search_twitter(twitter_session=twitter,
                                    query=query_str,
                                    count=total_to_fetch,
status=status)
print("Search complete ({} seconds)".format(query_response["search_metadata"]["completed_in"]))
tweets_data = parse_twitter_response(query_response, min_rts=minimum_rts) # yields generator
    # Peek at the first tweet to learn the fieldnames, then chain it back
    # into the stream so it is not dropped from the output.
    first_tweet = next(tweets_data, None)
    if first_tweet is None:
        return
    fieldnames = list(first_tweet.keys())
    fieldnames_len = len(fieldnames)
    tweets_data = itertools.chain([first_tweet], tweets_data)
# Set up csv writers
file1 = 'tweets/tweets_popular.csv'
f1_write_header = False
if not os.path.isfile(file1):
f1_write_header = True
csv_popular_open = open(file1, 'ab')
csv_popular_writer = csv.DictWriter(csv_popular_open, delimiter=b'|', fieldnames=fieldnames)
if f1_write_header:
csv_popular_writer.writeheader()
file2 = 'tweets/tweets_not_popular.csv'
f2_write_header = False
if not os.path.isfile(file2):
f2_write_header = True
csv_not_popular_open = open(file2, 'ab')
csv_not_popular_writer = csv.DictWriter(csv_not_popular_open, delimiter=b'|', fieldnames=fieldnames)
if f2_write_header:
csv_not_popular_writer.writeheader()
# Loop thru generator of dicts, write row to right file
for tweet_data in tweets_data:
        if tweet_data['_rt_count'] >= minimum_rts:
if len(tweet_data.keys()) == fieldnames_len:
csv_popular_writer.writerow(tweet_data)
        elif tweet_data['_rt_count'] <= low_rt_threshold:
if len(tweet_data.keys()) == fieldnames_len:
csv_not_popular_writer.writerow(tweet_data)
if __name__ == '__main__':
TWEETS_TO_FETCH = 1000
query_string = 'the a u i me she you he they for rt at tweet'.split(' ')
query_disjunction = ' OR '.join(query_string)
#status = 'popular' # ['mixed', 'recent', 'popular']
minimum_rts = 500
low_rt_threshold = 10
while True:
time.sleep(60)
search_parse_write_tweets(query_str=query_disjunction,
total_to_fetch=TWEETS_TO_FETCH,
status='popular',
minimum_rts=minimum_rts,
low_rt_threshold=low_rt_threshold)
search_parse_write_tweets(query_str=query_disjunction,
total_to_fetch=TWEETS_TO_FETCH,
status='mixed',
minimum_rts=minimum_rts,
low_rt_threshold=low_rt_threshold)
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.plugin import (Plugin, Int, Float, Range, Metadata, Str, Bool,
Choices, MetadataColumn, Categorical, List,
Citations, TypeMatch, TypeMap)
import q2_feature_table
from q2_types.feature_table import (
FeatureTable, Frequency, RelativeFrequency, PresenceAbsence, Composition)
from q2_types.feature_data import (
FeatureData, Sequence, Taxonomy, AlignedSequence)
from .examples import (feature_table_merge_example,
feature_table_merge_three_tables_example)
citations = Citations.load('citations.bib', package='q2_feature_table')
plugin = Plugin(
name='feature-table',
version=q2_feature_table.__version__,
website='https://github.com/qiime2/q2-feature-table',
package='q2_feature_table',
short_description=('Plugin for working with sample by feature tables.'),
description=('This is a QIIME 2 plugin supporting operations on sample '
'by feature tables, such as filtering, merging, and '
'transforming tables.')
)
plugin.methods.register_function(
function=q2_feature_table.rarefy,
inputs={'table': FeatureTable[Frequency]},
parameters={'sampling_depth': Int % Range(1, None),
'with_replacement': Bool},
outputs=[('rarefied_table', FeatureTable[Frequency])],
input_descriptions={'table': 'The feature table to be rarefied.'},
parameter_descriptions={
'sampling_depth': ('The total frequency that each sample should be '
'rarefied to. Samples where the sum of frequencies '
                           'is less than the sampling depth will not be '
'included in the resulting table unless '
'subsampling is performed with replacement.'),
'with_replacement': ('Rarefy with replacement by sampling from the '
'multinomial distribution instead of rarefying '
'without replacement.')
},
output_descriptions={
'rarefied_table': 'The resulting rarefied feature table.'
},
name='Rarefy table',
description=("Subsample frequencies from all samples so that the sum of "
"frequencies in each sample is equal to sampling-depth."),
citations=[citations['Weiss2017']]
)
plugin.methods.register_function(
function=q2_feature_table.subsample,
inputs={'table': FeatureTable[Frequency]},
parameters={'subsampling_depth': Int % Range(1, None),
'axis': Str % Choices(['sample', 'feature'])},
outputs=[('sampled_table', FeatureTable[Frequency])],
input_descriptions={'table': 'The feature table to be sampled.'},
parameter_descriptions={
'subsampling_depth': ('The total number of samples or features to be '
'randomly sampled. Samples or features that are '
'reduced to a zero sum will not be included in '
'the resulting table.'),
'axis': ('The axis to sample over. If "sample" then samples will be '
'randomly selected to be retained. If "feature" then '
'a random set of features will be selected to be retained.')
},
output_descriptions={
'sampled_table': 'The resulting subsampled feature table.'
},
name='Subsample table',
description=("Randomly pick samples or features, without replacement, "
"from the table.")
)
plugin.methods.register_function(
function=q2_feature_table.presence_absence,
inputs={'table': FeatureTable[Frequency | RelativeFrequency]},
parameters={},
outputs=[('presence_absence_table', FeatureTable[PresenceAbsence])],
input_descriptions={
'table': ('The feature table to be converted into presence/absence '
'abundances.')
},
parameter_descriptions={},
output_descriptions={
'presence_absence_table': ('The resulting presence/absence feature '
'table.')
},
name="Convert to presence/absence",
description="Convert frequencies to binary values indicating presence or "
"absence of a feature in a sample."
)
plugin.methods.register_function(
function=q2_feature_table.relative_frequency,
inputs={'table': FeatureTable[Frequency]},
parameters={},
outputs=[
('relative_frequency_table',
FeatureTable[RelativeFrequency])],
input_descriptions={
'table': 'The feature table to be converted into relative frequencies.'
},
parameter_descriptions={},
output_descriptions={
'relative_frequency_table': ('The resulting relative frequency '
'feature table.')
},
name="Convert to relative frequencies",
description="Convert frequencies to relative frequencies by dividing each "
"frequency in a sample by the sum of frequencies in that "
"sample."
)
plugin.methods.register_function(
function=q2_feature_table.transpose,
inputs={'table': FeatureTable[Frequency]},
parameters={},
outputs=[('transposed_feature_table',
FeatureTable[Frequency])],
input_descriptions={
'table': 'The feature table to be transposed.'
},
parameter_descriptions={},
output_descriptions={
'transposed_feature_table': ('The resulting transposed feature table.')
},
    name='Transpose a feature table',
description='Transpose the rows and columns '
'(typically samples and features) of a feature table.'
)
plugin.methods.register_function(
function=q2_feature_table.group,
inputs={'table': FeatureTable[Frequency]},
parameters={
'mode': Str % Choices({'sum', 'median-ceiling', 'mean-ceiling'}),
'metadata': MetadataColumn[Categorical],
'axis': Str % Choices({'sample', 'feature'})
},
outputs=[
('grouped_table', FeatureTable[Frequency])
],
input_descriptions={
'table': 'The table to group samples or features on.'
},
parameter_descriptions={
'mode': 'How to combine samples or features within a group. `sum` '
'will sum the frequencies across all samples or features '
'within a group; `mean-ceiling` will take the ceiling of the '
'mean of these frequencies; `median-ceiling` will take the '
'ceiling of the median of these frequencies.',
'metadata': 'A column defining the groups. Each unique value will '
'become a new ID for the table on the given `axis`.',
'axis': 'Along which axis to group. Each ID in the given axis must '
'exist in `metadata`.'
},
output_descriptions={
'grouped_table': 'A table that has been grouped along the given '
'`axis`. IDs on that axis are replaced by values in '
'the `metadata` column.'
},
name="Group samples or features by a metadata column",
description="Group samples or features in a feature table using metadata "
"to define the mapping of IDs to a group."
)
i_table, p_overlap_method, o_table = TypeMap({
(FeatureTable[Frequency],
Str % Choices(sorted(q2_feature_table.overlap_methods()))):
FeatureTable[Frequency],
(FeatureTable[RelativeFrequency],
# We don't want to allow summing of RelativeFrequency tables, so remove
# that option from the overlap methods
Str % Choices(sorted(q2_feature_table.overlap_methods() - {'sum'}))):
FeatureTable[RelativeFrequency]
})
plugin.methods.register_function(
function=q2_feature_table.merge,
inputs={'tables': List[i_table]},
parameters={
'overlap_method': p_overlap_method
},
outputs=[
('merged_table', o_table)],
input_descriptions={
'tables': 'The collection of feature tables to be merged.',
},
parameter_descriptions={
'overlap_method': 'Method for handling overlapping ids.',
},
output_descriptions={
'merged_table': ('The resulting merged feature table.'),
},
name="Combine multiple tables",
description="Combines feature tables using the `overlap_method` provided.",
examples={'basic': feature_table_merge_example,
'three_tables': feature_table_merge_three_tables_example},
)
plugin.methods.register_function(
function=q2_feature_table.merge_seqs,
inputs={'data': List[FeatureData[Sequence]]},
parameters={},
outputs=[
('merged_data', FeatureData[Sequence])],
input_descriptions={
'data': 'The collection of feature sequences to be merged.',
},
parameter_descriptions={},
output_descriptions={
'merged_data': ('The resulting collection of feature sequences '
'containing all feature sequences provided.')
},
name="Combine collections of feature sequences",
description="Combines feature data objects which may or may not "
"contain data for the same features. If different feature "
"data is present for the same feature id in the inputs, "
"the data from the first will be propagated to the result."
)
plugin.methods.register_function(
function=q2_feature_table.merge_taxa,
inputs={'data': List[FeatureData[Taxonomy]]},
parameters={},
outputs=[
('merged_data', FeatureData[Taxonomy])],
input_descriptions={
'data': 'The collection of feature taxonomies to be merged.',
},
parameter_descriptions={},
output_descriptions={
'merged_data': ('The resulting collection of feature taxonomies '
'containing all feature taxonomies provided.')
},
name="Combine collections of feature taxonomies",
description="Combines a pair of feature data objects which may or may not "
"contain data for the same features. If different feature "
"data is present for the same feature id in the inputs, "
"the data from the first will be propagated to the result."
)
T1 = TypeMatch([Frequency, RelativeFrequency, PresenceAbsence, Composition])
plugin.methods.register_function(
function=q2_feature_table.rename_ids,
inputs={
'table': FeatureTable[T1],
},
parameters={
'metadata': MetadataColumn[Categorical],
'strict': Bool,
'axis': Str % Choices({'sample', 'feature'})
},
outputs=[
('renamed_table', FeatureTable[T1])
],
input_descriptions={
'table': 'The table to be renamed',
},
parameter_descriptions={
'metadata': 'A metadata column defining the new ids. Each original id '
'must map to a new unique id. If strict mode is used, '
'then every id in the original table must have a new id.',
'strict': 'Whether the naming needs to be strict (each id in '
'the table must have a new id). Otherwise, only the '
'ids described in `metadata` will be renamed and '
'the others will keep their original id names.',
'axis': 'Along which axis to rename the ids.',
},
output_descriptions={
'renamed_table': 'A table which has new ids, where the ids are '
'replaced by values in the `metadata` column.',
},
    name='Rename sample or feature ids in a table',
description='Renames the sample or feature ids in a feature table using '
'metadata to define the new ids.',
)
# TODO: constrain min/max frequency when optional is handled by typemap
plugin.methods.register_function(
function=q2_feature_table.filter_samples,
inputs={'table': FeatureTable[T1]},
parameters={'min_frequency': Int,
'max_frequency': Int,
'min_features': Int,
'max_features': Int,
'metadata': Metadata,
'where': Str,
'exclude_ids': Bool,
'filter_empty_features': Bool},
outputs=[('filtered_table', FeatureTable[T1])],
input_descriptions={
'table': 'The feature table from which samples should be filtered.'
},
parameter_descriptions={
'min_frequency': ('The minimum total frequency that a sample must '
'have to be retained.'),
'max_frequency': ('The maximum total frequency that a sample can '
'have to be retained. If no value is provided '
'this will default to infinity (i.e., no maximum '
'frequency filter will be applied).'),
'min_features': ('The minimum number of features that a sample must '
'have to be retained.'),
'max_features': ('The maximum number of features that a sample can '
'have to be retained. If no value is provided '
'this will default to infinity (i.e., no maximum '
'feature filter will be applied).'),
'metadata': 'Sample metadata used with `where` parameter when '
'selecting samples to retain, or with `exclude_ids` '
'when selecting samples to discard.',
'where': 'SQLite WHERE clause specifying sample metadata criteria '
'that must be met to be included in the filtered feature '
'table. If not provided, all samples in `metadata` that are '
'also in the feature table will be retained.',
'exclude_ids': 'If true, the samples selected by `metadata` or '
'`where` parameters will be excluded from the filtered '
'table instead of being retained.',
'filter_empty_features': 'If true, features which are not present in '
'any retained samples are dropped.',
},
output_descriptions={
'filtered_table': 'The resulting feature table filtered by sample.'
},
name="Filter samples from table",
description="Filter samples from table based on frequency and/or "
"metadata. Any features with a frequency of zero after sample "
"filtering will also be removed. See the filtering tutorial "
"on https://docs.qiime2.org for additional details."
)
plugin.methods.register_function(
function=q2_feature_table.filter_features_conditionally,
inputs={'table': FeatureTable[T1]},
parameters={'prevalence': Float % Range(0, 1),
'abundance': Float % Range(0, 1)
},
outputs=[('filtered_table', FeatureTable[T1])],
input_descriptions={
'table': 'The feature table from which features should be filtered.'
},
parameter_descriptions={
'abundance': ('The minimum relative abundance for a feature to be '
'retained.'),
'prevalence': ('The minimum portion of samples that a feature '
'must have a relative abundance of at least '
'`abundance` to be retained.')
},
output_descriptions={
'filtered_table': 'The resulting feature table filtered by feature.'
},
name="Filter features from a table based on abundance and prevalence",
description=("Filter features based on the relative abundance in a "
"certain portion of samples (i.e., features must have a "
"relative abundance of at least `abundance` in at least "
"`prevalence` number of samples). Any samples with a "
"frequency of zero after feature filtering will also be "
"removed.")
)
plugin.methods.register_function(
function=q2_feature_table.filter_features,
inputs={'table': FeatureTable[Frequency]},
parameters={'min_frequency': Int,
'max_frequency': Int,
'min_samples': Int,
'max_samples': Int,
'metadata': Metadata,
'where': Str,
'exclude_ids': Bool,
'filter_empty_samples': Bool},
outputs=[('filtered_table', FeatureTable[Frequency])],
input_descriptions={
'table': 'The feature table from which features should be filtered.'
},
parameter_descriptions={
'min_frequency': ('The minimum total frequency that a feature must '
'have to be retained.'),
'max_frequency': ('The maximum total frequency that a feature can '
'have to be retained. If no value is provided '
'this will default to infinity (i.e., no maximum '
'frequency filter will be applied).'),
'min_samples': ('The minimum number of samples that a feature must '
'be observed in to be retained.'),
'max_samples': ('The maximum number of samples that a feature can '
'be observed in to be retained. If no value is '
'provided this will default to infinity (i.e., no '
'maximum sample filter will be applied).'),
'metadata': 'Feature metadata used with `where` parameter when '
'selecting features to retain, or with `exclude_ids` '
'when selecting features to discard.',
'where': 'SQLite WHERE clause specifying feature metadata criteria '
'that must be met to be included in the filtered feature '
'table. If not provided, all features in `metadata` that are '
'also in the feature table will be retained.',
'exclude_ids': 'If true, the features selected by `metadata` or '
'`where` parameters will be excluded from the filtered '
'table instead of being retained.',
'filter_empty_samples': 'If true, drop any samples where none of the '
'retained features are present.',
},
output_descriptions={
'filtered_table': 'The resulting feature table filtered by feature.'
},
name="Filter features from table",
description="Filter features from table based on frequency and/or "
"metadata. Any samples with a frequency of zero after feature "
"filtering will also be removed. See the filtering tutorial "
"on https://docs.qiime2.org for additional details."
)
T2 = TypeMatch([Sequence, AlignedSequence])
plugin.methods.register_function(
function=q2_feature_table.filter_seqs,
inputs={
'data': FeatureData[T2],
'table': FeatureTable[Frequency],
},
parameters={
'metadata': Metadata,
'where': Str,
'exclude_ids': Bool
},
outputs=[('filtered_data', FeatureData[T2])],
input_descriptions={
'data': 'The sequences from which features should be filtered.',
'table': 'Table containing feature ids used for id-based filtering.'
},
parameter_descriptions={
'metadata': 'Feature metadata used for id-based filtering, with '
'`where` parameter when selecting features to retain, or '
'with `exclude_ids` when selecting features to discard.',
'where': 'SQLite WHERE clause specifying feature metadata criteria '
'that must be met to be included in the filtered feature '
'table. If not provided, all features in `metadata` that are '
'also in the sequences will be retained.',
'exclude_ids': 'If true, the features selected by the `metadata` '
'(with or without the `where` parameter) or `table` '
'parameter will be excluded from the filtered '
'sequences instead of being retained.'
},
output_descriptions={
'filtered_data': 'The resulting filtered sequences.'
},
name="Filter features from sequences",
description="Filter features from sequences based on a feature table or "
"metadata. See the filtering tutorial on "
"https://docs.qiime2.org for additional details. This method "
"can filter based on ids in a table or a metadata file, but "
"not both (i.e., the table and metadata options are mutually "
"exclusive)."
)
plugin.visualizers.register_function(
function=q2_feature_table.summarize,
inputs={'table': FeatureTable[Frequency | RelativeFrequency |
PresenceAbsence]},
parameters={'sample_metadata': Metadata},
input_descriptions={'table': 'The feature table to be summarized.'},
parameter_descriptions={'sample_metadata': 'The sample metadata.'},
name="Summarize table",
description="Generate visual and tabular summaries of a feature table."
)
plugin.visualizers.register_function(
function=q2_feature_table.tabulate_seqs,
inputs={'data': FeatureData[Sequence | AlignedSequence]},
parameters={},
input_descriptions={'data': 'The feature sequences to be tabulated.'},
parameter_descriptions={},
name='View sequence associated with each feature',
description="Generate tabular view of feature identifier to sequence "
"mapping, including links to BLAST each sequence against "
"the NCBI nt database.",
citations=[citations['NCBI'], citations['NCBI-BLAST']]
)
plugin.visualizers.register_function(
function=q2_feature_table.core_features,
inputs={
'table': FeatureTable[Frequency]
},
parameters={
'min_fraction': Float % Range(0.0, 1.0, inclusive_start=False),
'max_fraction': Float % Range(0.0, 1.0, inclusive_end=True),
'steps': Int % Range(2, None)
},
name='Identify core features in table',
description=('Identify "core" features, which are features observed in a '
'user-defined fraction of the samples. Since the core '
'features are a function of the fraction of samples that the '
'feature must be observed in to be considered core, this is '
'computed over a range of fractions defined by the '
'`min_fraction`, `max_fraction`, and `steps` parameters.'),
input_descriptions={
'table': 'The feature table to use in core features calculations.'
},
parameter_descriptions={
'min_fraction': 'The minimum fraction of samples that a feature must '
'be observed in for that feature to be considered a '
'core feature.',
'max_fraction': 'The maximum fraction of samples that a feature must '
'be observed in for that feature to be considered a '
'core feature.',
'steps': 'The number of steps to take between `min_fraction` and '
'`max_fraction` for core features calculations. This '
'parameter has no effect if `min_fraction` and '
'`max_fraction` are the same value.'
}
)
plugin.visualizers.register_function(
function=q2_feature_table.heatmap,
inputs={
'table': FeatureTable[Frequency]
},
parameters={
'sample_metadata': MetadataColumn[Categorical],
'feature_metadata': MetadataColumn[Categorical],
'normalize': Bool,
'title': Str,
'metric': Str % Choices(q2_feature_table.heatmap_choices['metric']),
'method': Str % Choices(q2_feature_table.heatmap_choices['method']),
'cluster': Str % Choices(q2_feature_table.heatmap_choices['cluster']),
'color_scheme': Str % Choices(
q2_feature_table.heatmap_choices['color_scheme']),
},
name='Generate a heatmap representation of a feature table',
description='Generate a heatmap representation of a feature table with '
'optional clustering on both the sample and feature axes.\n\n'
'Tip: To generate a heatmap containing taxonomic annotations, '
'use `qiime taxa collapse` to collapse the feature table at '
'the desired taxonomic level.',
input_descriptions={
'table': 'The feature table to visualize.'
},
parameter_descriptions={
'sample_metadata': 'Annotate the sample IDs with these sample '
'metadata values. When metadata is present and '
'`cluster`=\'feature\', samples will be sorted by '
'the metadata values.',
'feature_metadata': 'Annotate the feature IDs with these feature '
'metadata values. When metadata is present and '
'`cluster`=\'sample\', features will be sorted by '
'the metadata values.',
        'normalize': 'Normalize the feature table by adding a pseudocount '
'of 1 and then taking the log10 of the table.',
'title': 'Optional custom plot title.',
'metric': 'Metrics exposed by seaborn (see http://seaborn.pydata.org/'
'generated/seaborn.clustermap.html#seaborn.clustermap for '
'more detail).',
'method': 'Clustering methods exposed by seaborn (see http://seaborn.'
'pydata.org/generated/seaborn.clustermap.html#seaborn.clust'
'ermap for more detail).',
'cluster': 'Specify which axes to cluster.',
'color_scheme': 'The matplotlib colorscheme to generate the heatmap '
'with.',
},
citations=[citations['Hunter2007Matplotlib']]
)
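# Once this plugin is installed, each registered method above becomes a q2cli
# action. For example (hypothetical artifact names), the `rarefy` method would
# be invoked as:
#
#     qiime feature-table rarefy \
#         --i-table table.qza \
#         --p-sampling-depth 1000 \
#         --o-rarefied-table rarefied-table.qza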
|
# python standard library
import sys
# third-party
import pandas
import matplotlib
import matplotlib.pyplot as plot
matplotlib.style.use('ggplot')
GENDER_COUNT = 24
MALES_PROMOTED = 21
FEMALES_PROMOTED = 14
GENDER_DIFFERENCE = MALES_PROMOTED - FEMALES_PROMOTED
FEMALES_NOT_PROMOTED = GENDER_COUNT - FEMALES_PROMOTED
MALES_NOT_PROMOTED = GENDER_COUNT - MALES_PROMOTED
experiment_data = pandas.DataFrame({"Promoted": [MALES_PROMOTED,
FEMALES_PROMOTED],
"Not Promoted": [MALES_NOT_PROMOTED,
FEMALES_NOT_PROMOTED]},
index='male female'.split(),
columns=["Promoted", "Not Promoted"])
experiment_frame = experiment_data.copy()
experiment_frame['Total'] = sum((experiment_frame[column] for column in
experiment_frame.columns))
last_row = pandas.DataFrame(experiment_frame.sum()).transpose()
last_row.index = pandas.Index(['Total'])
experiment_frame = pandas.concat((experiment_frame, last_row))
class IndentOutput(object):
"""Fake file output for csv-writing """
@classmethod
def write(cls, line):
"""Write line to stdout with three spaces prepended"""
sys.stdout.write(" {0}".format(line))
print('.. csv-table:: Experiment Outcome')
print(' :header: ,{0}\n'.format(','.join(experiment_frame.columns)))
experiment_frame.to_csv(IndentOutput, header=False)
print('.. csv-table:: Experiment proportions')
print(' :header: ,{0}\n'.format(','.join(experiment_frame.columns)))
totals = pandas.Series([GENDER_COUNT, GENDER_COUNT, GENDER_COUNT * 2],
index='male female Total'.split())
total_frame = pandas.DataFrame({'Promoted': totals,
"Not Promoted": totals,
"Total": totals})
proportions = experiment_frame/total_frame
proportions.to_csv(IndentOutput, header=False,
columns=['Promoted', 'Not Promoted', 'Total'],
float_format="%.3f")
path = 'figures/gender_experiment_bar.svg'
figure = plot.figure()
axe = figure.gca()
experiment_data.plot(kind='bar', ax=axe)
figure.savefig(path)
print('.. image:: {0}'.format(path))
print(" \\frac{{{0}}}{{{2}}}- \\frac{{{1}}}{{{2}}}&=\\frac{{{3}}}{{{2}}}\\\\".format(MALES_PROMOTED,
FEMALES_PROMOTED,
GENDER_COUNT,
GENDER_DIFFERENCE))
print(" &\\approx {:.3f}\\\\".format(GENDER_DIFFERENCE/GENDER_COUNT))
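# For the constants above, the printed LaTeX works out to
# 21/24 - 14/24 = 7/24, which is approximately 0.292.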
|
# python3.7
"""Utility functions for latent codes manipulation."""
import numpy as np
from sklearn import svm
from .logger import setup_logger
__all__ = ['train_boundary', 'project_boundary', 'linear_interpolate']
def train_boundary(latent_codes,
scores,
chosen_num_or_ratio=0.02,
split_ratio=0.7,
invalid_value=None,
logger=None):
"""Trains boundary in latent space with offline predicted attribute scores.
Given a collection of latent codes and the attribute scores predicted from the
corresponding images, this function will train a linear SVM by treating it as
a bi-classification problem. Basically, the samples with highest attribute
scores are treated as positive samples, while those with lowest scores as
negative. For now, the latent code can ONLY be with 1 dimension.
NOTE: The returned boundary is with shape (1, latent_space_dim), and also
normalized with unit norm.
Args:
latent_codes: Input latent codes as training data.
scores: Input attribute scores used to generate training labels.
chosen_num_or_ratio: How many samples will be chosen as positive (negative)
samples. If this field lies in range (0, 0.5], `chosen_num_or_ratio *
latent_codes_num` will be used. Otherwise, `min(chosen_num_or_ratio,
0.5 * latent_codes_num)` will be used. (default: 0.02)
split_ratio: Ratio to split training and validation sets. (default: 0.7)
invalid_value: This field is used to filter out data. (default: None)
logger: Logger for recording log messages. If set as `None`, a default
logger, which prints messages from all levels to screen, will be created.
(default: None)
Returns:
A decision boundary with type `numpy.ndarray`.
Raises:
ValueError: If the input `latent_codes` or `scores` are with invalid format.
"""
if not logger:
logger = setup_logger(work_dir='', logger_name='train_boundary')
if (not isinstance(latent_codes, np.ndarray) or
not len(latent_codes.shape) == 2):
    raise ValueError(f'Input `latent_codes` should be with type '
f'`numpy.ndarray`, and shape [num_samples, '
f'latent_space_dim]!')
num_samples = latent_codes.shape[0]
latent_space_dim = latent_codes.shape[1]
if (not isinstance(scores, np.ndarray) or not len(scores.shape) == 2 or
not scores.shape[0] == num_samples or not scores.shape[1] == 1):
raise ValueError(f'Input `scores` should be with type `numpy.ndarray`, and '
f'shape [num_samples, 1], where `num_samples` should be '
f'exactly same as that of input `latent_codes`!')
if chosen_num_or_ratio <= 0:
raise ValueError(f'Input `chosen_num_or_ratio` should be positive, '
f'but {chosen_num_or_ratio} received!')
logger.info(f'Filtering training data.')
if invalid_value is not None:
    # Keep `scores` 2-D so that the axis-based sort below still works.
    valid_mask = scores[:, 0] != invalid_value
    latent_codes = latent_codes[valid_mask]
    scores = scores[valid_mask]
logger.info(f'Sorting scores to get positive and negative samples.')
sorted_idx = np.argsort(scores, axis=0)[::-1, 0]
latent_codes = latent_codes[sorted_idx]
scores = scores[sorted_idx]
num_samples = latent_codes.shape[0]
if 0 < chosen_num_or_ratio <= 1:
chosen_num = int(num_samples * chosen_num_or_ratio)
else:
chosen_num = chosen_num_or_ratio
chosen_num = min(chosen_num, num_samples // 2)
  logger.info(f'Splitting training and validation sets:')
train_num = int(chosen_num * split_ratio)
val_num = chosen_num - train_num
# Positive samples.
positive_idx = np.arange(chosen_num)
np.random.shuffle(positive_idx)
positive_train = latent_codes[:chosen_num][positive_idx[:train_num]]
positive_val = latent_codes[:chosen_num][positive_idx[train_num:]]
# Negative samples.
negative_idx = np.arange(chosen_num)
np.random.shuffle(negative_idx)
negative_train = latent_codes[-chosen_num:][negative_idx[:train_num]]
negative_val = latent_codes[-chosen_num:][negative_idx[train_num:]]
# Training set.
train_data = np.concatenate([positive_train, negative_train], axis=0)
  train_label = np.concatenate([np.ones(train_num, dtype=int),
                                np.zeros(train_num, dtype=int)], axis=0)
logger.info(f' Training: {train_num} positive, {train_num} negative.')
# Validation set.
val_data = np.concatenate([positive_val, negative_val], axis=0)
  val_label = np.concatenate([np.ones(val_num, dtype=int),
                              np.zeros(val_num, dtype=int)], axis=0)
logger.info(f' Validation: {val_num} positive, {val_num} negative.')
# Remaining set.
remaining_num = num_samples - chosen_num * 2
remaining_data = latent_codes[chosen_num:-chosen_num]
remaining_scores = scores[chosen_num:-chosen_num]
decision_value = (scores[0] + scores[-1]) / 2
  remaining_label = np.ones(remaining_num, dtype=int)
remaining_label[remaining_scores.ravel() < decision_value] = 0
remaining_positive_num = np.sum(remaining_label == 1)
remaining_negative_num = np.sum(remaining_label == 0)
logger.info(f' Remaining: {remaining_positive_num} positive, '
f'{remaining_negative_num} negative.')
logger.info(f'Training boundary.')
clf = svm.SVC(kernel='linear')
classifier = clf.fit(train_data, train_label)
logger.info(f'Finish training.')
if val_num:
val_prediction = classifier.predict(val_data)
correct_num = np.sum(val_label == val_prediction)
logger.info(f'Accuracy for validation set: '
f'{correct_num} / {val_num * 2} = '
f'{correct_num / (val_num * 2):.6f}')
if remaining_num:
remaining_prediction = classifier.predict(remaining_data)
correct_num = np.sum(remaining_label == remaining_prediction)
logger.info(f'Accuracy for remaining set: '
f'{correct_num} / {remaining_num} = '
f'{correct_num / remaining_num:.6f}')
a = classifier.coef_.reshape(1, latent_space_dim).astype(np.float32)
return a / np.linalg.norm(a)
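# A usage sketch (hypothetical data and dimensions; in practice the latent
# codes come from a generator and the scores from an attribute predictor):
#
#     codes = np.random.randn(10000, 512).astype(np.float32)
#     scores = np.random.rand(10000, 1)
#     boundary = train_boundary(codes, scores, chosen_num_or_ratio=0.02)
#     assert boundary.shape == (1, 512)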
def project_boundary(primal, *args):
"""Projects the primal boundary onto condition boundaries.
The function is used for conditional manipulation, where the projected vector
  will be subtracted from the normal direction of the original boundary. Here,
all input boundaries are supposed to have already been normalized to unit
norm, and with same shape [1, latent_space_dim].
NOTE: For now, at most two condition boundaries are supported.
Args:
primal: The primal boundary.
*args: Other boundaries as conditions.
Returns:
A projected boundary (also normalized to unit norm), which is orthogonal to
all condition boundaries.
Raises:
NotImplementedError: If there are more than two condition boundaries.
"""
if len(args) > 2:
raise NotImplementedError(f'This function supports projecting with at most '
f'two conditions.')
assert len(primal.shape) == 2 and primal.shape[0] == 1
if not args:
return primal
if len(args) == 1:
cond = args[0]
assert (len(cond.shape) == 2 and cond.shape[0] == 1 and
cond.shape[1] == primal.shape[1])
new = primal - primal.dot(cond.T) * cond
return new / np.linalg.norm(new)
if len(args) == 2:
cond_1 = args[0]
cond_2 = args[1]
assert (len(cond_1.shape) == 2 and cond_1.shape[0] == 1 and
cond_1.shape[1] == primal.shape[1])
assert (len(cond_2.shape) == 2 and cond_2.shape[0] == 1 and
cond_2.shape[1] == primal.shape[1])
primal_cond_1 = primal.dot(cond_1.T)
primal_cond_2 = primal.dot(cond_2.T)
cond_1_cond_2 = cond_1.dot(cond_2.T)
alpha = (primal_cond_1 - primal_cond_2 * cond_1_cond_2) / (
1 - cond_1_cond_2 ** 2 + 1e-8)
beta = (primal_cond_2 - primal_cond_1 * cond_1_cond_2) / (
1 - cond_1_cond_2 ** 2 + 1e-8)
new = primal - alpha * cond_1 - beta * cond_2
return new / np.linalg.norm(new)
raise NotImplementedError
def linear_interpolate(latent_code,
boundary,
start_distance=-3.0,
end_distance=3.0,
steps=10):
"""Manipulates the given latent code with respect to a particular boundary.
Basically, this function takes a latent code and a boundary as inputs, and
outputs a collection of manipulated latent codes. For example, let `steps` to
be 10, then the input `latent_code` is with shape [1, latent_space_dim], input
`boundary` is with shape [1, latent_space_dim] and unit norm, the output is
with shape [10, latent_space_dim]. The first output latent code is
`start_distance` away from the given `boundary`, while the last output latent
code is `end_distance` away from the given `boundary`. Remaining latent codes
are linearly interpolated.
Input `latent_code` can also be with shape [1, num_layers, latent_space_dim]
to support W+ space in Style GAN. In this case, all features in W+ space will
be manipulated same as each other. Accordingly, the output will be with shape
[10, num_layers, latent_space_dim].
NOTE: Distance is sign sensitive.
Args:
latent_code: The input latent code for manipulation.
boundary: The semantic boundary as reference.
start_distance: The distance to the boundary where the manipulation starts.
(default: -3.0)
end_distance: The distance to the boundary where the manipulation ends.
(default: 3.0)
steps: Number of steps to move the latent code from start position to end
position. (default: 10)
"""
assert (latent_code.shape[0] == 1 and boundary.shape[0] == 1 and
len(boundary.shape) == 2 and
boundary.shape[1] == latent_code.shape[-1])
linspace = np.linspace(start_distance, end_distance, steps)
if len(latent_code.shape) == 2:
linspace = linspace - latent_code.dot(boundary.T)
linspace = linspace.reshape(-1, 1).astype(np.float32)
return latent_code + linspace * boundary
if len(latent_code.shape) == 3:
linspace = linspace.reshape(-1, 1, 1).astype(np.float32)
return latent_code + linspace * boundary.reshape(1, 1, -1)
raise ValueError(f'Input `latent_code` should be with shape '
f'[1, latent_space_dim] or [1, N, latent_space_dim] for '
f'W+ space in Style GAN!\n'
f'But {latent_code.shape} is received.')
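# A manipulation sketch tying the three utilities together (hypothetical
# 512-dim latent space; `codes`, `scores`, and `condition` are placeholders):
#
#     code = np.random.randn(1, 512).astype(np.float32)
#     boundary = train_boundary(codes, scores)          # (1, 512), unit norm
#     boundary = project_boundary(boundary, condition)  # optional conditioning
#     edited = linear_interpolate(code, boundary, steps=10)  # (10, 512)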
|
from setuptools import setup, find_packages
setup(
name='scrapy-djangoitem',
version='1.1.1',
url='https://github.com/scrapy-plugins/scrapy-djangoitem',
description='Scrapy extension to write scraped items using Django models',
long_description=open('README.rst').read(),
author='Scrapy developers',
license='BSD',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
classifiers=[
'Framework :: Scrapy',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities',
'Framework :: Django',
],
install_requires=['six'],
requires=['scrapy (>=0.24.5)', 'django'],
)
|
import os
import pytest
from virtool.subtractions.utils import (
check_subtraction_file_type,
get_subtraction_files,
join_subtraction_path,
rename_bowtie_files,
)
def test_join_subtraction_path(tmp_path, config):
assert join_subtraction_path(config, "bar") == tmp_path / "subtractions" / "bar"
async def test_get_subtraction_files(snapshot, pg, test_subtraction_files):
assert await get_subtraction_files(pg, "foo") == snapshot
def test_rename_bowtie_files(tmp_path):
test_dir = tmp_path / "subtractions"
test_dir.mkdir()
test_dir.joinpath("reference.1.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.2.bt2").write_text("Bowtie2 file")
test_dir.joinpath("reference.3.bt2").write_text("Bowtie2 file")
rename_bowtie_files(test_dir)
assert set(os.listdir(test_dir)) == {
"subtraction.1.bt2",
"subtraction.2.bt2",
"subtraction.3.bt2",
}
@pytest.mark.parametrize("file_type", ["fasta", "bowtie2"])
def test_check_subtraction_file_type(file_type):
if file_type == "fasta":
result = check_subtraction_file_type("subtraction.fa.gz")
assert result == "fasta"
if file_type == "bowtie2":
result = check_subtraction_file_type("subtraction.1.bt2")
assert result == "bowtie2"
|
# -*- coding: utf-8 -*-
"""
Module to compute least cost xmission paths, distances, and costs one or
more SC points
"""
from concurrent.futures import as_completed
import geopandas as gpd
import json
import logging
import numpy as np
import os
import pandas as pd
from pyproj.crs import CRS
import rasterio
from scipy.spatial import cKDTree
from shapely.geometry import Point
import time
from reV.handlers.exclusions import ExclusionLayers
from reV.supply_curve.points import SupplyCurveExtent
from rex.utilities.execution import SpawnProcessPool
from rex.utilities.loggers import log_mem
from reVX.least_cost_xmission.config import (TRANS_LINE_CAT, LOAD_CENTER_CAT,
SINK_CAT, SUBSTATION_CAT)
from reVX.least_cost_xmission.least_cost_paths import LeastCostPaths
from reVX.least_cost_xmission.trans_cap_costs import TransCapCosts
logger = logging.getLogger(__name__)
class LeastCostXmission(LeastCostPaths):
"""
Compute Least Cost tie-line paths and full transmission cap cost
for all possible connections to all supply curve points
"""
REQUIRED_LAYRES = ['transmission_barrier', 'ISO_regions']
def __init__(self, cost_fpath, features_fpath, resolution=128,
xmission_config=None):
"""
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
features_fpath : str
Path to geopackage with transmission features
resolution : int, optional
SC point resolution, by default 128
xmission_config : str | dict | XmissionConfig, optional
Path to Xmission config .json, dictionary of Xmission config
.jsons, or preloaded XmissionConfig objects, by default None
"""
self._check_layers(cost_fpath)
self._config = TransCapCosts._parse_config(
xmission_config=xmission_config)
(self._sc_points, self._features,
self._sub_lines_mapping, self._shape) =\
self._map_to_costs(cost_fpath, features_fpath,
resolution=resolution)
self._cost_fpath = cost_fpath
self._tree = None
self._sink_coords = None
self._min_line_len = (resolution * 0.09) / 2
logger.debug('{} initialized'.format(self))
def __repr__(self):
msg = ("{} to be computed for {} sc_points and {} features"
.format(self.__class__.__name__,
len(self.sc_points),
len(self.features)))
return msg
@property
def sc_points(self):
"""
Table of supply curve points
Returns
-------
gpd.GeoDataFrame
"""
return self._sc_points
@property
def features(self):
"""
Table of features to compute paths for
Returns
-------
pandas.DataFrame
"""
return self._features
@property
def sub_lines_mapping(self):
"""
Series mapping substations to the transmission lines connected
to each substation
Returns
-------
pandas.Series
"""
return self._sub_lines_mapping
@property
def sink_coords(self):
"""
Inf sink coordinates (row, col)
Returns
-------
ndarray
"""
if self._sink_coords is None:
mask = self.features['category'] == SINK_CAT
self._sink_coords = self.features.loc[mask, ['row', 'col']].values
return self._sink_coords
@property
def sink_tree(self):
"""
cKDTree for infinite sinks
Returns
-------
cKDTree
"""
if self._tree is None:
self._tree = cKDTree(self.sink_coords)
return self._tree
@staticmethod
def _load_trans_feats(features_fpath):
"""
Load existing transmission features from disk. Substations will be
loaded from cache file if it exists
Parameters
----------
features_fpath : str
Path to geopackage with trans features
Returns
-------
features : gpd.GeoDataFrame
DataFrame of transmission features
sub_line_map : pandas.Series
            Mapping of sub-station trans_gid to connected transmission line
trans_gids
"""
logger.debug('Loading transmission features')
features = gpd.read_file(features_fpath)
features = features.drop(columns=['bgid', 'egid', 'cap_left'],
errors='ignore')
mapping = {'gid': 'trans_gid', 'trans_gids': 'trans_line_gids'}
features = features.rename(columns=mapping)
features['min_volts'] = 0
features['max_volts'] = 0
# Transmission lines
mask = features['category'] == TRANS_LINE_CAT
voltage = features.loc[mask, 'voltage'].values
features.loc[mask, 'min_volts'] = voltage
features.loc[mask, 'max_volts'] = voltage
# Load Center and Sinks
mask = features['category'].isin([LOAD_CENTER_CAT, SINK_CAT])
features.loc[mask, 'min_volts'] = 1
features.loc[mask, 'max_volts'] = 9999
sub_lines_map = {}
mask = features['category'] == SUBSTATION_CAT
bad_subs = np.zeros(len(features), dtype=bool)
for idx, row in features.loc[mask].iterrows():
gid = row['trans_gid']
lines = row['trans_line_gids']
if isinstance(lines, str):
lines = json.loads(lines)
sub_lines_map[gid] = lines
lines_mask = features['trans_gid'].isin(lines)
voltage = features.loc[lines_mask, 'voltage'].values
if np.max(voltage) >= 69:
features.loc[idx, 'min_volts'] = np.min(voltage)
features.loc[idx, 'max_volts'] = np.max(voltage)
else:
bad_subs[idx] = True
if any(bad_subs):
msg = ("The following sub-stations do not have the minimum "
"required voltage of 69 kV and will be dropped:\n{}"
.format(features.loc[bad_subs, 'trans_gid']))
logger.warning(msg)
features = features.loc[~bad_subs].reset_index(drop=True)
return features, pd.Series(sub_lines_map)
@staticmethod
def _create_sc_points(cost_fpath, resolution=128):
"""
        Load SC points, convert row/col to array wide, and determine x/y for
reV projection
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
resolution : int, optional
SC point resolution, by default 128
        Returns
        -------
        sc_points : gpd.GeoDataFrame
SC points
"""
logger.debug('Loading Supply Curve Points')
sce = SupplyCurveExtent(cost_fpath, resolution=resolution)
sc_points = sce.points.rename(columns={'row_ind': 'sc_row_ind',
'col_ind': 'sc_col_ind'})
shape = sce.excl_shape
sc_points['sc_point_gid'] = sc_points.index.values
row = np.round(sc_points['sc_row_ind'] * resolution + resolution / 2)
row = np.where(row >= shape[0], shape[0] - 1, row)
sc_points['row'] = row.astype(int)
col = np.round(sc_points['sc_col_ind'] * resolution + resolution / 2)
col = np.where(col >= shape[1], shape[1] - 1, col)
sc_points['col'] = col.astype(int)
return sc_points
@staticmethod
def _get_feature_cost_indices(features, crs, transform, shape):
"""
        Map features to cost row, col indices using rasterio transform
Parameters
----------
features : gpd.GeoDataFrame
GeoDataFrame of features to map to cost raster
crs : pyproj.crs.CRS
CRS of cost raster
transform : raster.Affine
Transform of cost raster
shape : tuple
Cost raster shape
Returns
-------
        row : ndarray
            Vector of row indices for each feature
        col : ndarray
            Vector of col indices for each feature
        mask : ndarray
            Boolean mask of features with indices inside the cost raster
"""
row, col, mask = super(LeastCostXmission,
LeastCostXmission)._get_feature_cost_indices(
features, crs, transform, shape)
t_lines = features['category'] == TRANS_LINE_CAT
mask |= t_lines
row[t_lines] = np.where(row[t_lines] >= 0, row[t_lines], 0)
row[t_lines] = np.where(row[t_lines] < shape[0], row[t_lines],
shape[0] - 1)
col[t_lines] = np.where(col[t_lines] >= 0, col[t_lines], 0)
col[t_lines] = np.where(col[t_lines] < shape[1], col[t_lines],
shape[1] - 1)
return row, col, mask
@classmethod
def _map_to_costs(cls, cost_fpath, features_fpath, resolution=128):
"""
Map supply curve points and transmission features to cost array pixel
indices
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
features_fpath : str
Path to geopackage with transmission features
resolution : int, optional
SC point resolution, by default 128
Returns
-------
        sc_points : gpd.GeoDataFrame
            Table of supply curve points to connect to transmission
features : gpd.GeoDataFrame
Table of transmission features
sub_lines_map : pandas.Series
Series mapping substations to the transmission lines connected
to each substation
"""
with ExclusionLayers(cost_fpath) as f:
crs = CRS.from_string(f.crs)
transform = rasterio.Affine(*f.profile['transform'])
shape = f.shape
regions = f['ISO_regions']
features, sub_lines_map = cls._load_trans_feats(features_fpath)
row, col, mask = cls._get_feature_cost_indices(features, crs,
transform, shape)
if any(~mask):
msg = ("The following features are outside of the cost exclusion "
"domain and will be dropped:\n{}"
.format(features.loc[~mask, 'trans_gid']))
logger.warning(msg)
row = row[mask]
col = col[mask]
features = features.loc[mask].reset_index(drop=True)
features['row'] = row
features['col'] = col
features['region'] = regions[row, col]
logger.debug('Converting SC points to GeoDataFrame')
sc_points = cls._create_sc_points(cost_fpath, resolution=resolution)
x, y = rasterio.transform.xy(transform, sc_points['row'].values,
sc_points['col'].values)
geo = [Point(xy) for xy in zip(x, y)]
sc_points = gpd.GeoDataFrame(sc_points, crs=features.crs,
geometry=geo)
return sc_points, features, sub_lines_map, shape
def _clip_to_sc_point(self, sc_point, tie_line_voltage, nn_sinks=2,
clipping_buffer=1.05):
"""
Clip costs raster to AOI around SC point, and get substations,
load centers, and sinks within the clipped region.
Parameters
----------
sc_point : gpd.GeoSeries
            SC point to clip raster around
        tie_line_voltage : int
            Tie-line voltage (kV); features whose max voltage is below this
            value are dropped
nn_sinks : int, optional
Number of nearest neighbor sinks to clip to
clipping_buffer : float, optional
Buffer to increase clipping radius by, by default 1.05
Returns
-------
        sc_features : pd.DataFrame
            Substations, load centers, sinks, and transmission lines within
            the clipped region around the SC point
        radius : int
            Clipping radius in cost raster pixels
"""
logger.debug('Clipping features to sc_point {}'.format(sc_point.name))
if len(self.sink_coords) > 2:
row, col = sc_point[['row', 'col']].values
_, pos = self.sink_tree.query([row, col], k=nn_sinks)
radius = np.abs(self.sink_coords[pos] - np.array([row, col])).max()
radius = int(np.ceil(radius * clipping_buffer))
logger.debug('Radius to {} nearest sink is: {}'
.format(nn_sinks, radius))
row_min = max(row - radius, 0)
row_max = min(row + radius, self._shape[0])
col_min = max(col - radius, 0)
col_max = min(col + radius, self._shape[1])
logger.debug('Extracting all transmission features in the row '
'slice {}:{} and column slice {}:{}'
.format(row_min, row_max, col_min, col_max))
# Clip transmission features
mask = self.features['row'] >= row_min
mask &= self.features['row'] < row_max
mask &= self.features['col'] >= col_min
mask &= self.features['col'] < col_max
sc_features = self.features.loc[mask].copy(deep=True)
logger.debug('{} transmission features found in clipped area with '
'radius {}'
.format(len(sc_features), radius))
else:
radius = None
sc_features = self.features.copy(deep=True)
mask = self.features['max_volts'] >= tie_line_voltage
sc_features = sc_features.loc[mask].copy(deep=True)
logger.debug('{} transmission features found in clipped area with '
'minimum max voltage of {}'
.format(len(sc_features), tie_line_voltage))
# Find t-lines connected to substations within clip
logger.debug('Collecting transmission lines connected to substations')
mask = sc_features['category'] == SUBSTATION_CAT
if mask.any():
trans_gids = sc_features.loc[mask, 'trans_gid'].values
trans_gids = \
np.concatenate(self.sub_lines_mapping.loc[trans_gids].values)
trans_gids = np.unique(trans_gids)
line_mask = self.features['trans_gid'].isin(trans_gids)
trans_lines = self.features.loc[line_mask].copy(deep=True)
line_mask = trans_lines['trans_gid'].isin(sc_features['trans_gid'])
trans_lines = trans_lines.loc[~line_mask]
logger.debug('Adding all {} transmission lines connected to '
'substations with minimum max voltage of {}'
.format(len(trans_lines), tie_line_voltage))
            sc_features = pd.concat([sc_features, trans_lines])
return sc_features, radius
def process_sc_points(self, capacity_class, sc_point_gids=None, nn_sinks=2,
clipping_buffer=1.05, barrier_mult=100,
max_workers=None):
"""
        Compute Least Cost Transmission for desired sc_points
Parameters
----------
capacity_class : str | int
Capacity class of transmission features to connect supply curve
points to
sc_point_gids : list, optional
List of sc_point_gids to connect to, by default None
nn_sinks : int, optional
Number of nearest neighbor sinks to use for clipping radius
calculation, by default 2
clipping_buffer : float, optional
Buffer to expand clipping radius by, by default 1.05
barrier_mult : int, optional
            Transmission barrier multiplier, used when computing the least
cost tie-line path, by default 100
max_workers : int, optional
Number of workers to use for processing, if 1 run in serial,
if None use all available cores, by default None
Returns
-------
least_costs : pandas.DataFrame
Least cost connections between all supply curve points and the
transmission features with the given capacity class that are within
"nn_sink" nearest infinite sinks
"""
max_workers = os.cpu_count() if max_workers is None else max_workers
if sc_point_gids is None:
sc_point_gids = self.sc_points['sc_point_gid'].values
tie_line_voltage = self._config.capacity_to_kv(capacity_class)
least_costs = []
if max_workers > 1:
logger.info('Computing Least Cost Transmission for SC points in '
'parallel on {} workers'.format(max_workers))
loggers = [__name__, 'reV', 'reVX']
with SpawnProcessPool(max_workers=max_workers,
loggers=loggers) as exe:
futures = []
for _, sc_point in self.sc_points.iterrows():
gid = sc_point['sc_point_gid']
if gid in sc_point_gids:
sc_features, radius = self._clip_to_sc_point(
sc_point, tie_line_voltage, nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer)
future = exe.submit(TransCapCosts.run,
self._cost_fpath,
sc_point.copy(deep=True),
sc_features, capacity_class,
radius=radius,
xmission_config=self._config,
barrier_mult=barrier_mult,
min_line_length=self._min_line_len)
futures.append(future)
for i, future in enumerate(as_completed(futures)):
sc_costs = future.result()
if sc_costs is not None:
least_costs.append(sc_costs)
logger.debug('SC point {} of {} complete!'
.format(i + 1, len(futures)))
log_mem(logger)
else:
logger.info('Computing Least Cost Transmission for SC points in '
'serial')
i = 1
for _, sc_point in self.sc_points.iterrows():
gid = sc_point['sc_point_gid']
if gid in sc_point_gids:
sc_features, radius = self._clip_to_sc_point(
sc_point, tie_line_voltage, nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer)
sc_costs = TransCapCosts.run(
self._cost_fpath, sc_point.copy(deep=True),
sc_features, capacity_class,
radius=radius,
xmission_config=self._config,
barrier_mult=barrier_mult,
min_line_length=self._min_line_len)
if sc_costs is not None:
least_costs.append(sc_costs)
logger.debug('SC point {} of {} complete!'
.format(i, len(sc_point_gids)))
log_mem(logger)
i += 1
least_costs = pd.concat(least_costs).sort_values(['sc_point_gid',
'trans_gid'])
capacity_class = self._config._parse_cap_class(capacity_class)
least_costs['max_cap'] = self._config['power_classes'][capacity_class]
lcp_frac = (len(least_costs['sc_point_gid'].unique())
/ len(sc_point_gids) * 100)
        logger.info('{:.4f}% of requested sc point gids were successfully '
'mapped to transmission features'.format(lcp_frac))
return least_costs.reset_index(drop=True)
@classmethod
def run(cls, cost_fpath, features_fpath, capacity_class, resolution=128,
xmission_config=None, sc_point_gids=None, nn_sinks=2,
clipping_buffer=1.05, barrier_mult=100, max_workers=None):
"""
        Find Least Cost Transmission connections between desired sc_points and
        given transmission features for desired capacity class
Parameters
----------
cost_fpath : str
Path to h5 file with cost rasters and other required layers
features_fpath : str
Path to geopackage with transmission features
capacity_class : str | int
Capacity class of transmission features to connect supply curve
points to
resolution : int, optional
SC point resolution, by default 128
xmission_config : str | dict | XmissionConfig, optional
Path to Xmission config .json, dictionary of Xmission config
.jsons, or preloaded XmissionConfig objects, by default None
sc_point_gids : list, optional
List of sc_point_gids to connect to, by default None
nn_sinks : int, optional
Number of nearest neighbor sinks to use for clipping radius
calculation, by default 2
clipping_buffer : float, optional
Buffer to expand clipping radius by, by default 1.05
barrier_mult : int, optional
            Transmission barrier multiplier, used when computing the least
cost tie-line path, by default 100
max_workers : int, optional
Number of workers to use for processing, if 1 run in serial,
if None use all available cores, by default None
Returns
-------
least_costs : pandas.DataFrame
Least cost connections between all supply curve points and the
transmission features with the given capacity class that are within
"nn_sink" nearest infinite sinks
"""
ts = time.time()
lcx = cls(cost_fpath, features_fpath, resolution=resolution,
xmission_config=xmission_config)
least_costs = lcx.process_sc_points(capacity_class,
sc_point_gids=sc_point_gids,
nn_sinks=nn_sinks,
clipping_buffer=clipping_buffer,
barrier_mult=barrier_mult,
max_workers=max_workers)
logger.info('{} connections were made to {} SC points in {:.4f} '
'minutes'
.format(len(least_costs),
len(least_costs['sc_point_gid'].unique()),
(time.time() - ts) / 60))
return least_costs
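# A minimal usage sketch for the classmethod above. The class name and file
# paths below are illustrative assumptions; only the signature of `run` comes
# from the code in this module:
#
#     least_costs = LeastCostXmission.run(
#         '/data/costs.h5',        # cost_fpath: h5 with cost rasters
#         '/data/features.gpkg',   # features_fpath: transmission features
#         capacity_class='100MW',  # capacity class to connect to
#         max_workers=None)        # None -> use all available cores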
|
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port),
cfg.StrOpt('backdoor_socket',
help="Enable eventlet backdoor, using the provided path"
" as a unix socket that can receive connections. This"
" option is mutually exclusive with 'backdoor_port' in"
" that only one should be provided. If both are provided"
" then the existence of this option overrides the usage of"
" that option.")
]
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
service_opts = [
cfg.BoolOpt('log_options',
default=True,
help='Enables or disables logging values of all registered '
'options when starting a service (at DEBUG level).'),
cfg.IntOpt('graceful_shutdown_timeout',
default=60,
help='Specify a timeout after which a gracefully shutdown '
'server will exit. Zero value means endless wait.'),
]
wsgi_opts = [
cfg.StrOpt('api_paste_config',
default="api-paste.ini",
help='File name for the paste.deploy config for api service'),
cfg.StrOpt('wsgi_log_format',
default='%(client_ip)s "%(request_line)s" status: '
'%(status_code)s len: %(body_length)s time:'
' %(wall_seconds).7f',
help='A python format string that is used as the template to '
                    'generate log lines. The following values can be '
'formatted into it: client_ip, date_time, request_line, '
'status_code, body_length, wall_seconds.'),
cfg.IntOpt('tcp_keepidle',
default=600,
help="Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X."),
cfg.IntOpt('wsgi_default_pool_size',
default=100,
help="Size of the pool of greenthreads used by wsgi"),
cfg.IntOpt('max_header_line',
default=16384,
help="Maximum line size of message headers to be accepted. "
"max_header_line may need to be increased when using "
"large tokens (typically those generated when keystone "
"is configured to use PKI tokens with big service "
"catalogs)."),
cfg.BoolOpt('wsgi_keep_alive',
default=True,
help="If False, closes the client socket connection "
"explicitly."),
cfg.IntOpt('client_socket_timeout', default=900,
help="Timeout for client connections' socket operations. "
"If an incoming connection is idle for this number of "
"seconds it will be closed. A value of '0' means "
"wait forever."),
]
ssl_opts = [
cfg.StrOpt('ca_file',
help="CA certificate file to use to verify "
"connecting clients.",
deprecated_group='DEFAULT',
deprecated_name='ssl_ca_file'),
cfg.StrOpt('cert_file',
help="Certificate file to use when starting "
"the server securely.",
deprecated_group='DEFAULT',
deprecated_name='ssl_cert_file'),
cfg.StrOpt('key_file',
help="Private key file to use when starting "
"the server securely.",
deprecated_group='DEFAULT',
deprecated_name='ssl_key_file'),
cfg.StrOpt('version',
help='SSL version to use (valid only if SSL enabled). '
'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, '
'TLSv1_1, and TLSv1_2 may be available on some '
'distributions.'
),
cfg.StrOpt('ciphers',
               help='Sets the list of available ciphers. The value should be '
                    'a string in the OpenSSL cipher list format.'
),
]
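# A sketch of how these option lists are typically consumed with oslo.config;
# the 'ssl' group name below is an illustrative assumption, not taken from
# this module:
#
#     CONF = cfg.CONF
#     CONF.register_opts(eventlet_backdoor_opts)
#     CONF.register_opts(service_opts)
#     CONF.register_opts(wsgi_opts)
#     CONF.register_opts(ssl_opts, group='ssl')
#     print(CONF.graceful_shutdown_timeout)  # -> 60 by default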
|
from interpriters.smart.IntelX25Interpriter import IntelX25Interpriter
from interpriters.smart.SmartBasicInterpriter import SmartBasicInterpriter
from interpriters.smart.SanDiskInterpriter import SmartSanDiskInterpriter
from interpriters.nvme.NvmeBasicInterpriter import NvmeBasicInterpriter
SPECIAL_INTERPRITERS = [SmartSanDiskInterpriter(), IntelX25Interpriter()]
BASIC = [SmartBasicInterpriter(), NvmeBasicInterpriter()]
|
from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QGroupBox, QLabel, QPushButton,
QFileDialog, QMessageBox, QWidget, QSizePolicy, QCheckBox)
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import warnings, os, time
from skimage.io import imsave
import scipy.ndimage as ndi
from matplotlib.figure import Figure
from scipy.interpolate import interp1d
import matplotlib as mpl
warnings.filterwarnings("ignore")
from matplotlib import rc
rc('font', size=12)
rc('font', family='Arial')
# rc('font', serif='Times')
rc('pdf', fonttype=42)
# rc('text', usetex=True)
class profileAP_condMode(QWidget):
def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None, ylabel='Intensity (a.u.)'):
super(profileAP_condMode, self).__init__(parent)
self.data_all = data_all
self.channel = channel
self.colors = colors
self.profileType = profileType
self.ylabel = ylabel
self.make()
def make(self):
self.figure = Figure(figsize=(4, 2.5), dpi=100)
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel(self.ylabel)
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
self.canvas.draw()
self.YnormBtn = QComboBox()
self.YnormBtn.addItem('No normalization')
self.YnormBtn.addItem('Global percentile')
self.YnormBtn.addItem('Group percentile')
self.YnormBtn.addItem('Folder percentile')
self.YnormBtn.addItem('Manual')
self.XnormBtn = QCheckBox('')
self.XnormBtn.setChecked(False)
self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)
self.bckgBtn = QComboBox()
self.bckgBtn.addItem('None')
self.bckgBtn.addItem('Background')
self.bckgBtn.addItem('Minimum')
self.orientationBtn = QComboBox()
self.orientationBtn.addItem('Signal based')
self.orientationBtn.addItem('NO')
self.alignmentBtn = QComboBox()
self.alignmentBtn.addItem('Left')
self.alignmentBtn.addItem('Right')
self.alignmentBtn.addItem('Center')
self.groupSelection = self.makeGroupSelectionBtns()
self.applyBtn = QPushButton('Apply Settings')
self.applyBtn.clicked.connect(self.remakePlot)
lay = QGridLayout(self)
lay.setSpacing(10)
lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
lay.addWidget(self.canvas,1,0,1,2)
lay.addWidget(QLabel('Background subtraction type:'),2,0,1,1)
lay.addWidget(self.bckgBtn,2,1,1,1)
lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
lay.addWidget(self.YnormBtn,4,1,1,1)
lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
lay.addWidget(self.XnormBtn,5,1,1,1)
lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
lay.addWidget(self.orientationBtn,6,1,1,1)
lay.addWidget(QLabel('Alignment:'),7,0,1,1)
lay.addWidget(self.alignmentBtn,7,1,1,1)
lay.addWidget(self.groupSelection,8,0,1,2)
lay.addWidget(self.applyBtn,9,0,1,2)
self.remakePlot()
self.setWindowTitle('Channel')
QApplication.setStyle('Fusion')
def onCheckingXnormBtn(self):
if self.XnormBtn.isChecked():
self.alignmentBtn.setEnabled(False)
else:
self.alignmentBtn.setEnabled(True)
def makeGroupSelectionBtns(self):
group = QGroupBox("Groups to plot")
self.groupPlotBtn = []
for i in range(len(self.data_all)):
self.groupPlotBtn.append(QCheckBox('Group '+str(i)))
self.groupPlotBtn[-1].setChecked(True)
self.legendBtn = QCheckBox('Legend')
self.legendBtn.setChecked(False)
self.rawBtn = QCheckBox('Plot raw data')
self.rawBtn.setChecked(True)
lay = QGridLayout()
for i in range(len(self.data_all)):
lay.addWidget(self.groupPlotBtn[i],i,0,1,1)
lay.addWidget(self.legendBtn,0,1,1,1)
lay.addWidget(self.rawBtn,1,1,1,1)
group.setLayout(lay)
return group
def remakePlot(self):
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel(self.ylabel)
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
n_groups = len(self.data_all)
n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
# rearrange dataset
profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
# subtract background or not
if self.bckgBtn.currentText() == 'Background':
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
if self.bckgBtn.currentText() == 'Minimum':
profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
# normalize fluorescence intensity accordingly
if self.YnormBtn.currentText() == 'Global percentile':
flat = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat.append(l)
percs = np.percentile(np.array(flat),(.3,99.7))
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
elif self.YnormBtn.currentText() == 'Group percentile':
flat = [[]for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i].append(l)
percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
elif self.YnormBtn.currentText() == 'Folder percentile':
flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i][j].append(l)
percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i][j])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
# normalize AP axis if necessary
if self.XnormBtn.isChecked():
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = profiles_all[i][j][k]
x = np.linspace(0,1,len(profile))
fun = interp1d(x,profile)
new_x = np.linspace(0,1,101)
profiles_all[i][j][k] = fun(new_x)
# compute length of longest gastruloid
max_length = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
max_length.append(len(profiles_all[i][j][k]))
max_length = np.max(max_length)
# orient plots according to setting
if self.orientationBtn.currentText() == 'Signal based':
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
n_p = len(y)
if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
# pad array to the right or left
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
w = max_length-len(profiles_all[i][j][k])
if self.alignmentBtn.currentText() == 'Left':
pad_width = (0,w)
if self.alignmentBtn.currentText() == 'Right':
pad_width = (w,0)
elif self.alignmentBtn.currentText() == 'Center':
if 2*int(w/2)==w:
pad_width = (int(w/2),int(w/2))
else:
pad_width = (int(w/2)+1,int(w/2))
profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
### make plot
lines = []
for i in range(n_groups):
# plot this group only if the button is checked
if self.groupPlotBtn[i].isChecked():
ydata_group = []
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
ydata_group.append(profiles_all[i][j][k])
# plot the raw data if the button is checked
if self.rawBtn.isChecked():
ax.plot(ydata_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)
# compute and plot mean and std
max_length = np.max([len(d) for d in ydata_group])
_mean = np.zeros(max_length)
_std = np.zeros(max_length)
for j in range(max_length):
datapoint = []
for data in ydata_group:
datapoint.append(data[j])
_mean[j] = np.nanmean(datapoint)
_std[j] = np.nanstd(datapoint)
line = ax.plot(_mean,'-',lw=1,c=self.colors[i],label='Mean')[0]
ax.fill_between(range(len(_mean)),_mean-_std,_mean+_std,facecolor=self.colors[i],alpha=.2, linewidth=0.,label='Std')
lines.append(line)
# adjust axes lims
ax.set_ylim(0,None)
ax.set_xlim(0,None)
if self.XnormBtn.isChecked():
ax.set_xlim(0,100)
if self.YnormBtn.currentText() != 'No normalization':
ax.set_ylim(0,1)
# add legend
if self.legendBtn.isChecked():
l = ax.legend(lines,['Group '+str(i+1) for i in range(len(self.groupPlotBtn)) if self.groupPlotBtn[i].isChecked()])
l.get_frame().set_linewidth(0.0)
self.canvas.draw()
class profileAP_tlMode(QWidget):
#############
# TO BE IMPLEMENTED!!!
#############
def __init__(self, data_all, channel, colors, profileType='APprofile', parent=None):
super(profileAP_tlMode, self).__init__(parent)
self.data_all = data_all
self.n_groups = len(data_all)
self.channel = channel
self.colors = colors
self.profileType = profileType
self.make()
def make(self):
self.figure = Figure(figsize=(4, 2.5), dpi=100)
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel('Time')
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
self.canvas.draw()
###############################################
settings_group = QGroupBox('Plot settings')
self.YnormBtn = QComboBox()
self.YnormBtn.addItem('No normalization')
self.YnormBtn.addItem('Global percentile')
self.YnormBtn.addItem('Group percentile')
self.YnormBtn.addItem('Folder percentile')
self.YnormBtn.addItem('Manual')
self.XnormBtn = QCheckBox('')
self.XnormBtn.setChecked(False)
self.XnormBtn.stateChanged.connect(self.onCheckingXnormBtn)
self.bckgBtn = QComboBox()
self.bckgBtn.addItem('None')
self.bckgBtn.addItem('Background')
self.bckgBtn.addItem('Minimum')
self.orientationBtn = QComboBox()
self.orientationBtn.addItem('Signal based')
self.orientationBtn.addItem('NO')
self.alignmentBtn = QComboBox()
self.alignmentBtn.addItem('Left')
self.alignmentBtn.addItem('Right')
self.alignmentBtn.addItem('Center')
self.aspectRatioBtn = QCheckBox('')
self.aspectRatioBtn.setChecked(True)
self.groupPlotBtn = QComboBox()
for i in range(len(self.data_all)):
self.groupPlotBtn.addItem('Group '+str(i+1))
lay = QGridLayout(self)
lay.addWidget(QLabel('Background subtraction:'),2,0,1,1)
lay.addWidget(self.bckgBtn,2,1,1,1)
lay.addWidget(QLabel('Y axis normalization:'),4,0,1,1)
lay.addWidget(self.YnormBtn,4,1,1,1)
lay.addWidget(QLabel('X axis normalization:'),5,0,1,1)
lay.addWidget(self.XnormBtn,5,1,1,1)
lay.addWidget(QLabel('A-P orientation correction:'),6,0,1,1)
lay.addWidget(self.orientationBtn,6,1,1,1)
lay.addWidget(QLabel('Alignment:'),7,0,1,1)
lay.addWidget(self.alignmentBtn,7,1,1,1)
lay.addWidget(QLabel('Set axes aspect ratio to equal:'),8,0,1,1)
lay.addWidget(self.aspectRatioBtn,8,1,1,1)
lay.addWidget(QLabel('Current group:'),9,0,1,1)
lay.addWidget(self.groupPlotBtn,9,1,1,2)
settings_group.setLayout(lay)
#######################
self.applyBtn = QPushButton('Apply Settings')
self.applyBtn.clicked.connect(self.remakePlot)
self.saveBtn = QPushButton('Save Tif image')
self.saveBtn.clicked.connect(self.save_tif)
lay = QGridLayout(self)
lay.setSpacing(10)
lay.addWidget(NavigationToolbar(self.canvas, self),0,0,1,2)
lay.addWidget(self.canvas,1,0,1,2)
lay.addWidget(settings_group,2,0,1,2)
lay.addWidget(self.applyBtn,3,0,1,2)
lay.addWidget(self.saveBtn,4,0,1,2)
self.remakePlot()
self.setWindowTitle('Channel')
QApplication.setStyle('Macintosh')
def onCheckingXnormBtn(self):
if self.XnormBtn.isChecked():
self.alignmentBtn.setEnabled(False)
else:
self.alignmentBtn.setEnabled(True)
def remakePlot(self):
n_groups = len(self.data_all)
n_folders = [len(self.data_all[group_idx]) for group_idx in range(n_groups)]
n_gastr = [[len(self.data_all[group_idx][folder_idx]['input_file']) for folder_idx in range(n_folders[group_idx])] for group_idx in range(n_groups)]
# rearrange dataset
profiles_all = [[[0 for k in range(n_gastr[i][j])] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profiles_all[i][j][k] = np.array(self.data_all[i][j][self.profileType][k][self.channel])
# subtract background or not
if self.bckgBtn.currentText() == 'Background':
profiles_all[i][j][k] -= self.data_all[i][j]['Background'][k][self.channel]
if self.bckgBtn.currentText() == 'Minimum':
profiles_all[i][j][k] -= np.min(profiles_all[i][j][k])
# normalize fluorescence intensity accordingly
percs = [None,None]
if self.YnormBtn.currentText() == 'Global percentile':
flat = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat.append(l)
percs = np.percentile(np.array(flat),(.3,99.7))
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[0])/(percs[1]-percs[0]),0,1.)
elif self.YnormBtn.currentText() == 'Group percentile':
flat = [[]for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i].append(l)
percs = [np.percentile(np.array(f),(.3,99.7)) for f in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][0])/(percs[i][1]-percs[i][0]),0,1.)
elif self.YnormBtn.currentText() == 'Folder percentile':
flat = [[[] for j in range(n_folders[i])] for i in range(n_groups)]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
for l in profiles_all[i][j][k]:
flat[i][j].append(l)
percs = [[np.percentile(np.array(f),(.3,99.7)) for f in ff] for ff in flat]
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
# print(percs[i][j])
profile = np.array(profiles_all[i][j][k])
profiles_all[i][j][k] = np.clip((profile-percs[i][j][0])/(percs[i][j][1]-percs[i][j][0]),0,1.)
self.percs = percs
# normalize AP axis if necessary
if self.XnormBtn.isChecked():
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
profile = profiles_all[i][j][k]
x = np.linspace(0,1,len(profile))
fun = interp1d(x,profile)
new_x = np.linspace(0,1,101)
profiles_all[i][j][k] = fun(new_x)
# compute length of longest gastruloid
max_length = []
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
max_length.append(len(profiles_all[i][j][k]))
max_length = np.max(max_length)
# orient plots according to setting
if self.orientationBtn.currentText() == 'Signal based':
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
y = np.array(profiles_all[i][j][k])[~np.isnan(profiles_all[i][j][k])]
n_p = len(y)
if np.sum(y[:int(n_p/2)])>np.sum(y[int(n_p-n_p/2):]):
profiles_all[i][j][k] = profiles_all[i][j][k][::-1]
# pad array to the right or left
for i in range(n_groups):
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
w = max_length-len(profiles_all[i][j][k])
if self.alignmentBtn.currentText() == 'Left':
pad_width = (0,w)
if self.alignmentBtn.currentText() == 'Right':
pad_width = (w,0)
elif self.alignmentBtn.currentText() == 'Center':
if 2*int(w/2)==w:
pad_width = (int(w/2),int(w/2))
else:
pad_width = (int(w/2)+1,int(w/2))
profiles_all[i][j][k] = np.pad(profiles_all[i][j][k],pad_width,mode='constant',constant_values=np.nan)
### make plot
# lines = []
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.95,right=0.95,left=0.2,bottom=0.25)
ax.set_xlabel(self.profileType)
ax.ticklabel_format(axis="x", style="sci", scilimits=(2,2))
ax.set_ylabel('Time')
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,2))
# ax.axis('off')
# plot the selected group only
i = self.groupPlotBtn.currentIndex()
# compute and plot mean and std of the selected group
# prepare blank image
max_t = np.max([n_gastr[i][j] for j in range(n_folders[i])])
max_l = np.max([len(profiles_all[i][j][k]) for j in range(n_folders[i]) for k in range(n_gastr[i][j])])
data_mean = np.zeros((max_t,max_l))
data_count = np.zeros((max_t,max_l))
for j in range(n_folders[i]):
for k in range(n_gastr[i][j]):
data = np.nan_to_num(profiles_all[i][j][k])
data_mean[k,:] += data
data_count[k,:] += data!=0
# plot the raw data if the button is checked
# if self.rawBtn.isChecked():
# ax.plot(data_group[-1],'-', lw=.5, c=self.colors[i], alpha = 0.2)
        data_mean = data_mean.astype(float) / data_count.astype(float)  # np.float is removed in modern NumPy
data_mean = np.nan_to_num(data_mean)
aspect = 'auto'
if self.aspectRatioBtn.isChecked():
aspect = 'equal'
ax.imshow(data_mean, aspect=aspect)
ax.set_title('Group '+str(i+1))
self.tif_data = data_mean
self.canvas.draw()
def save_tif(self):
name,_ = QFileDialog.getSaveFileName(self, 'Save Overview File')
if name != '':
### check file extension: allow to save in other formats, but bias towards tif
if os.path.splitext(name)[-1]!='.tif':
buttonReply = QMessageBox.question(self,'File format warning!','File format not recommended. Do you want to save the image as tif?')
if buttonReply == QMessageBox.Yes:
name = os.path.splitext(name)[0]+'.tif'
            # convert the image into uint16 with the right brightness and contrast
            if self.percs[0] is not None:
                self.tif_data = (2**16-1)*(self.tif_data-self.percs[0])/(self.percs[1]-self.percs[0])
            imsave(name, self.tif_data.astype(np.uint16))
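# Standalone sketch (not part of the widget) of the centre-alignment padding
# used in remakePlot above: shorter profiles are NaN-padded to a common length
# so that nan-aware means/stds can be taken across profiles of unequal length:
#
#     import numpy as np
#     profile, max_length = np.arange(5.), 8
#     w = max_length - len(profile)
#     pad_width = (w // 2 + w % 2, w // 2)  # an odd remainder goes to the left
#     padded = np.pad(profile, pad_width, mode='constant', constant_values=np.nan)
#     assert len(padded) == max_length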
|
from .rosetta_util import *
|
# -*- coding: utf-8 -*-
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # New stdlib location in 3.0
from . import _unittest as unittest
from .common import TempDirTestCase
from toron.graph import Graph
from toron._gpn_node import Node
from toron import IN_MEMORY
class TestInstantiation(TempDirTestCase):
def setUp(self):
self.addCleanup(self.cleanup_temp_files)
def test_from_collection(self):
old_boundary = Node(mode=IN_MEMORY, name='old_boundary')
new_boundary = Node(mode=IN_MEMORY, name='new_boundary')
collection = [old_boundary, new_boundary]
graph = Graph(nodes=collection) # Load nodes from list.
node_names = set(graph.nodes.keys())
self.assertSetEqual(set(['old_boundary', 'new_boundary']), node_names)
def test_from_cwd(self):
old_boundary = Node('old_boundary.node')
new_boundary = Node('new_boundary.node')
graph = Graph(path='.') # Load node files in current directory.
node_names = set(graph.nodes.keys())
self.assertSetEqual(set(['old_boundary', 'new_boundary']), node_names)
if __name__ == '__main__':
unittest.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Softplus and SoftplusGrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SoftplusTest(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
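    # softplus(x) = log(1 + exp(x)) = logaddexp(0, x); logaddexp evaluates this
    # stably, avoiding overflow of exp(x) for large positive inputs.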
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_softplus = self._npSoftplus(np_features)
with self.test_session(use_gpu=use_gpu):
softplus = nn_ops.softplus(np_features)
tf_softplus = softplus.eval()
self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
self.assertTrue(np.all(tf_softplus > 0))
self.assertShapeEqual(np_softplus, softplus)
def testNumbers(self):
for t in [np.float16, np.float32, np.float64]:
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
            log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
def testGradient(self):
with self.test_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
if __name__ == "__main__":
test.main()
|
#!/usr/bin/env python
"""Read zip format file from stdin and write new zip to stdout.
With the --store option the output will be an uncompressed zip.
Uncompressed files are stored more efficiently in Git.
https://github.com/costerwi/rezip
"""
import sys
import io
from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--store",
help="Store data to stdout zip without compression",
action="store_true")
parser.add_argument("-d", "--deterministic",
help="Remove any file metadata in order to obtain a deterministic "\
"zip file. This is usefull in order to make sure that e.g. the "\
"modification date of the zipped files is irrelevant.",
action="store_true")
args = parser.parse_args()
if args.store:
compression = ZIP_STORED
else:
compression = ZIP_DEFLATED
if not hasattr(sys.stdout, 'buffer'):
raise RuntimeError('Sorry, Python3 is required.')
# Use BytesIO objects as random access source and destination files
with io.BytesIO(sys.stdin.buffer.read()) as source, io.BytesIO() as dest:
# Read and re-zip the file in memory
with ZipFile(source, 'r') as source_zip, ZipFile(dest, 'w') as dest_zip:
for info in source_zip.infolist(): # Iterate over each file in zip
if args.deterministic:
newinfo = ZipInfo(info.filename)
newinfo.create_system = 0 # everything else is fixed
else:
newinfo = info
dest_zip.writestr(newinfo, source_zip.read(info), compression)
dest_zip.comment = source_zip.comment # Copy the comment if any
# Write the dest file as binary to stdout
dest.seek(0)
sys.stdout.buffer.write(dest.read())
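# Usage sketch (assuming this script is saved as rezip.py):
#     python rezip.py --store < archive.zip > stored.zip
#     python rezip.py --deterministic < archive.zip > repeatable.zip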
|
"""
domonic.webapi.dragndrop
====================================
https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API
"""
from domonic.events import DragEvent
class DataTransfer:
def __init__(self):
self.data = {}
self.types = []
self.files = []
self.items = []
self.dropEffect = ""
self.effectAllowed = ""
    def clearData(self, type=None):
        if type is None:  # no type given: clear everything
            self.data, self.types = {}, []
        elif type in self.data:
            self.data[type] = ""
            self.types.remove(type)
def getData(self, type):
return self.data[type]
def setData(self, type, data):
self.data[type] = data
self.types.append(type)
def setDragImage(self, image, x, y):
pass
def addElement(self, element):
self.items.append(element)
# def addFile(self, file):
# self.files.append(file)
# class DataTransferItem:
# def __init__(self, type, data):
# self.type = type
# self.data = data
# def getAsString(self):
# return self.data
# def getAsFile(self):
# return self.data
# def getAsFileSystemHandle(self):
# return self.data
# def webkitGetAsEntry(self):
# return self.data
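# Usage sketch for the DataTransfer object above:
#     dt = DataTransfer()
#     dt.setData('text/plain', 'hello')
#     assert dt.getData('text/plain') == 'hello'
#     dt.clearData('text/plain')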
|
"""API ROUTER"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from flask import jsonify, Blueprint
from gfwanalysis.errors import WHRCBiomassError
from gfwanalysis.middleware import get_geo_by_hash, get_geo_by_use, get_geo_by_wdpa, \
get_geo_by_national, get_geo_by_subnational, get_geo_by_regional
from gfwanalysis.routes.api import error, set_params
from gfwanalysis.serializers import serialize_whrc_biomass
from gfwanalysis.services.analysis.whrc_biomass_service import WHRCBiomassService
from gfwanalysis.validators import validate_geostore
whrc_biomass_endpoints_v1 = Blueprint('whrc_biomass', __name__)
def analyze(geojson, area_ha):
"""Analyze WHRC Biomass"""
logging.info('[ROUTER]: WHRC Getting biomass')
if not geojson:
return error(status=400, detail='A Geojson argument is required')
threshold, start, end, table = set_params()
logging.info(f'[ROUTER]: whrc biomass params {threshold}, {start}, {end}')
try:
data = WHRCBiomassService.analyze(
geojson=geojson,
threshold=threshold)
except WHRCBiomassError as e:
logging.error('[ROUTER]: ' + e.message)
return error(status=500, detail=e.message)
except Exception as e:
logging.error('[ROUTER]: ' + str(e))
return error(status=500, detail='Generic Error')
data['area_ha'] = area_ha
data['biomass_density'] = data['biomass'] / data['tree_cover'] if data['tree_cover'] > 0 else 0
# logging.info(f"[Router WHRC Biomass] - response from service: biomass density {data.get('biomass_density')}")
# logging.info(f"[Router WHRC Biomass] - response from service: biomass {data.get('biomass')}")
return jsonify(data=serialize_whrc_biomass(data, 'whrc-biomass')), 200
@whrc_biomass_endpoints_v1.route('/', strict_slashes=False, methods=['GET', 'POST'])
@validate_geostore
@get_geo_by_hash
def get_by_geostore(geojson, area_ha):
"""By Geostore Endpoint"""
logging.info('[ROUTER]: Getting biomass by geostore')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/use/<name>/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_use
def get_by_use(name, id, geojson, area_ha):
"""Use Endpoint"""
logging.info('[ROUTER]: Getting biomass by use')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/wdpa/<id>', strict_slashes=False, methods=['GET'])
@get_geo_by_wdpa
def get_by_wdpa(id, geojson, area_ha):
"""Wdpa Endpoint"""
logging.info('[ROUTER]: Getting biomass by wdpa')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>', strict_slashes=False, methods=['GET'])
@get_geo_by_national
def get_by_national(iso, geojson, area_ha):
"""National Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by iso')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>', strict_slashes=False, methods=['GET'])
@get_geo_by_subnational
def get_by_subnational(iso, id1, geojson, area_ha):
"""Subnational Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by admin1')
return analyze(geojson, area_ha)
@whrc_biomass_endpoints_v1.route('/admin/<iso>/<id1>/<id2>', strict_slashes=False, methods=['GET'])
@get_geo_by_regional
def get_by_regional(iso, id1, id2, geojson, area_ha):
"""Subnational Endpoint"""
logging.info('[ROUTER]: Getting biomass loss by admin2 ')
return analyze(geojson, area_ha)
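# Registration sketch: the application object and URL prefix below are
# illustrative assumptions; only the blueprint name comes from this module.
#
#     app.register_blueprint(whrc_biomass_endpoints_v1,
#                            url_prefix='/api/v1/whrc-biomass')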
|
#%%
import numpy as np
import pandas as pd
import time
from sklearn.base import BaseEstimator, TransformerMixin
from collections import defaultdict
from sklearn.model_selection import KFold, StratifiedKFold
class Timer:
def __enter__(self):
self.start=time.time()
return self
def __exit__(self, *args):
self.end=time.time()
self.hour, temp = divmod((self.end - self.start), 3600)
self.min, self.second = divmod(temp, 60)
self.hour, self.min, self.second = int(self.hour), int(self.min), round(self.second, 2)
return self
class BayCatEncoder(BaseEstimator, TransformerMixin):
def __init__(self,
group_cols,
target_col='target',
N_min=1, # the higher, the more regularization is introduced into the update.
CV=True,
n_fold=5,
verbosity=True,
delimiter='.',
drop_original=False,
drop_intermediate=False,
random_seed=2020):
self.group_cols = [group_cols] if isinstance(group_cols, str) else group_cols # List of column names combination: e.g. ['n1.n2.n4', 'n3.n4', 'n2'].
self.target_col = target_col # String: 'target' by default.
self.stats = defaultdict(dict) # key: column names combination; value: corresponding info about n, N, and computed code.
self.N_min = N_min # regularization control
self.drop_original = drop_original # toggle key for whether to drop original column name(s) or not.
self.CV = CV # Bool
self.n_fold = n_fold
self.drop_intermediate = drop_intermediate
self.delimiter = delimiter
self.verbosity = verbosity # Bool
self.seed = random_seed
self.set_original_col = set()
def fit(self, X, y):
self.col_subsets = self._generate_subsets(self.group_cols)
df = pd.concat([X.copy(), y.copy()], axis=1)
assert(isinstance(self.target_col, str))
df.columns = X.columns.tolist() + [self.target_col]
assert(self._check_col_consistency(X))
if not self.CV:
self._single_fit(df)
else:
self._cv_fit(df)
return self
def _single_fit(self, df):
size_col_subsets = len(self.col_subsets)
count_subset = 0
print(f'start bayesian target encoding on cross features in the following order: {self.col_subsets}')
for subset in self.col_subsets:
count_subset += 1
with Timer() as t:
if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets}')
df_stat, stat, cross_features = self._update(df, subset)
features_encoded = cross_features + '_code'
self.stats[cross_features] = pd.merge(
stat,
df_stat.groupby(subset)[features_encoded].mean(),
left_index=True,
right_index=True)
if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds')
return self
def _cv_fit(self, df):
kf = StratifiedKFold(n_splits = self.n_fold, shuffle = True, random_state=self.seed)
size_col_subsets = len(self.col_subsets)
count_subset = 0
for subset in self.col_subsets:
count_subset += 1
with Timer() as t:
for i, (tr_idx, val_idx) in enumerate(kf.split(df.drop(self.target_col, axis=1), df[self.target_col])):
if self.verbosity: print(f'{subset} - Order {count_subset}/{size_col_subsets} - Round {i+1}/{self.n_fold}')
                    df_tr, df_val = df.iloc[tr_idx].copy(), df.iloc[val_idx].copy() # Vital to avoid the "A value is trying to be set on a copy of a slice from a DataFrame." warning.
df_stat, stat, cross_features = self._update(df_tr, subset)
features_encoded = cross_features + '_code'
df.loc[df.index[val_idx], features_encoded] = pd.merge(
df_val[subset],
df_stat.groupby(subset)[features_encoded].mean(),
left_on=subset,
right_index=True,
how='left'
)[features_encoded].copy() \
.fillna(df[self.target_col].mean())
self.stats[cross_features] = df.groupby(subset)[features_encoded].mean().to_frame()
if self.verbosity: print(f'time elapsed: {t.hour} hours {t.min} mins {t.second} seconds')
return self
def _update(self, df, subset):
self.global_prior_mean = df[self.target_col].mean()
if len(subset) == 1:
self.set_original_col.add(*subset)
upper_level_cols = 'global'
if not upper_level_cols + '_prior_mean' in df.columns:
df.loc[:, upper_level_cols + '_prior_mean'] = self.global_prior_mean
else:
upper_level_cols = self.delimiter.join(subset[:-1]) # e.g. the n1.n2 subset's upper level feature is `n1`.
if not upper_level_cols + '_prior_mean' in df.columns:
df.loc[:, upper_level_cols + '_prior_mean'] = pd.merge(
df[subset[:-1]],
self.stats[upper_level_cols][upper_level_cols + '_code'],
left_on=subset[:-1],
right_index=True,
how='left'
)[upper_level_cols + '_code'].copy()
stat = df.groupby(subset).agg(
n=(self.target_col, 'sum'),
N=(self.target_col, 'count'),
prior_mean=(upper_level_cols + '_prior_mean', 'mean')
)
# Calculate posterior mean
df_stat = pd.merge(df[subset], stat, left_on=subset, right_index=True, how='left')
df_stat['n'].mask(df_stat['n'].isnull(), df_stat['prior_mean'], inplace=True)
df_stat['N'].fillna(1., inplace=True)
df_stat.loc[:, 'N_prior'] = df_stat['N'].map(lambda x: max(self.N_min - x, 0))
df_stat.loc[:, 'alpha_prior'] = df_stat['prior_mean'] * df_stat['N_prior']
df_stat.loc[:, 'beta_prior'] = (1. - df_stat['prior_mean']) * df_stat['N_prior'] # Large N -> zero N_prior -> zero alpha_prior and zero beta_prior -> if n is zero as well -> alpha prior, beta prior both zero -> alpha zero -> posterior mean = zero as well.
if len(subset) == 1:
cross_features = subset[0]
else:
cross_features = self.delimiter.join(subset)
        df_stat.loc[:, cross_features + '_code'] = df_stat.apply(self._stat_mean, axis=1)  # core: per-row posterior mean
return df_stat, stat, cross_features
def _generate_subsets(self, groups, delimiter='.'):
subsets = defaultdict(list)
for g in groups:
chain = g.split(delimiter)
for i in range(len(chain)):
if chain[i] and not chain[:i+1] in subsets[i]: subsets[i].append(chain[:i+1])
ret = []
for _, v in subsets.items():
if not v in ret: ret.extend(v)
return ret
def _stat_mean(self, X):
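        # Beta-Binomial shrinkage: with prior Beta(alpha_prior, beta_prior) and
        # n successes in N trials, the posterior mean is alpha / (alpha + beta).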
df = X.copy()
alpha = df['alpha_prior'] + df['n']
beta = df['beta_prior'] + df['N'] - df['n']
return alpha / (alpha + beta)
def _check_col_consistency(self, df):
"""Check whether columns specified in `self.group_cols` are all included in `df`.
"""
s = set()
for col_subset in self.col_subsets:
s |= set(col_subset)
for col in s:
if not col in df.columns: return False
return True
def transform(self, X):
assert(self._check_col_consistency(X))
for subset in self.col_subsets:
key = '.'.join(subset)
X = pd.merge(
X,
self.stats[key][key + '_code'],
left_on=subset,
right_index=True,
how='left')
if len(subset) == 1:
X[key + '_code'].fillna(self.global_prior_mean, inplace=True)
else:
parent_key = '.'.join(subset[:-1]) + '_code'
X[key + '_code'].fillna(X[parent_key].mask(X[parent_key] > self.global_prior_mean, self.global_prior_mean), inplace=True)
if self.drop_original:
for col in self.set_original_col:
X.drop(col, axis=1, inplace=True)
X.rename(columns={col+'_code': col}, inplace=True)
if self.drop_intermediate:
for col in X.columns:
                if col.endswith('_code') and col[:-len('_code')] not in self.group_cols:  # str.strip removes characters, not a suffix
X.drop(col, axis=1, inplace=True)
return X
#%%
if __name__ == '__main__':
np.random.seed(1)
k = 15
n1 = np.random.choice(['a','b'], k)
n2 = np.random.choice(['c','d'], k)
n3 = np.random.choice(['e','f'], k)
target = np.random.randint(0, 2, size=k)
train = pd.DataFrame(
{'n1': n1, 'n2': n2, 'n3':n3, 'target': target},
columns=['n1', 'n2', 'n3', 'target']
)
train.columns = ['n1','n2','n3', 'target']
train
k = 6
n4 = np.random.choice(['a','b'], k)
n5 = np.random.choice(['c','d'], k)
n6 = np.random.choice(['e','f'], k)
test = pd.DataFrame({'n4': n4, 'n2': n5, 'n3':n6})
test.columns = ['n1','n2','n3']
test
te = BayCatEncoder(
'n1.n2.n3', #['n1.n2.n3', 'n2.n3', 'n3'],
target_col='target',
drop_original=False,
drop_intermediate=False,
CV=False
) \
.fit(train.drop('target', axis=1), train.target)
# te.transform(test)
te.transform(test)
# %%
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from commands.basecommand import BaseCommand
class Ports(BaseCommand):
def __init__(self):
self.__name__ = 'Ports'
def run_ssh(self, sshc):
res = self._ssh_data_with_header(sshc, '/ip service print detail')
        sus_ports, recommendation = self.check_results_ssh(res)
        return {'raw_data': res,
                'suspicious': sus_ports,
'recommendation': recommendation}
def check_results_ssh(self, res):
sus_ports = []
recommendation = []
def_ports = {'telnet': 23, 'ftp': 21, 'www': 80, 'ssh': 22, 'www-ssl': 443, 'api': 8728, 'winbox': 8291,
'api-ssl': 8729}
for item in res:
service = item['name']
            if service in def_ports and def_ports[service] != int(item['port']):
                sus_ports.append(f'The port for {service} has changed from {def_ports[service]} to {item["port"]} - '
                                 f'severity: low')
            if (service == 'ssh') and (int(item['port']) == 22):
                recommendation.append('The ssh service is running on its default port (22) - '
                                      'MikroTik recommends changing it')
return sus_ports, recommendation
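# Input sketch (hypothetical values): each item parsed from
# '/ip service print detail' is expected to look like
#     {'name': 'ssh', 'port': '2222'}
# which would be flagged as a changed port, while {'name': 'ssh', 'port': '22'}
# would trigger the default-port recommendation instead.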
|
##############################################################################
# Part of the book Introdução à Programação com Python
# Author: Nilo Ney Coutinho Menezes
# Publisher: Novatec (c) 2010-2020
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
#
# Site: https://python.nilo.pro.br/
#
# File: exercicios3\capitulo 09\exercicio-09-35.py
##############################################################################
import sys
import os
import os.path
# this module helps convert file names into valid HTML links
import urllib.request
mascara_do_estilo = "'margin: 5px 0px 5px %dpx;'"
def gera_estilo(nível):
return mascara_do_estilo % (nível * 20)
def gera_listagem(página, diretório):
nraiz = os.path.abspath(diretório).count(os.sep)
for raiz, diretórios, arquivos in os.walk(diretório):
nível = raiz.count(os.sep) - nraiz
página.write(f"<p style={gera_estilo(nível)}>{raiz}</p>")
estilo = gera_estilo(nível+1)
for a in arquivos:
caminho_completo = os.path.join(raiz, a)
tamanho = os.path.getsize(caminho_completo)
link = urllib.request.pathname2url(caminho_completo)
página.write(f"<p style={estilo}><a href='{link}'>{a}</a> ({tamanho} bytes)</p>")
if len(sys.argv) < 2:
print("Digite o nome do diretório para coletar os arquivos!")
sys.exit(1)
diretório = sys.argv[1]
página = open("arquivos.html", "w", encoding="utf-8")
página.write("""
<!DOCTYPE html>
<html lang="pt-BR">
<head>
<meta charset="utf-8">
<title>Arquivos</title>
</head>
<body>
""")
página.write(f"Arquivos encontrados a partir do diretório: {diretório}")
gera_listagem(página, diretório)
página.write("""
</body>
</html>
""")
página.close()
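# Usage sketch:
#     python exercicio-09-35.py <directory>
# writes arquivos.html listing every file found under <directory>.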
|
from InstagramAPI import InstagramAPI
from colorama import Fore, Back, Style
import getpass
import sys
import webbrowser
import time
import requests
import json
print(Fore.GREEN + """
░░███╗░░███╗░░██╗░██████╗████████╗░░██╗██╗██████╗░░█████╗░███╗░░░███╗██████╗░██████╗░██████╗░
░████║░░████╗░██║██╔════╝╚══██╔══╝░██╔╝██║██╔══██╗██╔══██╗████╗░████║██╔══██╗╚════██╗██╔══██╗
██╔██║░░██╔██╗██║╚█████╗░░░░██║░░░██╔╝░██║██████╦╝██║░░██║██╔████╔██║██████╦╝░█████╔╝██████╔╝
╚═╝██║░░██║╚████║░╚═══██╗░░░██║░░░███████║██╔══██╗██║░░██║██║╚██╔╝██║██╔══██╗░╚═══██╗██╔══██╗
███████╗██║░╚███║██████╔╝░░░██║░░░╚════██║██████╦╝╚█████╔╝██║░╚═╝░██║██████╦╝██████╔╝██║░░██║
╚══════╝╚═╝░░╚══╝╚═════╝░░░░╚═╝░░░░░░░░╚═╝╚═════╝░░╚════╝░╚═╝░░░░░╚═╝╚═════╝░╚═════╝░╚═╝░░╚═╝
Author: @SashaSarzh """ + Style.RESET_ALL)
nostop = 0
accounts = input("Input here list of your account(If haven't type Enter): ")
if not accounts:
username = input("Your Login: ")
password = getpass.getpass()
api = InstagramAPI(username, password)
api.login()
istimes = 0
else:
f = open(accounts, 'r')
NumberOfLine = 0
for line in f:
NumberOfLine += 1
username, password = line.split(':')
print ("Login found: ", username)
api = InstagramAPI(username, password)
api.login()
istimes = 0
user = input("Victims nickname: ")
url = "https://www.instagram.com/web/search/topsearch/?context=blended&query="+user+"&rank_token=0.3953592318270893&count=1"
response = requests.get(url)
respJSON = response.json()
user_id = str( respJSON['users'][0].get("user").get("pk") )
while True:
if user == "niggvard" or user == "jidkiypuk2":
print("No no no")
sys.exit()
else: break
message = input("Text of message: ")
if istimes == 0:
times = int(input("How many messages you want send: "))
elif istimes == 1:
times = NumberOfLine
print("You will use bomber ", times,"times ", user_id, "with message: ", message, ".")
ask = input("Do you want continue[y/n]: ")
if ask == 'y':
print('Starting..')
elif ask == 'n':
print('Stopping..')
sys.exit()
else:
print('Stopping')
sys.exit()
while times > nostop:
nostop = nostop + 1
api.sendMessage(user_id,message)
print(nostop, ">> Send", user, ": ", message)
|
# The MIT License
#
# Copyright 2015-2017 University Library Bochum <bibliogaphie-ub@rub.de> and UB Dortmund <api.ub@tu-dortmund.de>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (absolute_import, division, print_function, unicode_literals)
import logging
import re
from logging.handlers import RotatingFileHandler
from bs4 import BeautifulSoup
import requests
import simplejson as json
from flask import Flask, request, jsonify, url_for
from flask import make_response
from flask_cors import CORS
from flask_swagger import swagger
from flask_wtf.csrf import CSRFProtect
from forms.forms import *
import persistence
from utils.solr_handler import Solr
try:
import local_stats_secrets as secrets
except ImportError:
import stats_secrets as secrets
class ReverseProxied(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
app = Flask(__name__)
CORS(app)
if secrets.DIFFERENT_PROXY_PATH:
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.debug = secrets.DEBUG
app.secret_key = secrets.DEBUG_KEY
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
csrf = CSRFProtect(app)
log_formatter = logging.Formatter("[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(secrets.LOGFILE, maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
handler.setFormatter(log_formatter)
app.logger.addHandler(handler)
log = logging.getLogger('werkzeug')
log.setLevel(logging.DEBUG)
log.addHandler(handler)
# ---------- EXPORT ----------
@app.route('/export/openapc/<year>', methods=['GET'])
@csrf.exempt
def export_openapc(year=''):
'''
    Export OpenAPC-compatible CSV data
swagger_from_file: api_doc/export_openapc.yml
'''
if theme(request.access_route) == 'dortmund':
affiliation = 'tudo'
affiliation_str = 'TU Dortmund'
elif theme(request.access_route) == 'bochum':
affiliation = 'rubi'
affiliation_str = 'Ruhr-Universität Bochum'
else:
affiliation = ''
affiliation_str = ''
if affiliation:
csv = '"institution";"period";"euro";"doi";"is_hybrid";"publisher";"journal_full_title";"issn";"url";"local_id"\n'
oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
application=secrets.SOLR_APP, core='hb2', handler='query',
query='oa_funds:true', facet='false', rows=100000,
fquery=['%s:true' % affiliation, 'fdate:%s' % year])
oa_solr.request()
results = oa_solr.results
if len(results) > 0:
for record in results:
thedata = json.loads(record.get('wtf_json'))
doi = record.get('doi')[0]
is_hybrid = False
if record.get('is_hybrid'):
is_hybrid = record.get('is_hybrid')
publisher = ''
journal_title = ''
issn = ''
url = ''
if not doi:
journal_title = ''
if record.get('is_part_of_id'):
if record.get('is_part_of_id')[0]:
host = persistence.get_work(record.get('is_part_of_id')[0])
if host:
record = json.loads(host.get('wtf_json'))
# print(json.dumps(record, indent=4))
journal_title = record.get('title')
if record.get('fsubseries'):
journal_title = record.get('fsubseries')
publisher = ''
if record.get('publisher'):
publisher = record.get('publisher')
issn = ''
if record.get('ISSN'):
for entry in record.get('ISSN'):
if entry:
issn = entry
break
url = ''
if thedata.get('uri'):
for uri in thedata.get('uri'):
url = uri
break
csv += '"%s";%s;%s;"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
affiliation_str,
year,
0.00,
doi,
is_hybrid,
publisher,
journal_title,
issn,
url,
record.get('id')
)
resp = make_response(csv, 200)
resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
return resp
else:
return make_response('No results', 404)
else:
return make_response('No affiliation parameter set. Please contact the administrator!', 400)
@app.route('/export/oa_report/<year>', methods=['GET'])
@csrf.exempt
def export_oa_report(year=''):
'''
    Export an Open Access report as CSV
swagger_from_file: api_doc/export_oa_report.yml
'''
pubtype = request.args.get('pubtype', 'ArticleJournal')
if theme(request.access_route) == 'dortmund':
affiliation = 'tudo'
affiliation_str = 'TU Dortmund'
elif theme(request.access_route) == 'bochum':
affiliation = 'rubi'
affiliation_str = 'Ruhr-Universität Bochum'
else:
affiliation = ''
affiliation_str = ''
if affiliation:
csv = '"AU";"TI";"SO";"DT";"RP";"EM";"OI";"PU";"ISSN";"E-ISSN";"DOI";"OA";"RP TUDO";"Fak"\n'
# TODO search for all publications of the given year
oa_solr = Solr(host=secrets.SOLR_HOST, port=secrets.SOLR_PORT,
application=secrets.SOLR_APP, core='hb2', handler='query',
query='*:*', facet='false', rows=100000,
fquery=['%s:true' % affiliation, 'fdate:%s' % year, 'pubtype:%s' % pubtype])
oa_solr.request()
results = oa_solr.results
if results:
for record in results:
thedata = json.loads(record.get('wtf_json'))
author = ''
corresponding_author = ''
corresponding_affiliation = ''
faks = ''
for person in thedata.get('person'):
if 'aut' in person.get('role'):
author += person.get('name') + ';'
if person.get('corresponding_author'):
corresponding_author = person.get('name')
if person.get('tudo'):
corresponding_affiliation = True
if person.get('gnd'):
tudo = persistence.get_person(person.get('gnd'))
# print(person.get('gnd'))
if tudo:
if tudo.get('affiliation_id'):
faks = ''
for entry in tudo.get('affiliation_id'):
affil = persistence.get_orga(entry)
fak = ''
if affil:
has_parent = False
fak = affil.get('pref_label')
if affil.get('parent_id'):
has_parent = True
fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
while has_parent:
affil = persistence.get_orga(affil.get('parent_id'))
if affil.get('parent_id'):
has_parent = True
fak = '%s / %s' % (affil.get('parent_label'), affil.get('pref_label'))
else:
has_parent = False
else:
fak = 'LinkError: Person %s' % person.get('gnd')
faks += fak + ';'
faks = faks[:-1]
author = author[:-1]
                publisher = ''
                journal_title = ''
                issn = ''
if record.get('is_part_of_id'):
if record.get('is_part_of_id')[0]:
host = persistence.get_work(record.get('is_part_of_id')[0])
if host:
record = json.loads(host.get('wtf_json'))
# print(json.dumps(record, indent=4))
journal_title = record.get('title')
if record.get('fsubseries'):
journal_title = record.get('fsubseries')
publisher = ''
if record.get('publisher'):
publisher = record.get('publisher')
issn = ''
if record.get('ISSN'):
for entry in record.get('ISSN'):
if entry:
issn = entry
break
csv += '"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s";"%s"\n' % (
author,
thedata.get('title'),
journal_title,
'article',
corresponding_author,
'',
'',
publisher,
issn,
'',
thedata.get('DOI')[0],
thedata.get('oa_funded'),
corresponding_affiliation,
faks,
)
resp = make_response(csv, 200)
resp.headers['Content-Type'] = 'text/csv; charset=utf-8'
return resp
else:
return make_response('No affiliation parameter set. Please contact the administrator!', 400)
# ---------- STATISTICS ----------
# ---------- REST ----------
@app.route("/export/spec")
def spec():
swag = swagger(app, from_file_keyword='swagger_from_file')
swag['info']['version'] = secrets.SWAGGER_API_VERSION
swag['info']['title'] = secrets.SWAGGER_TITLE
swag['info']['description'] = secrets.SWAGGER_DESCRIPTION
swag['schemes'] = secrets.SWAGGER_SCHEMES
swag['host'] = secrets.SWAGGER_HOST
swag['basePath'] = secrets.SWAGGER_BASEPATH
swag['tags'] = [
{
'name': 'monitoring',
'description': 'Methods for monitoring the service'
},
{
'name': 'export',
'description': 'Special data views as exports'
},
{
'name': 'statistics',
'description': 'Statistics'
},
]
return jsonify(swag)
@app.route('/export/_ping')
@csrf.exempt
def _ping():
"""
Ping the service
swagger_from_file: bibliography_doc/_ping.yml
"""
try:
if 'failed' in json.dumps(dependencies_health(), indent=4):
return make_response('One or more dependencies unavailable!', 500)
else:
return make_response('pong', 200)
except Exception:
return make_response('One or more dependencies unavailable!', 500)
@app.route('/export/_health')
@csrf.exempt
def _health():
"""
    Showing the health of the service and its dependencies
swagger_from_file: bibliography_doc/_health.yml
"""
health_json = {
"name": "hb2_flask",
"timestamp": timestamp(),
"dependencies": dependencies_health()
}
json_string = json.dumps(health_json, indent=4)
status = 200
if 'failed' in json_string:
status = 500
response = make_response(json_string, status)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Content-type'] = 'application/json'
return response
def dependencies_health():
dependencies = []
# health of Solr cores
try:
status = requests.get(
'http://%s:%s/%s/hb2/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "hb2"',
'status': status,
'description': 'Storage for bibliographic data',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/hb2_users/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "hb2_users"',
'status': status,
'description': 'Storage for registered users',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/group/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "group',
'status': status,
'description': 'Storage for working groups or projects data',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/organisation/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "organisation',
'status': status,
'description': 'Storage for organisations data',
'external': False
})
try:
status = requests.get(
'http://%s:%s/%s/person/admin/ping?wt=json' % (secrets.SOLR_HOST, secrets.SOLR_PORT, secrets.SOLR_APP),
headers={'Accept': 'application/json'}).json().get('status').lower()
except requests.exceptions.ConnectionError:
status = 'failed'
dependencies.append({
'service': 'Solr Core "person',
'status': status,
'description': 'Storage for persons data',
'external': False
})
return dependencies
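# Shape of the list dependencies_health() returns; 'status' comes from Solr's
# ping handler (typically 'ok') or is 'failed' when a core is unreachable:
#   [{'service': 'Solr Core "hb2"', 'status': 'ok',
#     'description': 'Storage for bibliographic data', 'external': False}, ...]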
# ---------- MAIN ----------
def str2bool(v):
    """Interprets common string spellings of truth ('yes', 'true', 't', '1') as True."""
    return str(v).lower() in ("yes", "true", "t", "1")
def theme(ip):
    """Selects the site theme ('bochum' or 'dortmund') from the client IP address list."""
    # logging.info('IPs: %s' % len(ip))
    # logging.info('IPs: %s' % ip)
    site = 'dortmund'
    try:
        # use the second-to-last entry of the forwarded address list
        idx = len(ip) - 2
    except Exception:
        idx = 0
    if ip[idx].startswith('134.147'):
        site = 'bochum'
    elif ip[idx].startswith('129.217'):
        site = 'dortmund'
    return site
def cleanhtml(raw_html):
    """Strips all HTML tags from a string using a non-greedy regex."""
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', raw_html)
    return cleantext
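# Quick sanity checks for the helpers above (results follow directly from the
# definitions):
#   str2bool('Yes')              -> True
#   str2bool('0')                -> False
#   cleanhtml('<b>Hi</b> there') -> 'Hi there'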
if __name__ == '__main__':
app.run(port=secrets.APP_PORT)
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ConfigParser
import logging
import os
import subprocess
import time
from autothreadharness import settings
logger = logging.getLogger(__name__)
HARNESS_SVN_VERSION_R44 = 1471
"""int: this is the first published release that miniweb was removed from Harness"""
def _try_kill(proc):
    logger.info('Trying to kill process')
times = 1
while proc.poll() is None:
proc.kill()
time.sleep(5)
if proc.poll() is not None:
logger.info('Process has been killed')
break
logger.info('Trial %d failed', times)
times += 1
if times > 3:
raise SystemExit()
class HarnessController(object):
"""Harness service control
This controls harness service, including the harness back-end and front-end.
"""
harness = None
"""harness back-end"""
miniweb = None
"""harness front-end"""
def __init__(self, result_dir=None):
self.result_dir = result_dir
self.harness_file = ''
harness_info = ConfigParser.ConfigParser()
harness_info.read('%s\\info.ini' % settings.HARNESS_HOME)
self.version = harness_info.getint('Thread_Harness_Info', 'SVN')
def start(self):
logger.info('Starting harness service')
if self.harness:
logger.warning('Harness already started')
else:
env = dict(os.environ, PYTHONPATH='%s\\Thread_Harness;%s\\ThirdParty\\hsdk-python\\src'
% (settings.HARNESS_HOME, settings.HARNESS_HOME))
self.harness_file = '%s\\harness-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S'))
with open(self.harness_file, 'w') as harness_out:
self.harness = subprocess.Popen([settings.HARNESS_HOME + '\\Python27\\python.exe',
settings.HARNESS_HOME + '\\Thread_Harness\\Run.py'],
cwd=settings.HARNESS_HOME,
stdout=harness_out,
stderr=harness_out,
env=env)
time.sleep(2)
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
logger.warning('Miniweb already started')
else:
with open('%s\\miniweb-%s.log' % (self.result_dir, time.strftime('%Y%m%d%H%M%S')), 'w') as miniweb_out:
self.miniweb = subprocess.Popen([settings.HARNESS_HOME + '\\MiniWeb\\miniweb.exe'],
stdout=miniweb_out,
stderr=miniweb_out,
cwd=settings.HARNESS_HOME + '\\MiniWeb')
def stop(self):
logger.info('Stopping harness service')
if self.harness:
_try_kill(self.harness)
self.harness = None
else:
logger.warning('Harness not started yet')
if self.version >= HARNESS_SVN_VERSION_R44:
return
if self.miniweb:
_try_kill(self.miniweb)
self.miniweb = None
else:
logger.warning('Miniweb not started yet')
    def tail(self):
        # return roughly the last 100 bytes of the harness log file
        with open(self.harness_file) as harness_out:
            harness_out.seek(-100, 2)
            return ''.join(harness_out.readlines())
def __del__(self):
self.stop()
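# A minimal usage sketch (assumes the autothreadharness settings point at a real
# Harness installation and that the result directory is writable; the path
# below is illustrative, not part of the module):
#   controller = HarnessController(result_dir='C:\\results')
#   controller.start()
#   print(controller.tail())
#   controller.stop()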
|
# Copyright 2020 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from typing import List, Optional, Dict
from dragonchain import logger
from dragonchain import exceptions
from dragonchain.lib.dto import smart_contract_model
from dragonchain.lib.interfaces import storage
from dragonchain.lib.database import redisearch
from dragonchain.lib import faas
# Constants
FOLDER = "SMARTCONTRACT"
_log = logger.get_logger()
def get_contract_id_by_txn_type(txn_type: str) -> str:
results = redisearch.search(
index=redisearch.Indexes.smartcontract.value, query_str=f"@sc_name:{{{redisearch.get_escaped_redisearch_string(txn_type)}}}", only_id=True
).docs
if results:
return results[0].id
raise exceptions.NotFound(f"Smart contract {txn_type} could not be found.")
def get_contract_by_txn_type(txn_type: str) -> smart_contract_model.SmartContractModel:
"""Searches for a contract by txn_type"""
return smart_contract_model.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/{get_contract_id_by_txn_type(txn_type)}/metadata.json"))
def list_all_contract_ids() -> List[str]:
query_result = redisearch.search(index=redisearch.Indexes.smartcontract.value, query_str="*", limit=10000, only_id=True)
contract_ids = []
for index in query_result.docs:
contract_ids.append(index.id)
return contract_ids
def get_serial_contracts() -> List[smart_contract_model.SmartContractModel]:
"""
Searches for serial contracts
Please note this function fetches all smart contract metadata from storage each time it is run, so should be used sparingly
"""
# First check and remove bad contracts or this function could fail
remove_bad_contracts()
serial_contracts = []
for sc_id in list_all_contract_ids():
sc_model = get_contract_by_id(sc_id)
if sc_model.execution_order == "serial":
serial_contracts.append(sc_model)
return serial_contracts
def remove_bad_contracts() -> None:
"""Remove contract(s) from the index if its metadata doesn't exist"""
for sc_id in list_all_contract_ids():
try:
get_contract_by_id(sc_id)
except exceptions.NotFound:
redisearch.delete_document(index=redisearch.Indexes.smartcontract.value, doc_name=sc_id)
def add_smart_contract_index(contract: smart_contract_model.SmartContractModel) -> None:
"""Add the index for a smart contract"""
redisearch.put_document(redisearch.Indexes.smartcontract.value, contract.id, {"sc_name": contract.txn_type}, upsert=True)
def remove_smart_contract_index(contract_id: str) -> None:
"""Remove the index for a smart contract"""
redisearch.delete_document(redisearch.Indexes.smartcontract.value, contract_id)
def get_contract_by_id(contract_id: str) -> smart_contract_model.SmartContractModel:
"""Searches for a contract by contract_id"""
return smart_contract_model.new_from_at_rest(storage.get_json_from_object(f"{FOLDER}/{contract_id}/metadata.json"))
def contract_does_exist(contract_id: str) -> bool:
"""Checks if a contract exists or not"""
return storage.does_object_exist(f"{FOLDER}/{contract_id}/metadata.json")
def get_contract_logs(contract_id: str, since: Optional[str], tail: Optional[int]) -> List[Dict[str, str]]:
"""Returns a list of smart contract logs from openfaas"""
return faas.get_logs(contract_id, since, tail)
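# A minimal usage sketch (assumes a running Dragonchain with its redisearch and
# storage interfaces configured; the txn_type below is illustrative):
#   sc_model = get_contract_by_txn_type("my_txn_type")  # raises exceptions.NotFound if absent
#   if contract_does_exist(sc_model.id):
#       logs = get_contract_logs(sc_model.id, since=None, tail=100)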
|
# Copyright (c) 2020 Institute for Quantum Computing, Baidu Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main
"""
from paddle import fluid
import os
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from paddle_quantum.utils import pauli_str_to_matrix
from paddle_quantum.QAOA.Paddle_QAOA import Paddle_QAOA
from paddle_quantum.QAOA.QAOA_Prefunc import generate_graph, H_generator
def main(N=4):
    # number of qubits, i.e. the number of nodes in the graph
    classical_graph, classical_graph_adjacency = generate_graph(N, GRAPHMETHOD=1)
print(classical_graph_adjacency)
# Convert the Hamiltonian's list form to matrix form
H_matrix = pauli_str_to_matrix(H_generator(N, classical_graph_adjacency), N)
H_diag = np.diag(H_matrix).real
H_max = np.max(H_diag)
H_min = np.min(H_diag)
print(H_diag)
print('H_max:', H_max, ' H_min:', H_min)
pos = nx.circular_layout(classical_graph)
nx.draw(classical_graph, pos, width=4, with_labels=True, font_weight='bold')
plt.show()
    opt_cir = Paddle_QAOA(classical_graph_adjacency, N=N, P=4, METHOD=1, ITR=120, LR=0.1)
# Load the data of QAOA
x1 = np.load('./output/summary_data.npz')
H_min = np.ones([len(x1['iter'])]) * H_min
# Plot loss
loss_QAOA, = plt.plot(x1['iter'], x1['energy'], alpha=0.7, marker='', linestyle="--", linewidth=2, color='m')
benchmark, = plt.plot(x1['iter'], H_min, alpha=0.7, marker='', linestyle=":", linewidth=2, color='b')
    plt.xlabel('Number of iterations')
plt.ylabel('Performance of the loss function for QAOA')
plt.legend(handles=[
loss_QAOA,
benchmark
],
labels=[
r'Loss function $\left\langle {\psi \left( {\bf{\theta }} \right)} '
r'\right|H\left| {\psi \left( {\bf{\theta }} \right)} \right\rangle $',
'The benchmark result',
], loc='best')
# Show the plot
plt.show()
with fluid.dygraph.guard():
# Measure the output state of the QAOA circuit for 1024 shots by default
prob_measure = opt_cir.measure(plot=True)
# Find the max value in measured probability of bitstrings
max_prob = max(prob_measure.values())
# Find the bitstring with max probability
solution_list = [result[0] for result in prob_measure.items() if result[1] == max_prob]
print("The output bitstring:", solution_list)
        # Draw the graph coloured according to the first bitstring in solution_list, i.e. one solution of the MaxCut-like problem
head_bitstring = solution_list[0]
node_cut = ["blue" if head_bitstring[node] == "1" else "red" for node in classical_graph]
edge_cut = [
"solid" if head_bitstring[node_row] == head_bitstring[node_col] else "dashed"
for node_row, node_col in classical_graph.edges()
]
nx.draw(
classical_graph,
pos,
node_color=node_cut,
style=edge_cut,
width=4,
with_labels=True,
font_weight="bold",
)
plt.show()
if __name__ == "__main__":
main()
|
# Space: O(n) distinct suffixes are cached
# Time: O(n) recursive calls; string slicing and hashing make the wall-clock cost O(n^2)
class Solution:
def numDecodings(self, s: str) -> int:
if len(s) == 0: return 0
self.cache = {}
self.cache[''] = 1
def recursive(string):
if string in self.cache: return self.cache[string]
if string[0] == '0': return 0
if len(string) == 1: return 1
temp_res = recursive(string[1:])
prefix = int(string[:2])
if 0 < prefix <= 26:
temp_res += recursive(string[2:])
self.cache[string] = temp_res
return temp_res
return recursive(s)
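# Expected results for the memoised recursion above (decode-ways semantics,
# 'A'..'Z' = '1'..'26'):
#   Solution().numDecodings("12")  -> 2   ("AB", "L")
#   Solution().numDecodings("226") -> 3   ("BZ", "VF", "BBF")
#   Solution().numDecodings("06")  -> 0   (no letter maps to a leading zero)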
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
class BaseSecurityGroupsTest(base.BaseV2ComputeTest):
@classmethod
def setup_credentials(cls):
# A network and a subnet will be created for these tests
cls.set_network_resources(network=True, subnet=True)
super(BaseSecurityGroupsTest, cls).setup_credentials()
|
from .embedvideos import EmbedVideosXBlock
|
from .toolbox import Vin
from .exceptions import ValidationError, VininfoException
VERSION = (1, 6, 0)
"""Application version number tuple."""
VERSION_STR = '.'.join(map(str, VERSION))
"""Application version number string."""
|
# -*- coding: utf-8 -*-
from lxml import etree
import pkgutil
from io import BytesIO
from . import xml_functions, construction_functions, layer_functions
from . import surface_functions, space_functions, building_functions
from . import opening_functions, zone_functions
class Gbxml():
"A class that represents a gbXML file and the gbXML schema"
def __init__(self,
gbxml_fp=None,
gbxsd_fp=None):
"""Initialises a new Gbxml instance
Arguments:
gbxml_fp (str): filepath to a gbXML file. This is read in as an
lxml._ElementTree object. If not supplied then a
new lxml._ElementTree object with only a root element is created.
gbxsd_fp (str): filepath to a gbXML schema file. If not supplied
                then a default gbXML schema file is used.
"""
if gbxml_fp:
self._ElementTree=etree.parse(gbxml_fp)
else:
st = pkgutil.get_data(__package__, 'blank.xml')
self._ElementTree=etree.parse(BytesIO(st))
        if gbxsd_fp:
            self._ElementTree_gbxsd=etree.parse(gbxsd_fp)
else:
st = pkgutil.get_data(__package__, 'GreenBuildingXML_Ver6.01.xsd')
self._ElementTree_gbxsd=etree.parse(BytesIO(st))
self.ns={'gbxml':'http://www.gbxml.org/schema'}
# general query methods
def get_ids(self, tag=None):
"""Returns the id attributes of elements
:param tag: an element tag to filter on
:type tag: str, optional
:return: a list of element ids
:rtype: list
"""
if tag is None: tag='*'
element=self._ElementTree.getroot()
return xml_functions.get_ids(element,tag)
def get_xmlstring(self,id=None):
"""Returns a string of an xml element
:param id: an element id to filter on
:type id: str, optional
:return: a string of xml contents
:rtype: str
"""
element=self._ElementTree.getroot()
        if id is not None:
st='//gbxml:*[@id="%s"]' % id
element=element.xpath(st,namespaces=self.ns)[0]
return xml_functions.get_xmlstring(element)
def get_attributes(self,id):
"""Returns the attributes of an element
:param id: an element id
:type id: str
:return: the attributes of the element
:rtype: dict
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_attributes(element)
def get_child_tags(self,id):
"""Returns the child tags of an element
:param id: an element id
:type id: str
:return: a list of the tags of the child elements
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tags(element)
def get_child_tag_text(self,id,child_tag):
"""Returns the text of child elements
:param id: an element id
:type id: str
:param child_tag: a tag of a child element
:type child_tag: str
:return: a list of the text of child elements with the child_tag tag
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tag_text(element,child_tag)
def get_child_tag_attributes(self,id,child_tag):
"""Returns the attributes of child elements
:param id: an element id
:type id: str
:param child_tag: a tag of a child element
:type child_tag: str
:return: a list of the attributes of each child element with the child_tag tag
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tag_attributes(element,child_tag)
def get_children_list(self,id):
"""Returns a list of dicts representing each child element
:param id: an element id
:type id: str
:return: a list of dicts {'tag':(str),'text':(str),'attributes':(dict)}
:rtype: list
"""
st='//gbxml:*[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_children_list(element)
# campus query methods
def get_campus_location_tags(self,id):
"""Returns the child tags of the Location element of a campus
:param id: a Campus element id
:type id: str
:return: a list of the tags of the Location element
:rtype: list
"""
st='./gbxml:Campus[@id="%s"]/gbxml:Location' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tags(element)
def get_campus_location_tag_text(self,id,child_tag):
"""Returns the text of Location child elements of a campus
:param id: a Campus element id
:type id: str
:param child_tag: a tag of a child element of the Location element
:type child_tag: str
:return: a list of the text of child elements of the Location element
with the child_tag tag
:rtype: list
"""
st='./gbxml:Campus[@id="%s"]/gbxml:Location' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
return xml_functions.get_child_tag_text(element,child_tag)
# building query methods
def get_building_space_ids(self,id):
"""Returns the ids of all spaces in a building
:param id: a Building element id
:type id: str
:return: a list of Space ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Building[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get space ids
return building_functions.get_space_ids(element)
def get_building_surface_ids(self,id):
"""Returns the ids of all surfaces in a building
:param id: a Building element id
:type id: str
:return: a list of Surface ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Building[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get surface ids
return building_functions.get_surface_ids(element)
# space query methods
def get_space_surface_ids(self,id):
"""Returns the ids of all surfaces adjacent to a space
:param id: a Space element id
:type id: str
:return: a list of surface ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Building/gbxml:Space[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get surface ids
return space_functions.get_surface_ids(element)
# construction query methods
def get_construction_layer_ids(self,id):
"""Returns the layer ids of a construction
:param id: a Construction element id
:type id: str
:return: a list of layer ids
:rtype: list
"""
# get element from id
st='./gbxml:Construction[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get layer ids
return construction_functions.get_layer_ids(element)
def get_construction_material_ids(self,id):
"""Returns the material ids of a construction
:param id: a Construction element id
:type id: str
:return: a list of material ids
:rtype: list
"""
# get element from id
st='./gbxml:Construction[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get material ids
return construction_functions.get_material_ids(element)
# layer query methods
def get_layer_material_ids(self,id):
"""Returns the material ids of a construction
:param id: a Layer element id
:type id: str
:return: a list of material ids
:rtype: list
"""
# get element from id
st='./gbxml:Layer[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
        # get material ids
return layer_functions.get_material_ids(element)
# surface query methods
def get_surface_inner_space_id(self,id):
"""Returns the inner space id of a surface
:param id: a Surface element id
:type id: str
:return: the inner Space id
:rtype: str or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get inner space id
return surface_functions.get_inner_space_id(element)
def get_surface_outer_space_id(self,id):
"""Returns the outer space id of a surface
:param id: a Surface element id
:type id: str
:return: the outer Space id
:rtype: str or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get outer space id
return surface_functions.get_outer_space_id(element)
def get_surface_azimuth(self,id):
"""Returns the azimuth of a surface
:param id: a Surface element id
:type id: str
:return: the azimuth value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get azimuth
return surface_functions.get_azimuth(element)
def get_surface_tilt(self,id):
"""Returns the tilt of a surface
:param id: a Surface element id
:type id: str
:return: the tilt value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get tilt
return surface_functions.get_tilt(element)
def get_surface_coordinates(self,id):
"""Returns the coordinates of a surface
:param id: a Surface element id
:type id: str
:return: a list of coordinate tuples (x,y,z)
:rtype: list (of tuples)
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get coordinates
return surface_functions.get_coordinates(element)
def get_surface_area(self,id):
"""Returns the area of a surface
        This is calculated using the surface coordinates and includes the
area of any openings.
:param id: a Surface element id
:type id: str
:return: the area value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get area
return surface_functions.get_area(element)
def get_surface_opening_ids(self,id):
"""Returns the opening ids of a surface
:param id: a Surface element id
:type id: str
:return: a list of Opening ids
:rtype: list
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get opening ids
return surface_functions.get_opening_ids(element)
# opening query methods
def get_opening_surface_id(self,id):
"""Returns the parent surface id of an opening
        :param id: an Opening element id
:type id: str
:return: a Surface id
:rtype: str
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get surface id
return opening_functions.get_surface_id(element)
def get_opening_coordinates(self,id):
"""Returns the coordinates of an opening
        :param id: an Opening element id
:type id: str
:return: a list of coordinate tuples (x,y,z)
:rtype: list (of tuples)
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get coordinates
return opening_functions.get_coordinates(element)
def get_opening_area(self,id):
"""Returns the area of an opening
        :param id: an Opening element id
:type id: str
:return: the area value
:rtype: float or None
"""
# get element from id
st='./gbxml:Campus/gbxml:Surface/gbxml:Opening[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get area
return opening_functions.get_area(element)
# zone query methods
def get_zone_space_ids(self,id):
"""Returns the ids of all spaces in a zone
:param id: a Zone element id
:type id: str
:return: a list of Space ids
:rtype: list
"""
# get element from id
st='./gbxml:Zone[@id="%s"]' % id
element=self._ElementTree.getroot().xpath(st,namespaces=self.ns)[0]
# get space ids
return zone_functions.get_space_ids(element)
## OUTPUT
#
#
# def __xmlstring(self,element=None):
# """Returns a string of an xml element
#
# Arguments:
# - element (lxml.etree._Element): default is root node
#
# """
# if element is None: element=self.root()
# return etree.tostring(element,pretty_print=True).decode()
#
#
# def xpath(self,element,st_xpath):
# """Returns the result of an xpath operation on the gbXML file
#
# Arguments
# - st_xpath (str): the xpath string
# - element (lxml.etree._Element): the element for the xpath operation. The
# default is the root element
#
# """
# return element.xpath(st_xpath,namespaces=self.ns)
#
#
# def write(self,fp):
# """Writes the gbXML file to disc
#
# Arguments:
# fp (str): the filepath
# """
# st=etree.tostring(self.root(),xml_declaration=True)
# with open(fp,'wb') as f:
# f.write(st)
#
## VALIDATION
#
# def validate(self):
# """Validates the gbXMl file using the schema
#
# Returns True if the gbXML file is valid, otherwise False
#
# """
# xmlschema = etree.XMLSchema(self.gbxsd._ElementTree)
# result=xmlschema.validate(self._ElementTree)
# return result
#
## EDITING
#
# def add_element(self,parent_element,label,text=None,**kwargs):
# """Adds an element to the gbXML
#
# Returns the newly created element
#
# Arguments:
# - parent_element (lxml._Element or str): the parent element that the
# new element is added to. This can be either a lxml._Element object
# or a string with the element id.
# - label (str): the label or tag of the new element
# - text (str): the text of the new element
# - **kwargs (keywords): the attributes of the new element
#
# """
# if isinstance(parent_element,str):
# parent_element=self.element(parent_element)
# e=etree.SubElement(parent_element,'{%s}%s' % (self.ns['gbxml'],label))
# if text: e.text=text
# if kwargs:
# for k,v in kwargs.items():
# e.set(k,v)
# return e
#
# def set_attribute(self,element,key,value):
# """Sets the attribute of an element
#
# Returns the modified element
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# - key (str): the name of the attribute
# - value (str): the value of the attribute
#
# """
# if isinstance(element,str):
# element=self.element(element)
# element.set(key,value)
# return element
#
#
# def set_element_id(self,element,new_id):
# """Sets a new id attribute for an element and updates all links
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# - new_id (str):
#
# Return value:
# - new_id (str)
#
# """
# #check if new_id already exists
# l=self.elements()
# ids=[x.get('id') for x in l if x.get('id')]
# if new_id in ids:
# raise ValueError('new_id %s already exists' % new_id)
#
# #get element
# if isinstance(element,str):
# element=self.element(element)
#
# #get old id
# old_id=element.get('id')
#
# #set new id
# element.set('id',new_id)
#
# #find all elements with attribute labelRefId=old_id
# label=self.label(element)
# prefix=label[0].lower()+label[1:]
# st='.//gbxml:*[@%sIdRef="%s"]' % (prefix,old_id)
# l=self.xpath(self.root(),st)
#
# #update with id
# for e in l:
# e.set('%sIdRef' % prefix,new_id)
# #return new id
# return new_id
#
#
# def set_text(self,element,text):
# """Sets the text of an element
#
# Returns the modified element
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# - text (str): the text
#
# """
# if isinstance(element,str):
# element=self.element(element)
# element.text=text
# return element
#
#
# def remove_element(self,element):
# """Removes an element
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
#
# """
# if isinstance(element,str):
# element=self.element(element)
#
# #remove links to element
# id=element.get('id')
# label=self.label(element)
# prefix=label[0].lower()+label[1:]
# st='.//gbxml:*[@%sIdRef="%s"]' % (prefix,id)
# l=self.xpath(self.root(),st)
# for x in l:
# self.remove_attribute(x,'%sIdRef' % prefix)
#
# #remove element
# parent=element.getparent()
# parent.remove(element)
#
#
# def remove_attribute(self,element,key):
# """Removes an element
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# - key (str): The name of the attribute to delete
#
# """
# if isinstance(element,str):
# element=self.element(element)
# element.attrib.pop(key)
#
#
# def remove_text(self,element):
# pass
#
#
#
## QUERYING
#
# def elements(self,label='*'):
# """Returns the elements of the gbXML file
#
# Arguments:
# - label (str): the label of the elements
#
# """
# st='//gbxml:%s' % label
# return self.xpath(self.root(),st)
#
#
# def root(self):
# "Returns the root element"
# return self._ElementTree.getroot()
#
#
# def element(self,id,label='*'):
# """Returns an element from the gbXML file
#
# Arguments:
# - id (str): the id of the element
# - label (str): the label of the element
#
# """
# st='//gbxml:%s[@id="%s"]' % (label,id)
# try:
# return self.xpath(self.root(),st)[0]
# except IndexError:
# raise KeyError('there is no element with an id of %s' % id)
#
#
# def label(self,element):
# """Returns the label of an element
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# """
# if isinstance(element,str):
# element=self.element(element)
# return element.tag.split('}')[1]
#
#
# def attributes(self,element):
# """Returns the attributes of an element
#
# Return value is a dictionary
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# """
# if isinstance(element,str):
# element=self.element(element)
# return dict(element.attrib)
#
#
# def text(self,element):
# """Returns the text of an element, or None
#
# Return value is a string
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# """
# if isinstance(element,str):
# element=self.element(element)
# return element.text
#
#
# def text_value(self,element):
# """Returns the text value of an element, i.e the text converted
# according to its schema data type
#
# Return value is an object with data type dependent on the schema
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# """
#
# #JUST RETURNS STRINGS AT PRESENT - TO DO
#
# if isinstance(element,str):
# element=self.element(element)
# text=element.text
# return text
#
#
# def child_elements(self,element,label='*'):
# """Returns the child elements of an element
#
# Return value is a list of elements
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
# if isinstance(element,str):
# element=self.element(element)
# st='./gbxml:%s' % label
# return self.xpath(element,st)
#
#
# def descendent_elements(self,element,label='*'):
# """Returns the descendent elements of an element
#
# Return value is a list of elements
#
# Arguments:
    # - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
# if isinstance(element,str):
# element=self.element(element)
# st='.//gbxml:%s' % label
# return self.xpath(element,st)
#
#
## CONSTRUCTION FUNCTIONS
#
# def construction_layers(self,construction_element):
# "Returns the layer elements of a construction"
# if isinstance(construction_element,str):
# construction_element=self.element(construction_element,label='Construction')
# layerId_elements=self.child_elements(construction_element,'LayerId')
# layer_elements=[self.element(layerId_element.get('layerIdRef'),'Layer')
# for layerId_element in layerId_elements]
# return layer_elements
#
# def construction_materials(self,construction_element):
# "Returns the layer elements of a construction"
# if isinstance(construction_element,str):
# construction_element=self.element(construction_element,label='Construction')
# layer_elements=self.construction_layers(construction_element)
# material_elements=[]
# for layer_element in layer_elements:
# material_elements+=self.layer_materials(layer_element)
# return material_elements
#
#
## LAYER FUNCTIONS
#
# def layer_materials(self,layer_element):
# "Returns the layer elements of a construction"
# if isinstance(layer_element,str):
# layer_element=self.element(layer_element,label='Layer')
# materialId_elements=self.child_elements(layer_element,'MaterialId')
# material_elements=[self.element(materialId_element.get('materialIdRef'),'Material')
# for materialId_element in materialId_elements]
# return material_elements
#
#
#
## OPENING FUNCTIONS
#
# def opening_coordinates(self,opening_element):
# """Returns a list of coordinate tuples
#
# Arguments:
    # - opening_element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
# if isinstance(opening_element,str):
# opening_element=self.element(opening_element,label='Opening')
# l=[]
# st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint'
# cartesian_points=self.xpath(opening_element,st)
# for cartesian_point in cartesian_points:
# st='./gbxml:Coordinate'
# coordinates=self.xpath(cartesian_point,st)
# t=(float(self.text_value(coordinates[0])),
# float(self.text_value(coordinates[1])),
# float(self.text_value(coordinates[2])))
# l.append(t)
# return l
#
## SURFACE FUNCTIONS
#
# def surface_azimuth(self,surface_element):
# """Returns the azimuth of a surface
#
# Arguments:
    # - surface_element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
#
# Return value:
# - azimuth (float) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# l=self.xpath(surface_element,'./gbxml:RectangularGeometry/gbxml:Azimuth')
# if len(l)>0:
# azimuth=l[0]
# return float(self.text_value(azimuth))
#
#
# def surface_coordinates(self,surface_element):
# """Returns a list of coordinate tuples
#
# Arguments:
    # - surface_element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# l=[]
# st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint'
# cartesian_points=self.xpath(surface_element,st)
# for cartesian_point in cartesian_points:
# st='./gbxml:Coordinate'
# coordinates=self.xpath(cartesian_point,st)
# t=(float(self.text_value(coordinates[0])),
# float(self.text_value(coordinates[1])),
# float(self.text_value(coordinates[2])))
# l.append(t)
# return l
#
#
# def surface_inner_space(self,surface_element):
# """Returns the inner Space element of a Surface, or None
#
# Arguments:
    # - surface_element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
#
# Return value:
# - space (lxml._Element) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# adjacentSpaceIds=self.child_elements(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>0:
# adjacentSpaceId=adjacentSpaceIds[0]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return self.element(spaceIdRef)
#
#
# def surface_outer_space(self,surface_element):
# """Returns the outer Space element of a Surface, or None
#
# Arguments:
    # - surface_element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
#
# Return value:
# - space (lxml._Element) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# adjacentSpaceIds=self.child_elements(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>1:
# adjacentSpaceId=adjacentSpaceIds[1]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return self.element(spaceIdRef)
#
#
# def surface_tilt(self,surface_element):
# """Returns the tilt of a surface
#
# Arguments:
    # - surface_element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
#
# Return value:
# - tilt (float) or None
#
# """
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# l=self.xpath(surface_element,'./gbxml:RectangularGeometry/gbxml:Tilt')
# if len(l)>0:
# tilt=l[0]
# return float(self.text_value(tilt))
#
# def surface_construction(self,surface_element):
# "Returns the construction element of a surface"
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# construction_id=surface_element.get('constructionIdRef')
# construction_element=self.element(construction_id,'Construction')
# return construction_element
#
# def surface_layers(self,surface_element):
# "Returns the layer elements of a surface"
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# construction_element=self.surface_construction(surface_element)
# layer_elements=self.construction_layers(construction_element)
# return layer_elements
#
# def surface_materials(self,surface_element):
# "Returns the layer elements of a surface"
# if isinstance(surface_element,str):
# surface_element=self.element(surface_element,label='Surface')
# construction_element=self.surface_construction(surface_element)
# material_elements=self.construction_materials(construction_element)
# return material_elements
#
#
#
#
#
#
#
#
#
#
### SPACE FUNCTIONS
##
## def set_space_id(self,space_element,id):
## """Sets a new id attribute for a Space element and updates all links
##
##
## """
## if isinstance(space_element,str):
## space_element=self.element(space_element)
## #get old id
## old_id=space_element.get('id')
## #set new id
## space_element.set('id',id)
## #find all elements with attribute spaceRefId=old_id
## st='.//gbxml:*[@spaceIdRef="%s"]' % old_id
## l=self.xpath(self.root(),st)
## #update with id
## for e in l:
## e.set('spaceIdRef',id)
## #return new id
## return id
#
#
## WINDOWTYPE FUNCTIONS
#
# def windowType_materials(self,windowType_element):
# """Returns the Glaze and Gap elements of a windowType in order
#
# Arguments:
# - windowType_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
#
# Return value:
# - glaze_and_gap_elements (list)
#
# """
# l=[]
# if isinstance(windowType_element,str):
# windowType_element=self.element(windowType_element,label='WindowType')
# l=self.child_elements(windowType_element)
# return [x for x in l if self.label(x) in ['Glaze','Gap']]
#
#
## ZONE FUNCTIONS
#
# def add_zone(self,zone_id,space_ids):
# """Adds a zone element and the IdRef links to it.
#
# Arguments:
# - zone_id (str): the id of the new zone
# - space_ids (str or list): the ids of the spaces that link to the zone
# """
# #adds element
# parent=self.root()
# e=self.add_element(parent,'Zone')
# self.set_attribute(e,'id',zone_id)
# #adds links
# if isinstance(space_ids,str):
# space_ids=[space_ids]
# for space_id in space_ids:
# space=self.element(space_id,'Space')
# self.set_attribute(space,'zoneIdRef',zone_id)
# #returns the new zone element
# return e
#
#
# def remove_zone(self,zone_element):
# """Removes a Zone element and all IdRef links to the zone.
#
# Arguments:
# - zone_element (lxml._Element or str): This a lxml._Element object
# or a string with the element id.
# """
# #find id
# if isinstance(zone_element,str):
# id=zone_element
# else:
# id=zone_element.get('id')
# #find all elements with attribute zoneRefId=id
# st='.//gbxml:*[@zoneIdRef="%s"]' % id
# l=self.xpath(self.root(),st)
# #removes all attributes zoneRefId=id
# for x in l:
# self.remove_attribute(x,'zoneIdRef')
# #remove node
# self.remove_element(zone_element)
#
#
#
#
# # LAYERS
#
#
#
## OUTPUT
#
#def xpath(element,st_xpath):
# """Returns the result of an xpath operation on the gbXML file
#
# Arguments
# - st_xpath (str): the xpath string
# - element (lxml.etree._Element): the element for the xpath operation. The
# default is the root element
#
# """
# return element.xpath(st_xpath,namespaces=ns)
#
## QUERYING
#
#def get_child(element,id=None,label='*'):
# """Returns the child of an element
#
# Arguments:
# - id (str): the id of the element
# - label (str): the label of the element
#
# """
# if id is None:
# return get_children(element,label)[0]
# else:
# st='./gbxml:%s[@id="%s"]' % (label,id)
# return xpath(element,st)[0]
#
#
#def get_child_text(element,label='*',dtype=None):
# "Returns the first child text value, or None"
# children=get_children(element,label)
# if children:
# if dtype is None:
# return children[0].text
# else:
# return dtype(children[0].text)
# else:
# return None
#
#def get_children(element,label='*'):
# """Returns the child elements of an element
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element or str): This is a lxml._Element object
# or a string with the element id.
# - label (str): the label of the element
# """
# st='./gbxml:%s' % label
# return xpath(element,st)
#
#def get_descendents(element,label='*'):
# """Returns the descendent elements of an element
#
# Return value is a list of elements
#
# Arguments:
# - element (lxml._Element): This a lxml._Element object
# - label (str): the label of the element
# """
# st='.//gbxml:%s' % label
# return xpath(element,st)
#
#def get_element(element,id,label='*'):
# """Returns an element from the gbXML file
# """
# st='//gbxml:%s[@id="%s"]' % (label,id)
# return xpath(element.getroottree(),st)[0]
#
#
## CONSTRUCTION FUNCTIONS
#
#def construction_layers(construction_element):
# "Returns the layer elements of a construction"
# layerId_elements=get_children(construction_element,'LayerId')
# layer_elements=[get_layer(layerId_element,
# layerId_element.get('layerIdRef'))
# for layerId_element in layerId_elements]
# return layer_elements
#
#def construction_materials(construction_element):
# "Returns the layer elements of a construction"
# layer_elements=construction_layers(construction_element)
# material_elements=[]
# for layer_element in layer_elements:
# material_elements+=layer_materials(layer_element)
# return material_elements
#
#
## LAYER FUNCTIONS
#
#def get_layer(element,id):
# root=element.getroottree()
# result=xpath(root,'./gbxml:Layer[@id="%s"]' % id)
# return result[0]
#
#def layer_materials(layer_element):
# "Returns the layer elements of a construction"
# materialId_elements=get_children(layer_element,'MaterialId')
# material_elements=[get_element(materialId_element,
# materialId_element.get('materialIdRef'),
# 'Material')
# for materialId_element in materialId_elements]
# return material_elements
#
## MATERIAL FUNCTIONS
#
#def get_material(element,id):
# root=element.getroottree()
# result=xpath(root,'./gbxml:Material[@id="%s"]' % id)
# return result[0]
#
#
## SURFACE FUNCTION
#
#def get_surface_coordinates(surface_element):
# """Returns a list of coordinate tuples
#
# Arguments:
# - surface_element (lxml._Element or str): This is a lxml._Element object
#
# Return value:
# - coordinates (list): a list where each item is a tuple of (x,y,z) coordinates.
# i.e. [(x1,y1,z1),(x2,y2,z2),(x3,y3,z3),...]
# or None
#
# """
# l=[]
# st='./gbxml:PlanarGeometry/gbxml:PolyLoop/gbxml:CartesianPoint'
# cartesian_points=xpath(surface_element,st)
# for cartesian_point in cartesian_points:
# st='./gbxml:Coordinate'
# coordinates=xpath(cartesian_point,st)
# t=(float(coordinates[0].text),
# float(coordinates[1].text),
# float(coordinates[2].text))
# l.append(t)
# return l
#
#def get_surface_inner_space(surface_element):
# """Returns the inner Space element of a Surface, or None
# """
# adjacentSpaceIds=get_children(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>0:
# adjacentSpaceId=adjacentSpaceIds[0]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return get_element(surface_element,spaceIdRef)
#
#def get_surface_outer_space(surface_element):
# """Returns the outer Space element of a Surface, or None
# """
# adjacentSpaceIds=get_children(surface_element,label='AdjacentSpaceId')
# if len(adjacentSpaceIds)>1:
# adjacentSpaceId=adjacentSpaceIds[1]
# spaceIdRef=adjacentSpaceId.get('spaceIdRef')
# return get_element(surface_element,spaceIdRef)
#
#
#
#
#
#
#
#
## def child_node_text(self,id,label='*'):
## """Returns a dictionary listing any child nodes which have text
##
## Return values is {tag:text}
##
## """
## e=self._element(id,label)
## d={}
## for e1 in e:
## if e1.text:
## label=e1.tag.split('}')[1]
## d[label]=e1.text
## return d
##
##
## def child_node_values(self,id,label='*'):
## """Returns a dictionary listing any child nodes which have text
##
## Node text values are converted from strings into their datatype
## i.e. the text from an 'Area' node is converted into a float
##
## Return values is {label:value}
##
## """
## d=self.xml.child_node_text(id=id,label=label)
## d1={}
## for k,v in d.items():
## xml_type=self.xsd.element_type(k)
## #print(xml_type)
## if xml_type=='xsd:string':
## value=v
## elif xml_type=='xsd:decimal':
## value=float(v)
## else:
## raise Exception(xml_type)
## d1[k]=value
## return d1
##
##
##
## def node_attributes(self,id,label='*'):
## "Returns the attribute dict of node with id 'id'"
## e=self._element(id,label)
## return dict(e.attrib)
##
##
## def node_ids(self,label='*'):
## """Returns the ids of all nodes
##
## Arguments:
## label (str): the node tag to filter on
##
## """
## #filter by label
## st='//a:%s' % (label)
## l=self._ElementTree.getroot().xpath(st,namespaces=self.ns)
## return [x.get('id') for x in l]
##
##
## def parent_object(self,id,label='*'):
## """Returns the parent of an element
##
## Return value is a dictionary {'id':value,'label':value}
##
## """
## e=self._element(id,label)
## parent=e.getparent()
## return {'id':self._id(parent),
## 'label':self._label(parent)}
##
##
##
##
##
## def surface_adjacent_objects(self,id):
## """Returns the objects adjacent to the surface
##
## Return value is a 2 item list of dictionaries [{'id':value,'label':value}]
##
## """
## label='Surface'
## e=self._element(id,label)
## st='./a:AdjacentSpaceId/@spaceIdRef'
## l=e.xpath(st,namespaces=self.ns)
## l=l+[None]*(2-len(l))
## surfaceType=e.get('surfaceType')
## d=\
## {'InteriorWall':None,
## 'ExteriorWall':{'id':'Climate1','label':'Climate'},
## 'Roof':{'id':'Climate1','label':'Climate'},
## 'InteriorFloor':None,
## 'ExposedFloor':{'id':'Climate1','label':'Climate'},
## 'Shade':{'id':'Climate1','label':'Climate'},
## 'UndergroundWall':{'id':'Ground1','label':'Ground'},
## 'UndergroundSlab':{'id':'Ground1','label':'Ground'},
## 'Ceiling':None,
## 'Air':None,
## 'UndergroundCeiling':{'id':'Ground1','label':'Ground'},
## 'RaisedFloor':{'id':'Climate1','label':'Climate'},
## 'SlabOnGrade':{'id':'Ground1','label':'Ground'},
## 'FreestandingColumn':None,
## 'EmbeddedColumn':None
## }
## l1=[]
## for x in l:
## if not x is None:
## l1.append({'id':x,'label':'Space'})
## else:
## l1.append(d[surfaceType])
## return l1
##
##
## def surface_building_ids(self,id):
## """Returns a list of building ids that the surface belongs to
## """
## l=self.surface_adjacent_objects(id)
## l=[self.parent_object(x['id'])['id'] for x in l if x['label']=='Space']
## return l
##
##
##
#
## def elements(xml, tag='*'):
## """Returns a list of lxml elements, filtered by tag
##
## Arguments:
## xml (lxml.etree._ElementTree): the gbXML instance
## tag (str): the tag name, not including the namespace
##
## """
## st='//a:%s' % (tag)
## #print(st)
## return xml.getroot().xpath(st,namespaces=ns)
#
#
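# A minimal usage sketch for the Gbxml class above (the file name is
# illustrative, not part of the package):
#   g = Gbxml('my_model.xml')            # or Gbxml() for a blank document
#   for campus_id in g.get_ids('Campus'):
#       print(g.get_attributes(campus_id))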
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import sys
import six
from six.moves import range
if sys.version_info[0] < 3:
  try:
    from cStringIO import StringIO as BytesIO
  except ImportError:
    from StringIO import StringIO as BytesIO
import six.moves.copyreg as copyreg
else:
from io import BytesIO
import copyreg
import struct
import weakref
from google.net.proto2.python.internal import containers
from google.net.proto2.python.internal import decoder
from google.net.proto2.python.internal import encoder
from google.net.proto2.python.internal import enum_type_wrapper
from google.net.proto2.python.internal import message_listener as message_listener_mod
from google.net.proto2.python.internal import type_checkers
from google.net.proto2.python.internal import wire_format
from google.net.proto2.python.public import descriptor as descriptor_mod
from google.net.proto2.python.public import message as message_mod
from google.net.proto2.python.public import text_format
_FieldDescriptor = descriptor_mod.FieldDescriptor
def NewMessage(bases, descriptor, dictionary):
_AddClassAttributesForNestedExtensions(descriptor, dictionary)
_AddSlots(descriptor, dictionary)
return bases
def InitMessage(descriptor, cls):
cls._decoders_by_tag = {}
cls._extensions_by_name = {}
cls._extensions_by_number = {}
if (descriptor.has_options and
descriptor.GetOptions().message_set_wire_format):
cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
decoder.MessageSetItemDecoder(cls._extensions_by_number), None)
for field in descriptor.fields:
_AttachFieldHelpers(cls, field)
_AddEnumValues(descriptor, cls)
_AddInitMethod(descriptor, cls)
_AddPropertiesForFields(descriptor, cls)
_AddPropertiesForExtensions(descriptor, cls)
_AddStaticMethods(cls)
_AddMessageMethods(descriptor, cls)
_AddPrivateHelperMethods(descriptor, cls)
copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _PropertyName(proto_field_name):
"""Returns the name of the public property attribute which
clients can use to get and (in some cases) set the value
of a protocol message field.
Args:
proto_field_name: The protocol message field name, exactly
as it appears (or would appear) in a .proto file.
"""
return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid."""
if not isinstance(extension_handle, _FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if not extension_handle.containing_type:
raise KeyError('"%s" is missing a containing_type.'
% extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
"""Adds a __slots__ entry to dictionary, containing the names of all valid
attributes for this message type.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__slots__'] = ['_cached_byte_size',
'_cached_byte_size_dirty',
'_fields',
'_unknown_fields',
'_is_present_in_parent',
'_listener',
'_listener_for_children',
'__weakref__',
'_oneofs']
def _IsMessageSetExtension(field):
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _AttachFieldHelpers(cls, field_descriptor):
is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
is_packed = (field_descriptor.has_options and
field_descriptor.GetOptions().packed)
if _IsMessageSetExtension(field_descriptor):
field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
sizer = encoder.MessageSetItemSizer(field_descriptor.number)
else:
field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed)
field_descriptor._encoder = field_encoder
field_descriptor._sizer = sizer
field_descriptor._default_constructor = _DefaultValueConstructorForField(
field_descriptor)
def AddDecoder(wiretype, is_packed):
tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
cls._decoders_by_tag[tag_bytes] = (
type_checkers.TYPE_TO_DECODER[field_descriptor.type](
field_descriptor.number, is_repeated, is_packed,
field_descriptor, field_descriptor._default_constructor),
field_descriptor if field_descriptor.containing_oneof is not None
else None)
AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
False)
if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in six.iteritems(extension_dict):
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
Also exporting a class-level object that can name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
"""
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number)
def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener(message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
return field.default_value
return MakeScalarDefault
def _ReraiseTypeErrorWithFieldName(message_name, field_name):
"""Re-raise the currently-handled TypeError with the field name added."""
exc = sys.exc_info()[1]
if len(exc.args) == 1 and type(exc) is TypeError:
exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))
six.reraise(type(exc), exc, sys.exc_info()[2])
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
fields = message_descriptor.fields
def init(self, **kwargs):
self._cached_byte_size = 0
self._cached_byte_size_dirty = len(kwargs) > 0
self._fields = {}
self._oneofs = {}
self._unknown_fields = ()
self._is_present_in_parent = False
self._listener = message_listener_mod.NullMessageListener()
self._listener_for_children = _Listener(self)
for field_name, field_value in six.iteritems(kwargs):
field = _GetFieldByName(message_descriptor, field_name)
if field is None:
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(message_descriptor.name, field_name))
if field.label == _FieldDescriptor.LABEL_REPEATED:
copy = field._default_constructor(self)
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
for val in field_value:
copy.add().MergeFrom(val)
else:
copy.extend(field_value)
self._fields[field] = copy
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
copy = field._default_constructor(self)
try:
copy.MergeFrom(field_value)
except TypeError:
_ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
self._fields[field] = copy
else:
try:
setattr(self, field_name, field_value)
except TypeError:
_ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
init.__module__ = None
init.__doc__ = None
cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
def _AddPropertiesForFields(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
for field in descriptor.fields:
_AddPropertiesForField(field, cls)
if descriptor.is_extendable:
cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
"""Adds a public property for a protocol message field.
Clients can use this property to get and (in the case
of non-repeated scalar fields) directly set the value
of a protocol message field.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
assert _FieldDescriptor.MAX_CPPTYPE == 10
constant_name = field.name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, field.number)
if field.label == _FieldDescriptor.LABEL_REPEATED:
_AddPropertiesForRepeatedField(field, cls)
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
_AddPropertiesForNonRepeatedCompositeField(field, cls)
else:
_AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
"""Adds a public property for a "repeated" protocol message field. Clients
can use this property to get the value of the field, which will be either a
_RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
below).
Note that when clients add values to these containers, we perform
type-checking in the case of repeated scalar fields, and we also set any
necessary "has" bits as a side-effect.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
field_value = field._default_constructor(self)
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
def setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
"""Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
type_checker = type_checkers.GetTypeChecker(field)
default_value = field.default_value
def getter(self):
return self._fields.get(field, default_value)
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
def field_setter(self, new_value):
self._fields[field] = type_checker.CheckValue(new_value)
if not self._cached_byte_size_dirty:
self._Modified()
if field.containing_oneof is not None:
def setter(self, new_value):
field_setter(self, new_value)
self._UpdateOneofState(field)
else:
setter = field_setter
setter.__module__ = None
setter.__doc__ = 'Setter for %s.' % proto_field_name
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
"""Adds a public property for a nonrepeated, composite protocol message field.
A composite field is a "group" or "message" field.
Clients can use this property to get the value of the field, but cannot
assign to the property directly.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
"""
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
message_type = field.message_type
def getter(self):
field_value = self._fields.get(field)
if field_value is None:
field_value = message_type._concrete_class()
field_value._SetListener(
_OneofListener(self, field)
if field.containing_oneof is not None
else self._listener_for_children)
field_value = self._fields.setdefault(field, field_value)
return field_value
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
def setter(self, new_value):
raise AttributeError('Assignment not allowed to composite field '
'"%s" in protocol message object.' % proto_field_name)
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in six.iteritems(extension_dict):
constant_name = extension_name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, extension_field.number)
def _AddStaticMethods(cls):
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
_AttachFieldHelpers(cls, extension_handle)
actual_handle = cls._extensions_by_number.setdefault(
extension_handle.number, extension_handle)
if actual_handle is not extension_handle:
raise AssertionError(
'Extensions "%s" and "%s" both try to extend message type "%s" with '
'field number %d.' %
(extension_handle.full_name, actual_handle.full_name,
cls.DESCRIPTOR.full_name, extension_handle.number))
cls._extensions_by_name[extension_handle.full_name] = extension_handle
handle = extension_handle
if _IsMessageSetExtension(handle):
cls._extensions_by_name[
extension_handle.message_type.full_name] = extension_handle
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(s):
message = cls()
message.MergeFromString(s)
return message
cls.FromString = staticmethod(FromString)
def _IsPresent(item):
"""Given a (FieldDescriptor, value) tuple from _fields, return true if the
value should be included in the list returned by ListFields()."""
if item[0].label == _FieldDescriptor.LABEL_REPEATED:
return bool(item[1])
elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
return item[1]._is_present_in_parent
else:
return True
def _AddListFieldsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ListFields(self):
all_fields = [item for item in six.iteritems(self._fields) if _IsPresent(item)]
    all_fields.sort(key=lambda item: item[0].number)
return all_fields
cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
singular_fields = {}
for field in message_descriptor.fields:
if field.label != _FieldDescriptor.LABEL_REPEATED:
singular_fields[field.name] = field
for field in message_descriptor.oneofs:
singular_fields[field.name] = field
def HasField(self, field_name):
try:
field = singular_fields[field_name]
except KeyError:
raise ValueError(
'Protocol message has no singular "%s" field.' % field_name)
if isinstance(field, descriptor_mod.OneofDescriptor):
try:
return HasField(self, self._oneofs[field].name)
except KeyError:
return False
else:
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(field)
return value is not None and value._is_present_in_parent
else:
return field in self._fields
cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
try:
field = message_descriptor.oneofs_by_name[field_name]
if field in self._oneofs:
field = self._oneofs[field]
else:
return
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field in self._fields:
del self._fields[field]
if self._oneofs.get(field.containing_oneof, None) is field:
del self._oneofs[field.containing_oneof]
self._Modified()
cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def ClearExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle in self._fields:
del self._fields[extension_handle]
self._Modified()
cls.ClearExtension = ClearExtension
def _AddClearMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def Clear(self):
self._fields = {}
self._unknown_fields = ()
self._oneofs = {}
self._Modified()
cls.Clear = Clear
def _AddHasExtensionMethod(cls):
"""Helper for _AddMessageMethods()."""
def HasExtension(self, extension_handle):
_VerifyExtensionHandle(self, extension_handle)
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
raise KeyError('"%s" is repeated.' % extension_handle.full_name)
if extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
value = self._fields.get(extension_handle)
return value is not None and value._is_present_in_parent
else:
return extension_handle in self._fields
cls.HasExtension = HasExtension
def _AddEqualsMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __eq__(self, other):
if (not isinstance(other, message_mod.Message) or
other.DESCRIPTOR != self.DESCRIPTOR):
return False
if self is other:
return True
    if self.ListFields() != other.ListFields():
return False
unknown_fields = list(self._unknown_fields)
unknown_fields.sort()
other_unknown_fields = list(other._unknown_fields)
other_unknown_fields.sort()
return unknown_fields == other_unknown_fields
cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __str__(self):
return text_format.MessageToString(self)
cls.__str__ = __str__
def _AddUnicodeMethod(unused_message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def __unicode__(self):
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
cls.__unicode__ = __unicode__
def _AddSetListenerMethod(cls):
"""Helper for _AddMessageMethods()."""
def SetListener(self, listener):
if listener is None:
self._listener = message_listener_mod.NullMessageListener()
else:
self._listener = listener
cls._SetListener = SetListener
def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
"""
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
def _AddByteSizeMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ByteSize(self):
if not self._cached_byte_size_dirty:
return self._cached_byte_size
size = 0
for field_descriptor, field_value in self.ListFields():
size += field_descriptor._sizer(field_value)
for tag_bytes, value_bytes in self._unknown_fields:
size += len(tag_bytes) + len(value_bytes)
self._cached_byte_size = size
self._cached_byte_size_dirty = False
self._listener_for_children.dirty = False
return size
cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializeToString(self):
if not self.IsInitialized():
raise message_mod.EncodeError(
'Message %s is missing required fields: %s' % (
self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
return self.SerializePartialToString()
cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializePartialToString(self):
out = BytesIO()
self._InternalSerialize(out.write)
return out.getvalue()
cls.SerializePartialToString = SerializePartialToString
def InternalSerialize(self, write_bytes):
for field_descriptor, field_value in self.ListFields():
field_descriptor._encoder(write_bytes, field_value)
for tag_bytes, value_bytes in self._unknown_fields:
write_bytes(tag_bytes)
write_bytes(value_bytes)
cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def MergeFromString(self, serialized):
length = len(serialized)
try:
if self._InternalParse(serialized, 0, length) != length:
raise message_mod.DecodeError('Unexpected end-group tag.')
except (IndexError, TypeError):
raise message_mod.DecodeError('Truncated message.')
except struct.error as e:
raise message_mod.DecodeError(e)
return length
cls.MergeFromString = MergeFromString
local_ReadTag = decoder.ReadTag
local_SkipField = decoder.SkipField
decoders_by_tag = cls._decoders_by_tag
def InternalParse(self, buffer, pos, end):
self._Modified()
field_dict = self._fields
unknown_field_list = self._unknown_fields
while pos != end:
(tag_bytes, new_pos) = local_ReadTag(buffer, pos)
field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None))
if field_decoder is None:
value_start_pos = new_pos
new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
if new_pos == -1:
return pos
if not unknown_field_list:
unknown_field_list = self._unknown_fields = []
unknown_field_list.append((tag_bytes, buffer[value_start_pos:new_pos]))
pos = new_pos
else:
pos = field_decoder(buffer, new_pos, end, self, field_dict)
if field_desc:
self._UpdateOneofState(field_desc)
return pos
cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class."""
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in list(self._fields.items()):
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = []
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if field.label == _FieldDescriptor.LABEL_REPEATED:
for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
            errors += [prefix + error for error in sub_errors]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
          errors += [prefix + error for error in sub_errors]
return errors
cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
"expected %s got %s." % (cls.__name__, type(msg).__name__))
assert msg is not self
self._Modified()
fields = self._fields
for field, value in six.iteritems(msg._fields):
if field.label == LABEL_REPEATED:
field_value = fields.get(field)
if field_value is None:
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
elif field.cpp_type == CPPTYPE_MESSAGE:
if value._is_present_in_parent:
field_value = fields.get(field)
if field_value is None:
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
else:
self._fields[field] = value
if msg._unknown_fields:
if not self._unknown_fields:
self._unknown_fields = []
self._unknown_fields.extend(msg._unknown_fields)
cls.MergeFrom = MergeFrom
def _AddWhichOneofMethod(message_descriptor, cls):
def WhichOneof(self, oneof_name):
"""Returns the name of the currently set field inside a oneof, or None."""
try:
field = message_descriptor.oneofs_by_name[oneof_name]
except KeyError:
raise ValueError(
'Protocol message has no oneof "%s" field.' % oneof_name)
nested_field = self._oneofs.get(field, None)
if nested_field is not None and self.HasField(nested_field.name):
return nested_field.name
else:
return None
cls.WhichOneof = WhichOneof
def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls."""
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddClearMethod(message_descriptor, cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddSetListenerMethod(cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
_AddWhichOneofMethod(message_descriptor, cls)
def _AddPrivateHelperMethods(message_descriptor, cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
def _UpdateOneofState(self, field):
"""Sets field as the active field in its containing oneof.
Will also delete currently active field in the oneof, if it is different
from the argument. Does not mark the message as modified.
"""
other_field = self._oneofs.setdefault(field.containing_oneof, field)
if other_field is not field:
del self._fields[other_field]
self._oneofs[field.containing_oneof] = field
cls._Modified = Modified
cls.SetInParent = Modified
cls._UpdateOneofState = _UpdateOneofState
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
self._parent_message_weakref._Modified()
except ReferenceError:
pass
class _OneofListener(_Listener):
"""Special listener implementation for setting composite oneof fields."""
def __init__(self, parent_message, field):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
field: The descriptor of the field being set in the parent message.
"""
super(_OneofListener, self).__init__(parent_message)
self._field = field
def Modified(self):
"""Also updates the state of the containing oneof in the parent message."""
try:
self._parent_message_weakref._UpdateOneofState(self._field)
super(_OneofListener, self).Modified()
except ReferenceError:
pass
class _ExtensionDict(object):
"""Dict-like container for supporting an indexable "Extensions"
field on proto instances.
Note that in all cases we expect extension handles to be
FieldDescriptors.
"""
def __init__(self, extended_message):
"""extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message
def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
return extension_handle.default_value
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
my_fields = self._extended_message.ListFields()
other_fields = other._extended_message.ListFields()
    # ListFields() returns (FieldDescriptor, value) pairs, so index into each
    # pair to keep only the extension fields.
    my_fields = [field for field in my_fields if field[0].is_extension]
    other_fields = [field for field in other_fields if field[0].is_extension]
return my_fields == other_fields
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def __setitem__(self, extension_handle, value):
"""If extension_handle specifies a non-repeated, scalar extension
field, sets the value of that field.
"""
_VerifyExtensionHandle(self._extended_message, extension_handle)
if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
raise TypeError(
'Cannot assign to extension "%s" because it is a repeated or '
'composite type.' % extension_handle.full_name)
type_checker = type_checkers.GetTypeChecker(
extension_handle)
self._extended_message._fields[extension_handle] = (
type_checker.CheckValue(value))
self._extended_message._Modified()
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_name.get(name, None)
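# --- Hedged usage sketch (editor's addition, not part of python_message.py).
# --- It shows the user-visible behavior the helpers above attach to generated
# --- classes, using the stock well-known type struct_pb2.Value (assumed to be
# --- shipped with this protobuf release); its "kind" oneof exercises
# --- _UpdateOneofState, WhichOneof, HasField and ListFields.
def _demo_generated_message_behavior():
  from google.protobuf import struct_pb2

  value = struct_pb2.Value()
  assert value.WhichOneof('kind') is None
  value.number_value = 1.5            # scalar setter generated above
  assert value.HasField('number_value')
  assert value.WhichOneof('kind') == 'number_value'
  value.string_value = 'hi'           # assigning another oneof member clears it
  assert not value.HasField('number_value')
  # ListFields() returns only the present fields, sorted by field number.
  [(field, field_value)] = value.ListFields()
  assert field.name == 'string_value' and field_value == 'hi'
  # Round-trip through the serialization methods attached above.
  assert struct_pb2.Value.FromString(value.SerializeToString()) == value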
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from REL.ner.base import NERBase, Span
from REL.ner.flair_wrapper import load_flair_ner
from REL.ner.ngram import Cmns
|
import discord
from discord.ext import commands
from discord.ext.commands import has_permissions, MissingPermissions
bot = commands.Bot(command_prefix=".")
bot.remove_command("help")
@bot.event
async def on_ready():
print("Bot running with:")
print("Username: ", bot.user.name)
print("User ID: ", bot.user.id)
bot.load_extension('cogs.tickets')
# "TOKEN" is a placeholder; supply the real bot token before running.
bot.run("TOKEN")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-01-14 21:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('dhis2', '0005_delete_jsonapilog'),
]
operations = [
migrations.CreateModel(
name='SQLDhis2Connection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=255, unique=True)),
('server_url', models.CharField(max_length=255, null=True)),
('username', models.CharField(max_length=255)),
('password', models.CharField(max_length=255, null=True)),
('skip_cert_verify', models.BooleanField(default=False)),
],
),
]
|
"""Routines related to PyPI, indexes"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import enum
import functools
import itertools
import logging
import re
from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.tags import Tag
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.index.collector import LinkCollector, parse_links
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.models.wheel import Wheel
from pip._internal.req import InstallRequirement
from pip._internal.utils._log import getLogger
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import build_netloc
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
logger = getLogger(__name__)
BuildTag = Union[Tuple[()], Tuple[int, str]]
CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
def _check_link_requires_python(
link: Link,
version_info: Tuple[int, int, int],
ignore_requires_python: bool = False,
) -> bool:
"""
Return whether the given Python version is compatible with a link's
"Requires-Python" value.
:param version_info: A 3-tuple of ints representing the Python
major-minor-micro version to check.
:param ignore_requires_python: Whether to ignore the "Requires-Python"
value if the given Python version isn't compatible.
"""
try:
is_compatible = check_requires_python(
link.requires_python,
version_info=version_info,
)
except specifiers.InvalidSpecifier:
logger.debug(
"Ignoring invalid Requires-Python (%r) for link: %s",
link.requires_python,
link,
)
else:
if not is_compatible:
version = ".".join(map(str, version_info))
if not ignore_requires_python:
logger.verbose(
"Link requires a different Python (%s not in: %r): %s",
version,
link.requires_python,
link,
)
return False
logger.debug(
"Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
version,
link.requires_python,
link,
)
return True
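# --- Hedged usage sketch (editor's addition). The URL is hypothetical, and
# --- Link and this helper are pip-internal, so they may change between
# --- releases.
def _demo_check_link_requires_python() -> None:
    link = Link(
        "https://files.example.com/foo-1.0-py3-none-any.whl",
        requires_python=">=3.8",
    )
    # A compatible interpreter passes the check.
    assert _check_link_requires_python(link, version_info=(3, 9, 0))
    # An incompatible one fails it, unless the caller opts to ignore it.
    assert not _check_link_requires_python(link, version_info=(3, 6, 0))
    assert _check_link_requires_python(
        link, version_info=(3, 6, 0), ignore_requires_python=True
    )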
class LinkType(enum.Enum):
candidate = enum.auto()
different_project = enum.auto()
yanked = enum.auto()
format_unsupported = enum.auto()
format_invalid = enum.auto()
platform_mismatch = enum.auto()
requires_python_mismatch = enum.auto()
class LinkEvaluator:
"""
Responsible for evaluating links for a particular project.
"""
_py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
# that decision to be made explicit in the calling code, which helps
# people when reading the code.
def __init__(
self,
project_name: str,
canonical_name: str,
formats: FrozenSet[str],
target_python: TargetPython,
allow_yanked: bool,
ignore_requires_python: Optional[bool] = None,
) -> None:
"""
:param project_name: The user supplied package name.
:param canonical_name: The canonical package name.
:param formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
:param target_python: The target Python interpreter to use when
evaluating link compatibility. This is used, for example, to
check wheel compatibility, as well as when checking the Python
version, e.g. the Python version embedded in a link filename
(or egg fragment) and against an HTML link's optional PEP 503
"data-requires-python" attribute.
:param allow_yanked: Whether files marked as yanked (in the sense
of PEP 592) are permitted to be candidates for install.
:param ignore_requires_python: Whether to ignore incompatible
PEP 503 "data-requires-python" values in HTML links. Defaults
to False.
"""
if ignore_requires_python is None:
ignore_requires_python = False
self._allow_yanked = allow_yanked
self._canonical_name = canonical_name
self._ignore_requires_python = ignore_requires_python
self._formats = formats
self._target_python = target_python
self.project_name = project_name
def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
"""
Determine whether a link is a candidate for installation.
:return: A tuple (result, detail), where *result* is an enum
representing whether the evaluation found a candidate, or the reason
why one is not found. If a candidate is found, *detail* will be the
candidate's version string; if one is not found, it contains the
reason the link fails to qualify.
"""
version = None
if link.is_yanked and not self._allow_yanked:
reason = link.yanked_reason or "<none given>"
return (LinkType.yanked, f"yanked for reason: {reason}")
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
return (LinkType.format_unsupported, "not a file")
if ext not in SUPPORTED_EXTENSIONS:
return (
LinkType.format_unsupported,
f"unsupported archive format: {ext}",
)
if "binary" not in self._formats and ext == WHEEL_EXTENSION:
reason = f"No binaries permitted for {self.project_name}"
return (LinkType.format_unsupported, reason)
if "macosx10" in link.path and ext == ".zip":
return (LinkType.format_unsupported, "macosx10 one")
if ext == WHEEL_EXTENSION:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
return (
LinkType.format_invalid,
"invalid wheel filename",
)
if canonicalize_name(wheel.name) != self._canonical_name:
reason = f"wrong project name (not {self.project_name})"
return (LinkType.different_project, reason)
supported_tags = self._target_python.get_tags()
if not wheel.supported(supported_tags):
# Include the wheel's tags in the reason string to
# simplify troubleshooting compatibility issues.
file_tags = ", ".join(wheel.get_formatted_file_tags())
reason = (
f"none of the wheel's tags ({file_tags}) are compatible "
f"(run pip debug --verbose to show compatible tags)"
)
return (LinkType.platform_mismatch, reason)
version = wheel.version
# This should be up by the self.ok_binary check, but see issue 2700.
if "source" not in self._formats and ext != WHEEL_EXTENSION:
reason = f"No sources permitted for {self.project_name}"
return (LinkType.format_unsupported, reason)
if not version:
version = _extract_version_from_fragment(
egg_info,
self._canonical_name,
)
if not version:
reason = f"Missing project version for {self.project_name}"
return (LinkType.format_invalid, reason)
match = self._py_version_re.search(version)
if match:
version = version[: match.start()]
py_version = match.group(1)
if py_version != self._target_python.py_version:
return (
LinkType.platform_mismatch,
"Python version is incorrect",
)
supports_python = _check_link_requires_python(
link,
version_info=self._target_python.py_version_info,
ignore_requires_python=self._ignore_requires_python,
)
if not supports_python:
reason = f"{version} Requires-Python {link.requires_python}"
return (LinkType.requires_python_mismatch, reason)
logger.debug("Found link %s, version: %s", link, version)
return (LinkType.candidate, version)
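# --- Hedged usage sketch (editor's addition). A plain sdist link should come
# --- back as a candidate whose detail is its version string; the URL is a
# --- made-up example.
def _demo_evaluate_link() -> None:
    evaluator = LinkEvaluator(
        project_name="foo",
        canonical_name="foo",
        formats=frozenset(["binary", "source"]),
        target_python=TargetPython(),
        allow_yanked=False,
    )
    result, detail = evaluator.evaluate_link(
        Link("https://files.example.com/foo-1.0.tar.gz")
    )
    assert result is LinkType.candidate and detail == "1.0"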
def filter_unallowed_hashes(
candidates: List[InstallationCandidate],
hashes: Hashes,
project_name: str,
) -> List[InstallationCandidate]:
"""
Filter out candidates whose hashes aren't allowed, and return a new
list of candidates.
If at least one candidate has an allowed hash, then all candidates with
either an allowed hash or no hash specified are returned. Otherwise,
the given candidates are returned.
Including the candidates with no hash specified when there is a match
allows a warning to be logged if there is a more preferred candidate
with no hash specified. Returning all candidates in the case of no
matches lets pip report the hash of the candidate that would otherwise
have been installed (e.g. permitting the user to more easily update
their requirements file with the desired hash).
"""
if not hashes:
logger.debug(
"Given no hashes to check %s links for project %r: "
"discarding no candidates",
len(candidates),
project_name,
)
# Make sure we're not returning back the given value.
return list(candidates)
matches_or_no_digest = []
# Collect the non-matches for logging purposes.
non_matches = []
match_count = 0
for candidate in candidates:
link = candidate.link
if not link.has_hash:
pass
elif link.is_hash_allowed(hashes=hashes):
match_count += 1
else:
non_matches.append(candidate)
continue
matches_or_no_digest.append(candidate)
if match_count:
filtered = matches_or_no_digest
else:
# Make sure we're not returning back the given value.
filtered = list(candidates)
if len(filtered) == len(candidates):
discard_message = "discarding no candidates"
else:
discard_message = "discarding {} non-matches:\n {}".format(
len(non_matches),
"\n ".join(str(candidate.link) for candidate in non_matches),
)
logger.debug(
"Checked %s links for project %r against %s hashes "
"(%s matches, %s no digest): %s",
len(candidates),
project_name,
hashes.digest_count,
match_count,
len(matches_or_no_digest) - match_count,
discard_message,
)
return filtered
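# --- Hedged sketch (editor's addition) of the filtering rules described in
# --- the docstring above; the URLs and digests are made-up placeholders.
def _demo_filter_unallowed_hashes() -> None:
    allowed = Hashes({"sha256": ["0" * 64]})
    good = InstallationCandidate(
        "foo", "1.0",
        Link("https://files.example.com/foo-1.0.tar.gz#sha256=" + "0" * 64),
    )
    no_digest = InstallationCandidate(
        "foo", "1.1", Link("https://files.example.com/foo-1.1.tar.gz")
    )
    mismatch = InstallationCandidate(
        "foo", "1.2",
        Link("https://files.example.com/foo-1.2.tar.gz#sha256=" + "f" * 64),
    )
    # One candidate matched, so the mismatch is discarded while the
    # digest-less candidate is kept (enabling the warning mentioned above).
    kept = filter_unallowed_hashes([good, no_digest, mismatch], allowed, "foo")
    assert kept == [good, no_digest]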
class CandidatePreferences:
"""
Encapsulates some of the preferences for filtering and sorting
InstallationCandidate objects.
"""
def __init__(
self,
prefer_binary: bool = False,
allow_all_prereleases: bool = False,
) -> None:
"""
:param allow_all_prereleases: Whether to allow all pre-releases.
"""
self.allow_all_prereleases = allow_all_prereleases
self.prefer_binary = prefer_binary
class BestCandidateResult:
"""A collection of candidates, returned by `PackageFinder.find_best_candidate`.
This class is only intended to be instantiated by CandidateEvaluator's
`compute_best_candidate()` method.
"""
def __init__(
self,
candidates: List[InstallationCandidate],
applicable_candidates: List[InstallationCandidate],
best_candidate: Optional[InstallationCandidate],
) -> None:
"""
:param candidates: A sequence of all available candidates found.
:param applicable_candidates: The applicable candidates.
:param best_candidate: The most preferred candidate found, or None
if no applicable candidates were found.
"""
assert set(applicable_candidates) <= set(candidates)
if best_candidate is None:
assert not applicable_candidates
else:
assert best_candidate in applicable_candidates
self._applicable_candidates = applicable_candidates
self._candidates = candidates
self.best_candidate = best_candidate
def iter_all(self) -> Iterable[InstallationCandidate]:
"""Iterate through all candidates."""
return iter(self._candidates)
def iter_applicable(self) -> Iterable[InstallationCandidate]:
"""Iterate through the applicable candidates."""
return iter(self._applicable_candidates)
class CandidateEvaluator:
"""
Responsible for filtering and sorting candidates for installation based
on what tags are valid.
"""
@classmethod
def create(
cls,
project_name: str,
target_python: Optional[TargetPython] = None,
prefer_binary: bool = False,
allow_all_prereleases: bool = False,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> "CandidateEvaluator":
"""Create a CandidateEvaluator object.
:param target_python: The target Python interpreter to use when
checking compatibility. If None (the default), a TargetPython
object will be constructed from the running Python.
:param specifier: An optional object implementing `filter`
(e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
versions.
:param hashes: An optional collection of allowed hashes.
"""
if target_python is None:
target_python = TargetPython()
if specifier is None:
specifier = specifiers.SpecifierSet()
supported_tags = target_python.get_tags()
return cls(
project_name=project_name,
supported_tags=supported_tags,
specifier=specifier,
prefer_binary=prefer_binary,
allow_all_prereleases=allow_all_prereleases,
hashes=hashes,
)
def __init__(
self,
project_name: str,
supported_tags: List[Tag],
specifier: specifiers.BaseSpecifier,
prefer_binary: bool = False,
allow_all_prereleases: bool = False,
hashes: Optional[Hashes] = None,
) -> None:
"""
:param supported_tags: The PEP 425 tags supported by the target
Python in order of preference (most preferred first).
"""
self._allow_all_prereleases = allow_all_prereleases
self._hashes = hashes
self._prefer_binary = prefer_binary
self._project_name = project_name
self._specifier = specifier
self._supported_tags = supported_tags
# Since the index of the tag in the _supported_tags list is used
# as a priority, precompute a map from tag to index/priority to be
# used in wheel.find_most_preferred_tag.
self._wheel_tag_preferences = {
tag: idx for idx, tag in enumerate(supported_tags)
}
def get_applicable_candidates(
self,
candidates: List[InstallationCandidate],
) -> List[InstallationCandidate]:
"""
Return the applicable candidates from a list of candidates.
"""
# Using None infers from the specifier instead.
allow_prereleases = self._allow_all_prereleases or None
specifier = self._specifier
versions = {
str(v)
for v in specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
(str(c.version) for c in candidates),
prereleases=allow_prereleases,
)
}
# Again, converting version to str to deal with debundling.
applicable_candidates = [c for c in candidates if str(c.version) in versions]
filtered_applicable_candidates = filter_unallowed_hashes(
candidates=applicable_candidates,
hashes=self._hashes,
project_name=self._project_name,
)
return sorted(filtered_applicable_candidates, key=self._sort_key)
def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
"""
Function to pass as the `key` argument to a call to sorted() to sort
InstallationCandidates by preference.
Returns a tuple such that tuples sorting as greater using Python's
default comparison operator are more preferred.
The preference is as follows:
First and foremost, candidates with allowed (matching) hashes are
always preferred over candidates without matching hashes. This is
because e.g. if the only candidate with an allowed hash is yanked,
we still want to use that candidate.
Second, excepting hash considerations, candidates that have been
yanked (in the sense of PEP 592) are always less preferred than
candidates that haven't been yanked. Then:
If not finding wheels, they are sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self._supported_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
valid_tags = self._supported_tags
support_num = len(valid_tags)
build_tag: BuildTag = ()
binary_preference = 0
link = candidate.link
if link.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(link.filename)
try:
pri = -(
wheel.find_most_preferred_tag(
valid_tags, self._wheel_tag_preferences
)
)
except ValueError:
raise UnsupportedWheel(
"{} is not a supported wheel for this platform. It "
"can't be sorted.".format(wheel.filename)
)
if self._prefer_binary:
binary_preference = 1
if wheel.build_tag is not None:
match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
pri = -(support_num)
has_allowed_hash = int(link.is_hash_allowed(self._hashes))
yank_value = -1 * int(link.is_yanked) # -1 for yanked.
return (
has_allowed_hash,
yank_value,
binary_preference,
candidate.version,
pri,
build_tag,
)
def sort_best_candidate(
self,
candidates: List[InstallationCandidate],
) -> Optional[InstallationCandidate]:
"""
Return the best candidate per the instance's sort order, or None if
no candidate is acceptable.
"""
if not candidates:
return None
best_candidate = max(candidates, key=self._sort_key)
return best_candidate
def compute_best_candidate(
self,
candidates: List[InstallationCandidate],
) -> BestCandidateResult:
"""
Compute and return a `BestCandidateResult` instance.
"""
applicable_candidates = self.get_applicable_candidates(candidates)
best_candidate = self.sort_best_candidate(applicable_candidates)
return BestCandidateResult(
candidates,
applicable_candidates=applicable_candidates,
best_candidate=best_candidate,
)
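# --- Hedged sketch (editor's addition): specifier filtering plus the version
# --- sort, using made-up sdist links and no hash checking.
def _demo_compute_best_candidate() -> None:
    evaluator = CandidateEvaluator.create(
        "foo", specifier=specifiers.SpecifierSet(">=1.0")
    )
    candidates = [
        InstallationCandidate(
            "foo", version, Link(f"https://files.example.com/foo-{version}.tar.gz")
        )
        for version in ("0.9", "1.0", "1.1")
    ]
    result = evaluator.compute_best_candidate(candidates)
    # "0.9" is excluded by the specifier; the applicable list is sorted with
    # the most preferred candidate last, and that candidate wins.
    assert [str(c.version) for c in result.iter_applicable()] == ["1.0", "1.1"]
    assert str(result.best_candidate.version) == "1.1"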
class PackageFinder:
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(
self,
link_collector: LinkCollector,
target_python: TargetPython,
allow_yanked: bool,
use_deprecated_html5lib: bool,
format_control: Optional[FormatControl] = None,
candidate_prefs: Optional[CandidatePreferences] = None,
ignore_requires_python: Optional[bool] = None,
) -> None:
"""
This constructor is primarily meant to be used by the create() class
method and from tests.
:param format_control: A FormatControl object, used to control
the selection of source packages / binary packages when consulting
the index and links.
:param candidate_prefs: Options to use when creating a
CandidateEvaluator object.
"""
if candidate_prefs is None:
candidate_prefs = CandidatePreferences()
format_control = format_control or FormatControl(set(), set())
self._allow_yanked = allow_yanked
self._candidate_prefs = candidate_prefs
self._ignore_requires_python = ignore_requires_python
self._link_collector = link_collector
self._target_python = target_python
self._use_deprecated_html5lib = use_deprecated_html5lib
self.format_control = format_control
# These are boring links that have already been logged somehow.
self._logged_links: Set[Tuple[Link, LinkType, str]] = set()
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
# that decision to be made explicit in the calling code, which helps
# people when reading the code.
@classmethod
def create(
cls,
link_collector: LinkCollector,
selection_prefs: SelectionPreferences,
target_python: Optional[TargetPython] = None,
*,
use_deprecated_html5lib: bool,
) -> "PackageFinder":
"""Create a PackageFinder.
:param selection_prefs: The candidate selection preferences, as a
SelectionPreferences object.
:param target_python: The target Python interpreter to use when
checking compatibility. If None (the default), a TargetPython
object will be constructed from the running Python.
"""
if target_python is None:
target_python = TargetPython()
candidate_prefs = CandidatePreferences(
prefer_binary=selection_prefs.prefer_binary,
allow_all_prereleases=selection_prefs.allow_all_prereleases,
)
return cls(
candidate_prefs=candidate_prefs,
link_collector=link_collector,
target_python=target_python,
allow_yanked=selection_prefs.allow_yanked,
format_control=selection_prefs.format_control,
ignore_requires_python=selection_prefs.ignore_requires_python,
use_deprecated_html5lib=use_deprecated_html5lib,
)
@property
def target_python(self) -> TargetPython:
return self._target_python
@property
def search_scope(self) -> SearchScope:
return self._link_collector.search_scope
@search_scope.setter
def search_scope(self, search_scope: SearchScope) -> None:
self._link_collector.search_scope = search_scope
@property
def find_links(self) -> List[str]:
return self._link_collector.find_links
@property
def index_urls(self) -> List[str]:
return self.search_scope.index_urls
@property
def trusted_hosts(self) -> Iterable[str]:
for host_port in self._link_collector.session.pip_trusted_origins:
yield build_netloc(*host_port)
@property
def allow_all_prereleases(self) -> bool:
return self._candidate_prefs.allow_all_prereleases
def set_allow_all_prereleases(self) -> None:
self._candidate_prefs.allow_all_prereleases = True
@property
def prefer_binary(self) -> bool:
return self._candidate_prefs.prefer_binary
def set_prefer_binary(self) -> None:
self._candidate_prefs.prefer_binary = True
def requires_python_skipped_reasons(self) -> List[str]:
reasons = {
detail
for _, result, detail in self._logged_links
if result == LinkType.requires_python_mismatch
}
return sorted(reasons)
def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
canonical_name = canonicalize_name(project_name)
formats = self.format_control.get_allowed_formats(canonical_name)
return LinkEvaluator(
project_name=project_name,
canonical_name=canonical_name,
formats=formats,
target_python=self._target_python,
allow_yanked=self._allow_yanked,
ignore_requires_python=self._ignore_requires_python,
)
def _sort_links(self, links: Iterable[Link]) -> List[Link]:
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen: Set[Link] = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
entry = (link, result, detail)
if entry not in self._logged_links:
# Put the link at the end so the reason is more visible and because
# the link string is usually very long.
logger.debug("Skipping link: %s: %s", detail, link)
self._logged_links.add(entry)
def get_install_candidate(
self, link_evaluator: LinkEvaluator, link: Link
) -> Optional[InstallationCandidate]:
"""
If the link is a candidate for install, convert it to an
InstallationCandidate and return it. Otherwise, return None.
"""
result, detail = link_evaluator.evaluate_link(link)
if result != LinkType.candidate:
self._log_skipped_link(link, result, detail)
return None
return InstallationCandidate(
name=link_evaluator.project_name,
link=link,
version=detail,
)
def evaluate_links(
self, link_evaluator: LinkEvaluator, links: Iterable[Link]
) -> List[InstallationCandidate]:
"""
Convert links that are candidates to InstallationCandidate objects.
"""
candidates = []
for link in self._sort_links(links):
candidate = self.get_install_candidate(link_evaluator, link)
if candidate is not None:
candidates.append(candidate)
return candidates
def process_project_url(
self, project_url: Link, link_evaluator: LinkEvaluator
) -> List[InstallationCandidate]:
logger.debug(
"Fetching project page and analyzing links: %s",
project_url,
)
html_page = self._link_collector.fetch_page(project_url)
if html_page is None:
return []
page_links = list(parse_links(html_page, self._use_deprecated_html5lib))
with indent_log():
package_links = self.evaluate_links(
link_evaluator,
links=page_links,
)
return package_links
@functools.lru_cache(maxsize=None)
def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
"""Find all available InstallationCandidate for project_name
This checks index_urls and find_links.
All versions found are returned as an InstallationCandidate list.
See LinkEvaluator.evaluate_link() for details on which files
are accepted.
"""
link_evaluator = self.make_link_evaluator(project_name)
collected_sources = self._link_collector.collect_sources(
project_name=project_name,
candidates_from_page=functools.partial(
self.process_project_url,
link_evaluator=link_evaluator,
),
)
page_candidates_it = itertools.chain.from_iterable(
source.page_candidates()
for sources in collected_sources
for source in sources
if source is not None
)
page_candidates = list(page_candidates_it)
file_links_it = itertools.chain.from_iterable(
source.file_links()
for sources in collected_sources
for source in sources
if source is not None
)
file_candidates = self.evaluate_links(
link_evaluator,
sorted(file_links_it, reverse=True),
)
if logger.isEnabledFor(logging.DEBUG) and file_candidates:
paths = []
for candidate in file_candidates:
assert candidate.link.url # we need to have a URL
try:
paths.append(candidate.link.file_path)
except Exception:
paths.append(candidate.link.url) # it's not a local file
logger.debug("Local files found: %s", ", ".join(paths))
# This is an intentional priority ordering
return file_candidates + page_candidates
def make_candidate_evaluator(
self,
project_name: str,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> CandidateEvaluator:
"""Create a CandidateEvaluator object to use."""
candidate_prefs = self._candidate_prefs
return CandidateEvaluator.create(
project_name=project_name,
target_python=self._target_python,
prefer_binary=candidate_prefs.prefer_binary,
allow_all_prereleases=candidate_prefs.allow_all_prereleases,
specifier=specifier,
hashes=hashes,
)
@functools.lru_cache(maxsize=None)
def find_best_candidate(
self,
project_name: str,
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> BestCandidateResult:
"""Find matches for the given project and specifier.
:param specifier: An optional object implementing `filter`
(e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
versions.
:return: A `BestCandidateResult` instance.
"""
candidates = self.find_all_candidates(project_name)
candidate_evaluator = self.make_candidate_evaluator(
project_name=project_name,
specifier=specifier,
hashes=hashes,
)
return candidate_evaluator.compute_best_candidate(candidates)
def find_requirement(
self, req: InstallRequirement, upgrade: bool
) -> Optional[InstallationCandidate]:
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a InstallationCandidate if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
hashes = req.hashes(trust_internet=False)
best_candidate_result = self.find_best_candidate(
req.name,
specifier=req.specifier,
hashes=hashes,
)
best_candidate = best_candidate_result.best_candidate
installed_version: Optional[_BaseVersion] = None
if req.satisfied_by is not None:
installed_version = req.satisfied_by.version
def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
# This repeated parse_version and str() conversion is needed to
# handle different vendoring sources from pip and pkg_resources.
# If we stop using the pkg_resources provided specifier and start
# using our own, we can drop the cast to str().
return (
", ".join(
sorted(
{str(c.version) for c in cand_iter},
key=parse_version,
)
)
or "none"
)
if installed_version is None and best_candidate is None:
logger.critical(
"Could not find a version that satisfies the requirement %s "
"(from versions: %s)",
req,
_format_versions(best_candidate_result.iter_all()),
)
raise DistributionNotFound(
"No matching distribution found for {}".format(req)
)
best_installed = False
if installed_version and (
best_candidate is None or best_candidate.version <= installed_version
):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
"Existing installed version (%s) is most up-to-date and "
"satisfies requirement",
installed_version,
)
else:
logger.debug(
"Existing installed version (%s) satisfies requirement "
"(most up-to-date version is %s)",
installed_version,
best_candidate.version,
)
return None
if best_installed:
            # We have an existing version, and it's the best version
logger.debug(
"Installed version (%s) is most up-to-date (past versions: %s)",
installed_version,
_format_versions(best_candidate_result.iter_applicable()),
)
raise BestVersionAlreadyInstalled
logger.debug(
"Using version %s (newest of versions: %s)",
best_candidate.version,
_format_versions(best_candidate_result.iter_applicable()),
)
return best_candidate
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
"""Find the separator's index based on the package's canonical name.
:param fragment: A <package>+<version> filename "fragment" (stem) or
egg fragment.
:param canonical_name: The package's canonical name.
This function is needed since the canonicalized name does not necessarily
have the same length as the egg info's name part. An example::
>>> fragment = 'foo__bar-1.0'
>>> canonical_name = 'foo-bar'
>>> _find_name_version_sep(fragment, canonical_name)
8
"""
# Project name and version must be separated by one single dash. Find all
# occurrences of dashes; if the string in front of it matches the canonical
# name, this is the one separating the name and version parts.
for i, c in enumerate(fragment):
if c != "-":
continue
if canonicalize_name(fragment[:i]) == canonical_name:
return i
raise ValueError(f"{fragment} does not match {canonical_name}")
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
"""Parse the version string from a <package>+<version> filename
"fragment" (stem) or egg fragment.
:param fragment: The string to parse. E.g. foo-2.1
:param canonical_name: The canonicalized name of the package this
belongs to.
"""
try:
version_start = _find_name_version_sep(fragment, canonical_name) + 1
except ValueError:
return None
version = fragment[version_start:]
if not version:
return None
return version
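# Illustrative behaviour of the two helpers above (a sketch, not part of pip;
# it assumes packaging's canonicalize_name semantics as used in the docstrings):
#
#   _find_name_version_sep("foo__bar-1.0", "foo-bar")           -> 8
#   _extract_version_from_fragment("foo__bar-1.0", "foo-bar")   -> "1.0"
#   _extract_version_from_fragment("unrelated-1.0", "foo-bar")  -> None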
|
from .aaa_util import eval_results, get_summary, convert_df
class AnchorDetector:
def __init__(self, offline):
self.offline = offline
def initialize(self, seq_info):
self.seq_info = seq_info
self.previous_offline = None
def fixed_detect(self, frame_idx, duration):
feedback_length = duration
if (frame_idx + 1) % duration == 0:
is_anchor, feedback = (
True,
self._get_feedback(frame_idx - duration + 1, frame_idx),
)
else:
is_anchor, feedback = False, None
return is_anchor, feedback, feedback_length
def stable_detect(self, seq_info, frame_idx, duration, threshold):
if frame_idx + 1 > duration:
current_offline = self._get_feedback(frame_idx - duration + 1, frame_idx)
if self.previous_offline is not None and current_offline is not None:
overlap_previous = self.previous_offline[
self.previous_offline[:, 0] > 1
]
overlap_previous[:, 0] -= 1
overlap_previous = convert_df(overlap_previous, is_offline=True)
overlap_current = current_offline[current_offline[:, 0] < duration]
overlap_current = convert_df(overlap_current, is_offline=True)
feedback_length = duration
else:
current_offline = self._get_feedback(0, frame_idx)
if self.previous_offline is not None and current_offline is not None:
overlap_previous = convert_df(self.previous_offline, is_offline=True)
overlap_current = current_offline[current_offline[:, 0] <= frame_idx]
overlap_current = convert_df(overlap_current, is_offline=True)
feedback_length = frame_idx + 1
if self.previous_offline is not None and current_offline is not None:
prev_acc, prev_ana, _ = eval_results(
seq_info, overlap_previous, overlap_current
)
prev_sum = get_summary(prev_acc, prev_ana)
curr_acc, curr_ana, _ = eval_results(
seq_info, overlap_current, overlap_previous
)
curr_sum = get_summary(curr_acc, curr_ana)
mean_mota = (prev_sum[3] + curr_sum[3]) / 2
if mean_mota >= threshold:
is_anchor = True
feedback = current_offline
else:
is_anchor = False
feedback = None
# print(f"Frame {frame_idx}, MOTA {mean_mota}")
else:
is_anchor = False
feedback = None
self.previous_offline = current_offline
return is_anchor, feedback, feedback_length
def _get_feedback(self, start_frame, end_frame):
try:
feedback = self.offline.track(start_frame, end_frame)
except (RuntimeError, ValueError):
feedback = None
return feedback
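# Hypothetical usage sketch (the offline tracker object and the seq_info
# layout are assumptions, not defined in this module):
#
#   detector = AnchorDetector(offline_tracker)
#   detector.initialize(seq_info)
#   for frame_idx in range(n_frames):
#       is_anchor, feedback, length = detector.stable_detect(
#           seq_info, frame_idx, duration=30, threshold=0.5)
#       if is_anchor:
#           ...  # fuse the offline feedback into the online tracker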
|
"""
Unit and regression test for the maxsmi package.
"""
# Import package, test suite, and other packages as needed
import maxsmi  # noqa: F401 -- imported so the assertion below can find it in sys.modules
# import pytest
import sys
def test_maxsmi_imported():
"""Sample test, will always pass so long as import statement worked"""
assert "maxsmi" in sys.modules
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._assets import (
Asset,
Equity,
Future,
make_asset_array,
CACHE_FILE_TEMPLATE
)
from .assets import (
AssetFinder,
AssetConvertible,
PricingDataAssociable,
)
from .asset_db_schema import ASSET_DB_VERSION
from .asset_writer import AssetDBWriter
__all__ = [
'ASSET_DB_VERSION',
'Asset',
'AssetDBWriter',
'Equity',
'Future',
'AssetFinder',
'AssetConvertible',
'PricingDataAssociable',
'make_asset_array',
'CACHE_FILE_TEMPLATE'
]
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.sros import sros_argument_spec
from ansible.module_utils.basic import AnsibleFallbackNotFound
from ansible.module_utils.six import iteritems
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = self.load_provider()
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'sros'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
result = super(ActionModule, self).run(tmp, task_vars)
return result
def load_provider(self):
provider = self._task.args.get('provider', {})
for key, value in iteritems(sros_argument_spec):
if key != 'provider' and key not in provider:
if key in self._task.args:
provider[key] = self._task.args[key]
elif 'fallback' in value:
provider[key] = self._fallback(value['fallback'])
elif key not in provider:
provider[key] = None
return provider
def _fallback(self, fallback):
strategy = fallback[0]
args = []
kwargs = {}
for item in fallback[1:]:
if isinstance(item, dict):
kwargs = item
else:
args = item
try:
return strategy(*args, **kwargs)
except AnsibleFallbackNotFound:
pass
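# For reference, a fallback spec consumed by _fallback() above is typically a
# tuple of (strategy, args, kwargs-dict), e.g. (env_fallback, ['ANSIBLE_NET_USERNAME']).
# The strategy callable is invoked with those arguments; AnsibleFallbackNotFound
# is swallowed, so on failure the provider key simply stays unset.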
|
"""
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.5.1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from graphsense.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from graphsense.exceptions import ApiAttributeError
def lazy_import():
from graphsense.model.entity_tag import EntityTag
globals()['EntityTag'] = EntityTag
class EntityTags(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'entity_tags': ([EntityTag],), # noqa: E501
'next_page': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'entity_tags': 'entity_tags', # noqa: E501
'next_page': 'next_page', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, entity_tags, *args, **kwargs): # noqa: E501
"""EntityTags - a model defined in OpenAPI
Args:
entity_tags ([EntityTag]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                      composed schema that is traveled through
                      is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
next_page (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.entity_tags = entity_tags
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, entity_tags, *args, **kwargs): # noqa: E501
"""EntityTags - a model defined in OpenAPI
Args:
entity_tags ([EntityTag]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                      composed schema that is traveled through
                      is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
next_page (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.entity_tags = entity_tags
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
# Generated by Django 2.1.5 on 2019-03-31 18:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('stock', '0002_spare_store'),
]
operations = [
migrations.CreateModel(
name='SpareCustomer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('phone_number', models.CharField(max_length=10)),
('address', models.TextField()),
],
),
migrations.CreateModel(
name='SpareOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.CharField(max_length=20)),
('order_type', models.CharField(choices=[('IN_SOURCE', 'IN_SOURCE'), ('OUT_SOURCE', 'OUT_SOURCE')], max_length=20)),
('total', models.DecimalField(decimal_places=2, max_digits=10)),
('order_date', models.DateTimeField(auto_now=True)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.SpareCustomer')),
('sold_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('store', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Store')),
],
),
migrations.CreateModel(
name='SpareSold',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spare_count', models.IntegerField()),
('spare_name', models.CharField(max_length=100)),
('spare_price', models.DecimalField(decimal_places=2, max_digits=10)),
('spare_price_type', models.CharField(choices=[('MRP', 'MRP'), ('MECHANIC', 'MECHANIC'), ('WHOLESALER', 'WHOLESALER'), ('CUSTOMER', 'CUSTOMER')], max_length=20)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stock.SpareOrder')),
('spare', models.ForeignKey(on_delete=models.SET('deleted'), to='stock.Spare')),
],
),
]
|
import os
import sys
import random
import pygame
def load_image(name, colorkey=None): # not sure if this method is needed
fullname = os.path.join('data', name)
    # exit if the file does not exist
if not os.path.isfile(fullname):
print(f"Файл с изображением '{fullname}' не найден")
sys.exit()
image = pygame.image.load(fullname) # we can just use this one, cuz we know that pics are ok
return image
enemies = pygame.sprite.Group()
bullets = pygame.sprite.Group()
class Meteor(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.frames = []
self.cut_sheet(load_image("meteors1.png"), 5, 1)
self.cur_frame = 0
self.image = self.frames[self.cur_frame]
self.count = 0
self.mask = pygame.mask.from_surface(self.image)
self.rect.x = random.randrange(width)
self.rect.y = -1 * self.image.get_height()
while pygame.sprite.spritecollideany(self, enemies, pygame.sprite.collide_mask) or\
self.rect.x < 0 or self.rect.right > width:
self.rect.x = random.randrange(width)
self.life = 1
def cut_sheet(self, sheet, columns, rows):
self.rect = pygame.Rect(0, 0, sheet.get_width() // columns,
sheet.get_height() // rows)
for j in range(rows):
for i in range(columns):
frame_location = (self.rect.w * i, self.rect.h * j)
self.frames.append(sheet.subsurface(pygame.Rect(
frame_location, self.rect.size)))
def update(self):
if pygame.sprite.spritecollideany(self, bullets, pygame.sprite.collide_mask):
self.life -= 1
if self.life > 0 and self.rect.y <= height:
self.rect = self.rect.move(0, 1)
self.count += 1
if self.count % 7 == 0:
self.cur_frame = (self.cur_frame + 1) % len(self.frames)
self.image = self.frames[self.cur_frame]
else:
self.kill()
def except_hook(cls, exception, traceback):
sys.__excepthook__(cls, exception, traceback)
if __name__ == '__main__':
    sys.excepthook = except_hook  # install the hook before the game loop so errors are reported
    pygame.init()
size = width, height = 500, 700 # other parameters may be set in the main game
screen = pygame.display.set_mode(size)
clock = pygame.time.Clock()
fps = 60
MYEVENTTYPE = pygame.USEREVENT + 1
pygame.time.set_timer(MYEVENTTYPE, 3000)
for _ in range(random.randrange(1, 4)):
enemies.add(Meteor())
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
            if event.type == MYEVENTTYPE:  # every 3000 ms new enemies are created
for _ in range(random.randrange(1, 4)):
enemies.add(Meteor())
screen.fill(pygame.Color('blue')) # in the main game, there will be a background(animated?)
enemies.draw(screen)
enemies.update()
clock.tick(fps)
pygame.display.flip()
    pygame.quit()
|
from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bootstrap import Bootstrap
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
bootstrap = Bootstrap(app)
from flask_app import routes, models
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for testing"""
import os
import sys
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time
__all__ = [
"requires_dependency",
"requires_data",
"mpl_plot_check",
"assert_quantity_allclose",
"assert_skycoord_allclose",
"assert_time_allclose",
"Checker",
]
# Cache for `requires_dependency`
_requires_dependency_cache = {}
def requires_dependency(name):
"""Decorator to declare required dependencies for tests.
Examples
--------
::
from gammapy.utils.testing import requires_dependency
@requires_dependency('scipy')
def test_using_scipy():
import scipy
...
"""
import pytest
if name in _requires_dependency_cache:
skip_it = _requires_dependency_cache[name]
else:
try:
__import__(name)
skip_it = False
except ImportError:
skip_it = True
_requires_dependency_cache[name] = skip_it
reason = f"Missing dependency: {name}"
return pytest.mark.skipif(skip_it, reason=reason)
def has_data(name):
"""Is a certain set of data available?"""
if name == "gammapy-extra":
return "GAMMAPY_EXTRA" in os.environ
elif name == "gammapy-data":
return "GAMMAPY_DATA" in os.environ
elif name == "gamma-cat":
return "GAMMA_CAT" in os.environ
elif name == "fermi-lat":
return "GAMMAPY_FERMI_LAT_DATA" in os.environ
else:
raise ValueError(f"Invalid name: {name}")
def requires_data(name="gammapy-data"):
"""Decorator to declare required data for tests.
Examples
--------
::
from gammapy.utils.testing import requires_data
@requires_data()
def test_using_data_files():
filename = "$GAMMAPY_DATA/..."
...
"""
import pytest
if not isinstance(name, str):
raise TypeError(
"You must call @requires_data with a name (str). "
"Usually this: @requires_data()"
)
skip_it = not has_data(name)
reason = f"Missing data: {name}"
return pytest.mark.skipif(skip_it, reason=reason)
def run_cli(cli, args, exit_code=0):
"""Run Click command line tool.
Thin wrapper around `click.testing.CliRunner`
that prints info to stderr if the command fails.
Parameters
----------
cli : click.Command
Click command
args : list of str
Argument list
exit_code : int
Expected exit code of the command
Returns
-------
result : `click.testing.Result`
Result
"""
from click.testing import CliRunner
result = CliRunner().invoke(cli, args, catch_exceptions=False)
if result.exit_code != exit_code:
sys.stderr.write("Exit code mismatch!\n")
sys.stderr.write("Output:\n")
sys.stderr.write(result.output)
return result
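# Usage sketch (assumes a Click command object ``cli`` from the application
# under test; names here are illustrative):
#
#   result = run_cli(cli, ["analyse", "--help"], exit_code=0)
#   assert "Usage" in result.output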
def assert_skycoord_allclose(actual, desired):
"""Assert all-close for `astropy.coordinates.SkyCoord` objects.
- Frames can be different, aren't checked at the moment.
"""
assert isinstance(actual, SkyCoord)
assert isinstance(desired, SkyCoord)
assert_allclose(actual.data.lon.deg, desired.data.lon.deg)
assert_allclose(actual.data.lat.deg, desired.data.lat.deg)
def assert_time_allclose(actual, desired, atol=1e-3):
"""Assert all-close for `astropy.time.Time` objects.
atol is absolute tolerance in seconds.
"""
assert isinstance(actual, Time)
assert isinstance(desired, Time)
assert actual.scale == desired.scale
assert actual.format == desired.format
dt = actual - desired
assert_allclose(dt.sec, 0, rtol=0, atol=atol)
def assert_quantity_allclose(actual, desired, rtol=1.0e-7, atol=None, **kwargs):
"""Assert all-close for `astropy.units.Quantity` objects.
Requires that ``unit`` is identical, not just that quantities
are allclose taking different units into account.
We prefer this kind of assert for testing, since units
should only change on purpose, so this tests more behaviour.
"""
# TODO: change this later to explicitly check units are the same!
# assert actual.unit == desired.unit
args = _unquantify_allclose_arguments(actual, desired, rtol, atol)
assert_allclose(*args, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = u.Quantity(actual, subok=True, copy=False)
desired = u.Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except u.UnitsError:
raise u.UnitsError(
"Units for 'desired' ({}) and 'actual' ({}) "
"are not convertible".format(desired.unit, actual.unit)
)
if atol is None:
# by default, we assume an absolute tolerance of 0
atol = u.Quantity(0)
else:
atol = u.Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except u.UnitsError:
raise u.UnitsError(
"Units for 'atol' ({}) and 'actual' ({}) "
"are not convertible".format(atol.unit, actual.unit)
)
rtol = u.Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(u.dimensionless_unscaled)
except Exception:
raise u.UnitsError("`rtol` should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
def mpl_plot_check():
"""Matplotlib plotting test context manager.
It create a new figure on __enter__ and calls savefig for the
current figure in __exit__. This will trigger a render of the
Figure, which can sometimes raise errors if there is a problem.
This is writing to an in-memory byte buffer, i.e. is faster
than writing to disk.
"""
from io import BytesIO
import matplotlib.pyplot as plt
class MPLPlotCheck:
def __enter__(self):
plt.figure()
def __exit__(self, type, value, traceback):
plt.savefig(BytesIO(), format="png")
plt.close()
return MPLPlotCheck()
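# Usage sketch inside a test (matplotlib is assumed to be installed):
#
#   with mpl_plot_check():
#       dataset.plot()   # any code that draws on the current figure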
class Checker:
"""Base class for checker classes in Gammapy."""
def run(self, checks="all"):
if checks == "all":
checks = self.CHECKS.keys()
unknown_checks = sorted(set(checks).difference(self.CHECKS.keys()))
if unknown_checks:
raise ValueError(f"Unknown checks: {unknown_checks!r}")
for check in checks:
method = getattr(self, self.CHECKS[check])
yield from method()
|
# coding=utf-8
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules import conv
from torch.nn.modules.utils import _single
from ..functions.max_sv import max_singular_value
class SNConv1d(conv._ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
super(SNConv1d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _single(0), groups, bias)
self.register_buffer('u', torch.Tensor(1, out_channels).normal_())
@property
def W_(self):
w_mat = self.weight.view(self.weight.size(0), -1)
sigma, _u = max_singular_value(w_mat, self.u)
self.u.copy_(_u)
return self.weight / sigma
def forward(self, input):
return F.conv1d(input, self.W_, self.bias, self.stride, self.padding, self.dilation, self.groups)
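# Minimal usage sketch (shapes are illustrative assumptions):
#
#   conv = SNConv1d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
#   y = conv(torch.randn(8, 16, 100))   # -> torch.Size([8, 32, 100])
#
# Each forward pass re-estimates the largest singular value of the flattened
# weight via one power-iteration step (max_singular_value) and divides the
# weight by it, i.e. spectral normalization.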
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Soft Actor Critic agent."""
from tf_agents.agents.sac import sac_agent
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ classifiers.py ]
# Synopsis [ 'Naive Bayes' and 'Decision Tree' training, testing, and tuning functions ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn import tree
############
# CONSTANT #
############
N_FOLD = 10
DEPTHS = np.arange(1, 64)
ALPHAS = np.arange(0.001, 1.0, 0.001)
ALPHAS_MUSHROOM = np.arange(0.0001, 1.0, 0.0001)
BEST_DISTRIBUTION = 'Multinomial'
###############
# NAIVE BAYES #
###############
class naive_bayes_runner(object):
def __init__(self, MODEL, train_x, train_y, test_x, test_y):
#---data---#
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.test_y = test_y
#---model---#
		self.cross_validate = False
		self.alphas = ALPHAS
		self.MODEL = MODEL
		if self.MODEL == 'NEWS':
			self.models = { 'Gaussian'    : GaussianNB(),
							'Multinomial' : MultinomialNB(alpha=0.065),
							'Complement'  : ComplementNB(alpha=0.136),
							'Bernoulli'   : BernoulliNB(alpha=0.002) }
		if self.MODEL == 'MUSHROOM':
			self.alphas = ALPHAS_MUSHROOM  # finer search grid for the mushroom dataset
			self.models = { 'Gaussian'    : GaussianNB(),
							'Multinomial' : MultinomialNB(alpha=0.0001),
							'Complement'  : ComplementNB(alpha=0.0001),
							'Bernoulli'   : BernoulliNB(alpha=0.0001) }
		if self.MODEL == 'INCOME':
			self.cross_validate = True
			self.models = { 'Gaussian'    : GaussianNB(),
							'Multinomial' : MultinomialNB(alpha=0.959),
							'Complement'  : ComplementNB(alpha=0.16),
							'Bernoulli'   : BernoulliNB(alpha=0.001) }
def _fit_and_evaluate(self, model):
model_fit = model.fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
acc = metrics.accuracy_score(self.test_y, pred_y)
return acc, pred_y
def search_alpha(self):
		try:
			from tqdm import tqdm
		except ImportError:
			raise ImportError('Failed to import tqdm, use the following command to install: $ pip3 install tqdm')
for distribution, model in self.models.items():
best_acc = 0.0
best_alpha = 0.001
			if distribution != 'Gaussian':
				print('>> [Naive Bayes Runner] Searching for best alpha value, distribution:', distribution)
				for alpha in tqdm(self.alphas):
model.set_params(alpha=alpha)
if self.cross_validate:
scores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
else:
acc, _ = self._fit_and_evaluate(model)
if acc > best_acc:
best_acc = acc
best_alpha = alpha
print('>> [Naive Bayes Runner] '+ distribution + ' - Best Alpha Value:', best_alpha)
def run_best_all(self):
for distribution, model in self.models.items():
if self.cross_validate:
scores = cross_val_score(model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
else:
acc, _ = self._fit_and_evaluate(model)
print('>> [Naive Bayes Runner] '+ distribution + ' - Accuracy:', acc)
def run_best(self):
if self.cross_validate:
scores = cross_val_score(self.models[BEST_DISTRIBUTION], self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
model_fit = self.models[BEST_DISTRIBUTION].fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
else:
acc, pred_y = self._fit_and_evaluate(self.models[BEST_DISTRIBUTION])
print('>> [Naive Bayes Runner] '+ BEST_DISTRIBUTION + ' - Accuracy:', acc)
return pred_y
#################
# DECISION TREE #
#################
class decision_tree_runner(object):
def __init__(self, MODEL, train_x, train_y, test_x, test_y):
#---data---#
self.train_x = train_x
self.train_y = train_y
self.test_x = test_x
self.test_y = test_y
#---model---#
self.cross_validate = False
self.MODEL = MODEL
if self.MODEL == 'NEWS':
self.model = tree.DecisionTreeClassifier(criterion='gini',
splitter='random',
max_depth=47,
random_state=1337)
elif self.MODEL == 'MUSHROOM':
self.model = tree.DecisionTreeClassifier(criterion='gini',
splitter='random',
max_depth=7,
random_state=1337)
elif self.MODEL == 'INCOME':
self.cross_validate = True
self.model = tree.DecisionTreeClassifier(criterion='entropy',
min_impurity_decrease=2e-4,
max_depth=15,
random_state=1337)
def _fit_and_evaluate(self):
model_fit = self.model.fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
acc = metrics.accuracy_score(self.test_y, pred_y)
return acc, pred_y
def search_max_depth(self):
try:
from tqdm import tqdm
		except ImportError:
			raise ImportError('Failed to import tqdm, use the following command to install: $ pip3 install tqdm')
best_acc = 0.0
best_depth = 1
		print('>> [Decision Tree Runner] Searching for best max depth value...')
for depth in tqdm(DEPTHS):
self.model.set_params(max_depth=depth)
if self.cross_validate:
scores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
else:
acc, _ = self._fit_and_evaluate()
if acc > best_acc:
best_acc = acc
best_depth = depth
		print('>> [Decision Tree Runner] - Best Depth Value:', best_depth)
def visualize(self):
try:
import graphviz
		except ImportError:
raise ImportError('Failed to import graphviz, use the following command to install: $ pip3 install graphviz, and $ sudo apt-get install graphviz')
model_fit = self.model.fit(self.train_x, self.train_y)
dot_data = tree.export_graphviz(model_fit, out_file=None,
filled=True, rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph.format = 'png'
graph.render('../image/TREE_' + self.MODEL)
print('>> [Decision Tree Runner] - Tree visualization complete.')
def run_best(self):
if self.cross_validate:
scores = cross_val_score(self.model, self.train_x, self.train_y, cv=N_FOLD, scoring='accuracy')
acc = scores.mean()
model_fit = self.model.fit(self.train_x, self.train_y)
pred_y = model_fit.predict(self.test_x)
else:
acc, pred_y = self._fit_and_evaluate()
print('>> [Decision Tree Runner] - Accuracy:', acc)
return pred_y
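# Hypothetical driver sketch (the train/test arrays are assumptions of this
# example and would come from the project's data-loading code):
#
#   nb = naive_bayes_runner('NEWS', train_x, train_y, test_x, test_y)
#   nb.search_alpha()        # grid-search the smoothing parameter per distribution
#   pred_y = nb.run_best()   # fit and predict with BEST_DISTRIBUTION
#
#   dt = decision_tree_runner('NEWS', train_x, train_y, test_x, test_y)
#   dt.search_max_depth()
#   pred_y = dt.run_best()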
|
from . import venv
|
class Curve_Parms():
def Curve_Parms_Paths(self):
return [str(self.a),str(self.b),str(self.c),str(self.NFrames)]
def Curve_Parms_Path(self):
return "/".join( self.Curve_Parms_Paths() )
def Curve_Parms_FileName(self,cname,fname,ext="svg"):
fnames=self.Curve_Parms_Paths()
n=fnames.pop()
paths=[self.BasePath,self.Name]
fnames=[ fname,]+fnames+[ n+"."+ext ]
fname="-".join(fnames)
paths.append( "-".join(fnames) )
return "/".join(paths)
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_uvetest.py
#
# UVE and Alarm tests
#
import os
import sys
import threading
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
monkey.patch_all()
import unittest
import testtools
import fixtures
import socket
from utils.util import obj_to_dict, find_buildroot
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockredis import mockredis
from mockzoo import mockzoo
import logging
import time
from opserver.sandesh.viz.constants import *
from opserver.sandesh.viz.constants import _OBJECT_TABLES
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
import platform
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
builddir = find_buildroot(os.getcwd())
class AnalyticsUveTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
assert(False)
cls.redis_port = AnalyticsUveTest.get_free_port()
mockredis.start_redis(cls.redis_port)
@classmethod
def tearDownClass(cls):
mockredis.stop_redis(cls.redis_port)
#@unittest.skip('Skipping non-cassandra test with vizd')
def test_00_nocassandra(self):
'''
This test starts redis,vizd,opserver and qed
Then it checks that the collector UVE (via redis)
can be accessed from opserver.
'''
logging.info("%%% test_00_nocassandra %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
assert vizd_obj.verify_on_setup()
return True
# end test_00_nocassandra
#@unittest.skip('Skipping VM UVE test')
def test_01_vm_uve(self):
'''
This test starts redis, vizd, opserver, qed, and a python generator
that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
Then it checks that the VM UVE (via redis) can be accessed from
opserver.
'''
logging.info("%%% test_01_vm_uve %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Delete the VM UVE and verify that the deleted flag is set
# in the UVE cache
generator_obj.delete_vm_uve('abcd')
assert generator_obj.verify_vm_uve_cache(vm_id='abcd', delete=True)
# Add the VM UVE with the same vm_id and verify that the deleted flag
# is cleared in the UVE cache
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve_cache(vm_id='abcd')
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
# Generate VM with vm_id containing XML control character
generator_obj.send_vm_uve(vm_id='<abcd&>', num_vm_ifs=2, msg_count=2)
assert generator_obj.verify_vm_uve(vm_id='<abcd&>', num_vm_ifs=2,
msg_count=2)
return True
# end test_01_vm_uve
#@unittest.skip('Skipping VM UVE test')
def test_02_vm_uve_with_password(self):
'''
This test starts redis, vizd, opserver, qed, and a python generator
that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
Then it checks that the VM UVE (via redis) can be accessed from
opserver.
'''
logging.info("%%% test_02_vm_uve_with_password %%%")
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
redis_password='contrail'))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
assert generator_obj.verify_vm_uve(vm_id='abcd',
num_vm_ifs=5,
msg_count=5)
return True
# end test_02_vm_uve_with_password
#@unittest.skip('verify redis-uve restart')
def test_03_redis_uve_restart(self):
logging.info('%%% test_03_redis_uve_restart %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
start_kafka = True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
alarm_gen1 = self.useFixture(
GeneratorFixture('vrouter-agent', collectors, logging,
None, hostname=socket.gethostname()))
alarm_gen1.verify_on_setup()
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
self.verify_uve_resync(vizd_obj)
# Alarm should return after redis restart
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
        return True
# end test_03_redis_uve_restart
#@unittest.skip('verify redis-uve restart')
def test_04_redis_uve_restart_with_password(self):
        logging.info('%%% test_04_redis_uve_restart_with_password %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging,
builddir, -1, 0,
redis_password='contrail'))
self.verify_uve_resync(vizd_obj)
return True
    # end test_04_redis_uve_restart_with_password
def verify_uve_resync(self, vizd_obj):
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[0])
assert vizd_obj.verify_opserver_redis_uve_connection(
vizd_obj.opserver)
# verify redis-uve list
host = socket.gethostname()
gen_list = [host+':Analytics:contrail-collector:0',
host+':Analytics:contrail-query-engine:0',
host+':Analytics:contrail-analytics-api:0']
assert vizd_obj.verify_generator_uve_list(gen_list)
# stop redis-uve
vizd_obj.redis_uves[0].stop()
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[0], False)
assert vizd_obj.verify_opserver_redis_uve_connection(
vizd_obj.opserver, False)
# start redis-uve and verify that contrail-collector and Opserver are
# connected to the redis-uve
vizd_obj.redis_uves[0].start()
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[0])
assert vizd_obj.verify_opserver_redis_uve_connection(
vizd_obj.opserver)
# verify that UVEs are resynced with redis-uve
assert vizd_obj.verify_generator_uve_list(gen_list)
#@unittest.skip('Skipping contrail-collector HA test')
def test_05_collector_ha(self):
logging.info('%%% test_05_collector_ha %%%')
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True))
assert vizd_obj.verify_on_setup()
# OpServer, AlarmGen and QE are started with collectors[0] as
# primary and collectors[1] as secondary
exp_genlist = ['contrail-collector', 'contrail-analytics-api',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# start the contrail-vrouter-agent with collectors[1] as primary and
# collectors[0] as secondary
collectors = [vizd_obj.collectors[1].get_addr(),
vizd_obj.collectors[0].get_addr()]
vr_agent = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port()))
assert vr_agent.verify_on_setup()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
# stop collectors[0] and verify that OpServer, AlarmGen and QE switch
# from primary to secondary collector
vizd_obj.collectors[0].stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
# start collectors[0]
vizd_obj.collectors[0].start()
exp_genlist = ['contrail-collector']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify that the old UVEs are flushed from redis when collector restarts
exp_genlist = [vizd_obj.collectors[0].get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# stop collectors[1] and verify that OpServer, AlarmGen and QE switch
# from secondary to primary and contrail-vrouter-agent from primary to
# secondary
vizd_obj.collectors[1].stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify the generator list in redis
exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
vr_agent.get_generator_id(),
vizd_obj.opserver.get_generator_id(),
vizd_obj.query_engine.get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# stop QE
vizd_obj.query_engine.stop()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# verify the generator list in redis
exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
vizd_obj.opserver.get_generator_id(),
vr_agent.get_generator_id()]
assert vizd_obj.verify_generator_list_in_redis(\
vizd_obj.collectors[0].get_redis_uve(),
exp_genlist)
# start a python generator and QE with collectors[1] as the primary and
# collectors[0] as the secondary. On generator startup, verify
# that they connect to the secondary collector, if the
# connection to the primary fails
vr2_collectors = [vizd_obj.collectors[1].get_addr(),
vizd_obj.collectors[0].get_addr()]
        vr2_agent = self.useFixture(
            GeneratorFixture("contrail-snmp-collector", vr2_collectors,
                             logging, vizd_obj.get_opserver_port()))
assert vr2_agent.verify_on_setup()
vizd_obj.query_engine.set_primary_collector(
vizd_obj.collectors[1].get_addr())
vizd_obj.query_engine.set_secondary_collector(
vizd_obj.collectors[0].get_addr())
vizd_obj.query_engine.start()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api', 'contrail-snmp-collector',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist)
# stop the collectors[0] - both collectors[0] and collectors[1] are down
# send the VM UVE and verify that the VM UVE is synced after connection
# to the collector
vizd_obj.collectors[0].stop()
        # Make sure the connection to the collector is torn down before
        # sending the VM UVE
while True:
if vr_agent.verify_on_setup() is False:
break
vr_agent.send_vm_uve(vm_id='abcd-1234-efgh-5678',
num_vm_ifs=5, msg_count=5)
vizd_obj.collectors[1].start()
exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
'contrail-analytics-api', 'contrail-snmp-collector',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist)
assert vr_agent.verify_vm_uve(vm_id='abcd-1234-efgh-5678',
num_vm_ifs=5, msg_count=5)
# end test_05_collector_ha
#@unittest.skip('Skipping AlarmGen basic test')
def test_06_alarmgen_basic(self):
'''
This test starts the analytics processes.
It enables partition 0 on alarmgen, and confirms
that it got enabled
'''
logging.info("%%% test_06_alarmgen_basic %%%")
if AnalyticsUveTest._check_skip_kafka() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0,
start_kafka = True))
assert vizd_obj.verify_on_setup()
assert(vizd_obj.verify_uvetable_alarm("ObjectCollectorInfo",
"ObjectCollectorInfo:" + socket.gethostname(), "process-status"))
# setup generator for sending Vrouter build_info
collector = vizd_obj.collectors[0].get_addr()
alarm_gen1 = self.useFixture(
GeneratorFixture('vrouter-agent', [collector], logging,
None, hostname=socket.gethostname()))
alarm_gen1.verify_on_setup()
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute",
rules=[{"and_list": [{
"condition": {
"operation": "==",
"operand1": "ObjectVRouter.build_info",
"operand2": {
"json_value": "null"
}
},
"match": [{"json_operand1_value": "null"}]
}]}]
))
# Now try to clear the alarm by sending build_info
alarm_gen1.send_vrouterinfo("myvrouter1", b_info = True)
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
# Now try to clear the alarm by deleting the UVE
alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute", is_set = False))
alarm_gen2 = self.useFixture(
GeneratorFixture('vrouter-agent', [collector], logging,
None, hostname=socket.gethostname(), inst = "1"))
alarm_gen2.verify_on_setup()
# send vrouter UVE without build_info !!!
# check for PartialSysinfo alarm
alarm_gen2.send_vrouterinfo("myvrouter2")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter2", "partial-sysinfo-compute"))
# Now try to clear the alarm by disconnecting the generator
alarm_gen2._sandesh_instance._client._connection.set_admin_state(\
down=True)
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter2", "partial-sysinfo-compute", is_set = False))
# send vrouter UVE of myvrouter without build_info again !!!
# check for PartialSysinfo alarm
alarm_gen1.send_vrouterinfo("myvrouter1")
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
# Verify that we can give up partition ownership
assert(vizd_obj.set_alarmgen_partition(0,0) == 'true')
assert(vizd_obj.verify_alarmgen_partition(0,'false'))
# Give up the other partitions
assert(vizd_obj.set_alarmgen_partition(1,0) == 'true')
assert(vizd_obj.set_alarmgen_partition(2,0) == 'true')
assert(vizd_obj.set_alarmgen_partition(3,0) == 'true')
# Confirm that alarms are all gone
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
None, None))
# Get the partitions again
assert(vizd_obj.set_alarmgen_partition(0,1) == 'true')
assert(vizd_obj.set_alarmgen_partition(1,1) == 'true')
assert(vizd_obj.set_alarmgen_partition(2,1) == 'true')
assert(vizd_obj.set_alarmgen_partition(3,1) == 'true')
assert(vizd_obj.verify_alarmgen_partition(0,'true'))
        # The PartialSysinfo alarm on myvrouter1 should return
assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
"ObjectVRouter:myvrouter1", "partial-sysinfo-compute"))
return True
# end test_06_alarmgen_basic
#@unittest.skip('Skipping Alarm test')
def test_07_alarm(self):
'''
This test starts redis, collectors, analytics-api and
python generators that simulates alarm generator. This
test sends alarms from alarm generators and verifies the
retrieval of alarms from analytics-api.
'''
logging.info('%%% test_07_alarm %%%')
if AnalyticsUveTest._check_skip_kafka() is True:
return True
        # collector_ha_test flag is set to True because we want to test
        # retrieval of alarms across multiple redis servers.
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True,
start_kafka = True))
assert vizd_obj.verify_on_setup()
# create alarm-generator and attach it to the first collector.
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, hostname=socket.gethostname()+'_1'))
alarm_gen1.verify_on_setup()
# send process state alarm for analytics-node
alarms = alarm_gen1.create_process_state_alarm(
'contrail-query-engine')
alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
COLLECTOR_INFO_TABLE)
analytics_tbl = _OBJECT_TABLES[COLLECTOR_INFO_TABLE].log_query_name
        # send process state alarm for control-node
alarms = alarm_gen1.create_process_state_alarm('contrail-dns')
alarm_gen1.send_alarm('<&'+socket.gethostname()+'_1>', alarms,
BGP_ROUTER_TABLE)
control_tbl = _OBJECT_TABLES[BGP_ROUTER_TABLE].log_query_name
# create another alarm-generator and attach it to the second collector.
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, hostname=socket.gethostname()+'_2'))
alarm_gen2.verify_on_setup()
# send process state alarm for analytics-node
alarms = alarm_gen2.create_process_state_alarm(
'contrail-topology')
alarm_gen2.send_alarm(socket.gethostname()+'_2', alarms,
COLLECTOR_INFO_TABLE)
keys = [socket.gethostname()+'_1', socket.gethostname()+'_2']
assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
expected_alarms=keys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[1], obj_to_dict(
alarm_gen2.alarms[COLLECTOR_INFO_TABLE][keys[1]].data)))
keys = ['<&'+socket.gethostname()+'_1>']
assert(vizd_obj.verify_alarm_list_include(control_tbl, expected_alarms=keys))
assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
# delete analytics-node alarm generated by alarm_gen2
alarm_gen2.delete_alarm(socket.gethostname()+'_2',
COLLECTOR_INFO_TABLE)
# verify analytics-node alarms
keys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
expected_alarms=keys))
ukeys = [socket.gethostname()+'_2']
assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
unexpected_alms=ukeys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
        # Disconnect alarm_gen1 from the Collector and verify that all
        # alarms generated by alarm_gen1 are removed by the Collector.
alarm_gen1.disconnect_from_collector()
ukeys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list_exclude(analytics_tbl,
unexpected_alms=ukeys))
assert(vizd_obj.verify_alarm(analytics_tbl, ukeys[0], {}))
ukeys = ['<&'+socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list_exclude(control_tbl,
unexpected_alms=ukeys))
assert(vizd_obj.verify_alarm(control_tbl, ukeys[0], {}))
# update analytics-node alarm in disconnect state
alarms = alarm_gen1.create_process_state_alarm(
'contrail-snmp-collector')
alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
COLLECTOR_INFO_TABLE)
        # Connect alarm_gen1 to the Collector and verify that all
        # alarms generated by alarm_gen1 are synced with the Collector.
alarm_gen1.connect_to_collector()
keys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list_include(analytics_tbl,
expected_alarms=keys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
keys = ['<&'+socket.gethostname()+'_1>']
assert(vizd_obj.verify_alarm_list_include(control_tbl,
expected_alarms=keys))
assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
# end test_07_alarm
#@unittest.skip('Skipping UVE/Alarm Filter test')
def test_08_uve_alarm_filter(self):
'''
This test verifies the filter options kfilt, sfilt, mfilt and cfilt
in the UVE/Alarm GET and POST methods.
'''
logging.info('%%% test_08_uve_alarm_filter %%%')
if AnalyticsUveTest._check_skip_kafka() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True, start_kafka = True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
api_server_name = socket.gethostname()+'_1'
api_server = self.useFixture(
GeneratorFixture('contrail-api', [collectors[0]], logging,
None, node_type='Config',
hostname=api_server_name))
vr_agent_name = socket.gethostname()+'_2'
vr_agent = self.useFixture(
GeneratorFixture('contrail-vrouter-agent', [collectors[1]],
logging, None, node_type='Compute',
hostname=vr_agent_name))
alarm_gen1_name = socket.gethostname()+'_1'
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, node_type='Analytics',
hostname=alarm_gen1_name))
alarm_gen2_name = socket.gethostname()+'_3'
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, node_type='Analytics',
hostname=alarm_gen2_name))
api_server.verify_on_setup()
vr_agent.verify_on_setup()
alarm_gen1.verify_on_setup()
alarm_gen2.verify_on_setup()
vn_list = ['default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&']
# generate UVEs for the filter test
api_server.send_vn_config_uve(name=vn_list[0],
partial_conn_nw=[vn_list[1]],
num_acl_rules=2)
api_server.send_vn_config_uve(name=vn_list[1],
num_acl_rules=3)
vr_agent.send_vn_agent_uve(name=vn_list[1], num_acl_rules=3,
ipkts=2, ibytes=1024)
vr_agent.send_vn_agent_uve(name=vn_list[2], ipkts=4, ibytes=128)
vr_agent.send_vn_agent_uve(name=vn_list[3], ipkts=8, ibytes=256)
# generate Alarms for the filter test
alarms = alarm_gen1.create_alarm('InPktsThreshold')
alarms += alarm_gen1.create_alarm('InBytesThreshold', ack=True)
alarm_gen1.send_alarm(vn_list[1], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[2], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent', ack=False)
alarm_gen2.send_alarm(vn_list[3], alarms, VN_TABLE)
filt_test = [
# no filter
{
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt
{
'kfilt': ['*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:*',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['default-domain:project1:vn1',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project2:*',
'invalid-vn:*'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&',
'invalid-vn'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'kfilt': ['invalid-vn'],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# sfilt
{
'sfilt': socket.gethostname()+'_1',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
{
'sfilt': socket.gethostname()+'_3',
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'sfilt': 'invalid_source',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# mfilt
{
'mfilt': 'Config:contrail-api:0',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-alarm-gen:0',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'mfilt': 'Analytics:contrail-invalid:0',
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# cfilt
{
'cfilt': ['UveVirtualNetworkAgent'],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:total_acl_rules',
'UveVirtualNetworkConfig:partially_connected_networks'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
]
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'total_acl_rules': 3
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkConfig:invalid',
'UveVirtualNetworkAgent:in_tpkts',
'UVEAlarms:alarms'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
{
'cfilt': [
'UveVirtualNetworkAgent:invalid',
'UVEAlarms:invalid_alarms',
'invalid'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# ackfilt
{
'ackfilt': True,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
}
}
]
},
},
{
'ackfilt': False,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'get_alarms': {
'virtual-network': [
{ 'name' : 'default-domain:project1:vn2',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
]
} }
},
{ 'name' : 'default-domain:project2:vn1',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
{ 'name' : 'default-domain:project2:vn1&',
'value' : { 'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
} }
},
]
},
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt + sfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
}
]
},
},
# kfilt + sfilt + ackfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project2:*',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1',
'default-domain:project2:vn1&'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
},
{
'name': 'default-domain:project2:vn1&',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 8,
'in_bytes': 256
}
}
}
]
},
},
# kfilt + sfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'sfilt': socket.gethostname()+'_1',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms',
'UveVirtualNetworkConfig:Invalid'
],
'uve_list_get': [
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
},
{
'type': 'InBytesThreshold',
'ack': True
}
]
}
}
}
]
},
},
# kfilt + mfilt + cfilt
{
'kfilt': ['*'],
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms:alarms'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
},
# kfilt + sfilt + mfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:*'
],
'sfilt': socket.gethostname()+'_1',
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkConfig:partially_connected_networks',
'UveVirtualNetworkConfig:total_acl_rules',
'UVEAlarms'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
}
}
]
},
},
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'ack': False
}
]
}
}
}
]
},
},
# kfilt + sfilt + mfilt + cfilt + ackfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1&',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1&'
],
'uve_get_post': {'value': []},
}
]
vn_table = _OBJECT_TABLES[VN_TABLE].log_query_name
for i in range(len(filt_test)):
filters = dict(kfilt=filt_test[i].get('kfilt'),
sfilt=filt_test[i].get('sfilt'),
mfilt=filt_test[i].get('mfilt'),
cfilt=filt_test[i].get('cfilt'),
ackfilt=filt_test[i].get('ackfilt'))
assert(vizd_obj.verify_uve_list(vn_table,
filts=filters, exp_uve_list=filt_test[i]['uve_list_get']))
assert(vizd_obj.verify_multi_uve_get(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
assert(vizd_obj.verify_uve_post(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
if 'get_alarms' in filt_test[i]:
filters['tablefilt'] = 'virtual-network'
assert(vizd_obj.verify_get_alarms(vn_table,
filts=filters, exp_uves=filt_test[i]['get_alarms']))
# end test_08_uve_alarm_filter
@staticmethod
def get_free_port():
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
return cport
@staticmethod
def _check_skip_kafka():
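        # NOTE: platform.linux_distribution() was deprecated in Python 3.5 and
        # removed in Python 3.8; on newer interpreters this check would need
        # the third-party 'distro' package instead.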
(PLATFORM, VERSION, EXTRA) = platform.linux_distribution()
if PLATFORM.lower() == 'ubuntu':
if VERSION.find('12.') == 0:
return True
if PLATFORM.lower() == 'centos':
if VERSION.find('6.') == 0:
return True
return False
def _term_handler(*_):
raise IntSignal()
if __name__ == '__main__':
    gevent.signal(signal.SIGINT, _term_handler)
unittest.main(catchbreak=True)
|
from .objects import Server, Zone, RRSet, Record, Comment, Cryptokey, Metadata, SearchResult, StatisticItem, \
MapStatisticItem, RingStatisticItem, SimpleStatisticItem, CacheFlushResult
from .exceptions import PDNSApiException, PDNSApiNotFound
import json
from functools import partial
import requests
import logging
logger = logging.getLogger(__name__)
# TODO:
# - Logging
# - TSIGKeys
class APIClient:
def __init__(self, api_host, api_key, tls_verify=True, request_timeout=None):
self._api_url = api_host if 'api/v1' in api_host else f"{api_host}/api/v1"
self._api_key = api_key
self._tls_verify = tls_verify
self._request_timeout = request_timeout
if not self._tls_verify:
logger.warning("Disabling TLS certificate validation.")
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
self.request_headers = {'X-API-Key': self._api_key}
self.get = partial(self.request, method='GET')
self.post = partial(self.request, method='POST')
self.put = partial(self.request, method='PUT')
self.patch = partial(self.request, method='PATCH')
self.delete = partial(self.request, method='DELETE')
self.servers = self._set_servers()
self.current_server = self.servers[0]
self.zones = self._set_zones()
def request(self, path: str, method: str, data=None, **kwargs):
url = f"{self._api_url}/{path.lstrip('/')}"
if data is None:
data = {}
response = requests.request(method,
url,
json=data,
headers=self.request_headers,
timeout=self._request_timeout,
verify=self._tls_verify,
**kwargs
)
try:
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError as e:
if response.status_code == 404:
                raise PDNSApiNotFound(e) from None
try:
status_message = response.json()
status_message = status_message.get('error', status_message.get('errors', 'Unknown error'))
            except Exception:
status_message = response.text
raise PDNSApiException(response.status_code, status_message) from None
except json.decoder.JSONDecodeError:
return response.text
def _set_servers(self):
new_servers = list()
for server in self.get('servers'):
new_servers.append(Server(**server))
return new_servers
def _set_zones(self):
new_zones = list()
for zone in self.get(f'servers/{self.current_server.id}/zones'):
new_zones.append(Zone(**zone))
return new_zones
def create_zone(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones'
return Zone(**self.post(path, data=zone))
# Zones
def get_zone(self, zone_name):
path = f'servers/{self.current_server.id}/zones/{zone_name}'
zone = Zone(**self.get(path))
new_rrsets = []
for rrset in zone.rrsets:
new_comments = []
new_records = []
rrset = RRSet(**rrset)
for comment in rrset.comments:
new_comments.append(Comment(**comment))
for record in rrset.records:
new_records.append(Record(**record))
rrset.comments = new_comments
rrset.records = new_records
new_rrsets.append(rrset)
zone.rrsets = new_rrsets
return zone
def delete_zone(self, zone_name):
path = f'servers/{self.current_server.id}/zones/{zone_name}'
self.delete(path)
def update_zone_metadata(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}'
self.put(path, data=zone)
return self.get_zone(zone.name)
def patch_rrsets(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}'
self.patch(path, data={'rrsets': zone.rrsets})
return self.get_zone(zone.name)
def create_records(self, zone: Zone, rrsets: list):
for rrset in rrsets:
rrset.changetype = 'REPLACE'
zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
return self.patch_rrsets(zone)
def delete_records(self, zone: Zone, rrsets: list):
for rrset in rrsets:
rrset.changetype = 'DELETE'
zone = Zone(name=zone.name, kind=zone.kind, rrsets=rrsets)
return self.patch_rrsets(zone)
# Cryptokeys
def get_zone_cryptokeys(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
        cryptokeys_new = []
        for cryptokey in self.get(path):
            cryptokeys_new.append(Cryptokey(**cryptokey))
        return cryptokeys_new
def create_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys'
return self.post(path, data=cryptokey)
def get_cryptokey(self, zone: Zone, key_id):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{key_id}'
return Cryptokey(**self.get(path))
def put_cryptokey(self, zone: Zone, cryptokey: Cryptokey):
path = f'servers/{self.current_server.id}/zones/{zone.name}/cryptokeys/{cryptokey.id}'
self.put(path, data=cryptokey)
# Metadata
def get_zone_metadata(self, zone: Zone):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
metadata_new = []
for metadata in self.get(path):
metadata_new.append(Metadata(**metadata))
return metadata_new
def create_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata'
self.post(path, data=metadata)
return self.get_zone_metadata(zone)
def get_metadata(self, zone: Zone, metadata_kind):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata_kind}'
return Metadata(**self.get(path))
def put_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
return Metadata(**self.put(path, data=metadata))
def delete_metadata(self, zone: Zone, metadata: Metadata):
path = f'servers/{self.current_server.id}/zones/{zone.name}/metadata/{metadata.kind}'
self.delete(path)
# TSIGKeys
# FIXME TBW
# Searching
def search(self, query: str, max_results: int, object_type: str):
path = f'servers/{self.current_server.id}/search-data'
object_types = ['all', 'zone', 'record', 'comment']
if object_type not in object_types:
raise TypeError(f"object_type must be one of {', '.join(object_types)}")
if not isinstance(max_results, int):
raise TypeError("max_results needs to be an integer.")
payload = {'q': query, 'max': max_results, 'object_type': object_type}
new_results = []
for result in self.get(path, params=payload):
new_results.append(SearchResult(**result))
return new_results
# Statistics
def statistics(self, statistic=None, includerings=True):
path = f'servers/{self.current_server.id}/statistics'
payload = {'statistic': statistic, 'includerings': includerings}
type_map = {
'StatisticItem': StatisticItem,
'MapStatisticItem': MapStatisticItem,
'RingStatisticItem': RingStatisticItem
}
new_statistics = []
for item in self.get(path, params=payload):
if item.get('type') in type_map.keys():
new_statistic = type_map[item.get('type')](**item)
if isinstance(new_statistic.value, list):
new_values = []
for value in new_statistic.value:
new_values.append(SimpleStatisticItem(**value))
new_statistic.value = new_values
if statistic is not None:
return new_statistic
new_statistics.append(new_statistic)
return new_statistics
# Cache
def flush_cache(self, domain: str):
path = f'servers/{self.current_server.id}/cache/flush'
payload = {'domain': domain}
return CacheFlushResult(**self.put(path, params=payload))
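# Minimal usage sketch (illustrative only; the endpoint and API key are
# placeholders and a reachable PowerDNS API is assumed):
#
#   client = APIClient('http://localhost:8081', 'changeme')
#   for zone in client.zones:
#       print(zone.name)
#   client.flush_cache('example.org.')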
|
"""
The agent module contains three abstract classes that are subclassed in order to create algorithms.
The classes are:
* Player - for an algorithm that cannot learn and can only play
* Learner - for a learning algorithm controlling a single agent
* MultiLearner - for a learning algorithm controlling a number of agents
"""
import abc
from typing import List, Iterable
from ezcoach.enviroment import Manifest
class Player(abc.ABC):
"""
The abstract class representing a playing agent. It can be initialized with the manifest of the game
and can react to states by selecting actions.
Both methods are empty and must be implemented in the concrete class.
A class that inherits from the Player class can be used with the Runner's test procedure.
"""
@abc.abstractmethod
def initialize(self, manifest: Manifest):
"""
        Initializes the object with the manifest that describes the game.
        :param manifest: a Manifest instance obtained from the environment.
"""
@abc.abstractmethod
def act(self, state):
"""
Selects an action to be performed in the given state.
:param state: a state received from the environment
        :return: an action compliant with the manifest provided in the initialize method
"""
@classmethod
def __subclasshook__(cls, obj):
if cls is Player:
methods = ('initialize', 'act')
if all(any(method in superclass.__dict__
for superclass in obj.__mro__)
for method in methods):
return True
return NotImplemented
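# A minimal concrete Player, included as an illustrative sketch only.
# The `actions` attribute read from the manifest below is an assumption made
# for this example; consult the real Manifest class for the actual interface.
import random
class RandomPlayer(Player):
    """An example agent that selects a random action in every state."""
    def initialize(self, manifest: Manifest):
        self._manifest = manifest
    def act(self, state):
        # `self._manifest.actions` is hypothetical; it stands in for whatever
        # the manifest exposes as the set of legal actions.
        return random.choice(self._manifest.actions)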
class Learner(Player):
"""
The abstract class representing an agent that is capable of learning. It inherits from the Player class
and thus it is capable of playing.
    Only the do_start_episode method must be implemented; the other methods may be left empty.
    Rewards are received on a per-step basis in the receive_reward method and on a per-episode basis in the episode_ended method.
Methods that ensure persistence are added for convenience.
An agent derived from Learner can be used in both training and testing procedures.
"""
@abc.abstractmethod
def do_start_episode(self, episode: int) -> bool:
"""
Decides if next episode should be started.
:param episode: the number of an episode to be started (starting from 1)
:return: the decision if the next episode should be started
"""
def episode_started(self, episode: int):
"""
Informs the algorithm that the episode was started.
:param episode: the number of the started episode (starting from 1)
"""
def receive_reward(self, previous_state, action, reward: float, accumulated_reward: float, next_state):
"""
Receives a reward from an environment.
:param previous_state: the state that precedes the reward
:param action: the action that precedes the reward
:param reward: the numerical reward signal
:param accumulated_reward: the reward accumulated during the current episode
        :param next_state: the state that follows the reward
"""
def episode_ended(self, terminal_state, accumulated_reward):
"""
        Receives the accumulated reward for an episode. If a discount is used, this value should be ignored
        and the actual return should be calculated from the rewards passed to receive_reward during the episode.
:param terminal_state: the last state of the episode
:param accumulated_reward: the accumulated reward assuming no discount
"""
@classmethod
def __subclasshook__(cls, obj):
if cls is Learner:
methods = ('initialize', 'act',
'do_start_episode', 'episode_started', 'receive_reward', 'episode_ended')
if all(any(method in superclass.__dict__
for superclass in obj.__mro__)
for method in methods):
return True
return NotImplemented
class MultiLearner(Learner):
"""
The class representing a learning algorithm capable of controlling a number of agents.
    It inherits from the Learner class. The list of player numbers is provided in the set_players method before each episode.
    The number identifying the currently acting player is set in the set_acting_player method, which is invoked before
    the act and receive_reward methods during an episode and before the episode_ended method at the end of an episode.
"""
@abc.abstractmethod
def set_players(self, players: Iterable[int]):
"""
Informs the learner about the players that it will control.
:param players: an iterable of numbers identifying players
"""
@abc.abstractmethod
def set_acting_player(self, player):
"""
Sets the current player that will act, receive reward and end episode.
:param player: a number identifying the acting player
"""
@classmethod
def __subclasshook__(cls, obj):
if cls is MultiLearner:
methods = ('initialize', 'act',
'do_start_episode', 'episode_started', 'receive_reward', 'episode_ended',
'set_players', 'set_acting_player')
if all(any(method in superclass.__dict__
for superclass in obj.__mro__)
for method in methods):
return True
return NotImplemented
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 eNovance
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six.moves.urllib.parse as urlparse
import mock
import requests
from ceilometer.alarm import service
from ceilometer.openstack.common import context
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
DATA_JSON = ('{"current": "ALARM", "alarm_id": "foobar",'
' "reason": "what ?", "reason_data": {"test": "test"},'
' "previous": "OK"}')
NOTIFICATION = dict(alarm_id='foobar',
condition=dict(threshold=42),
reason='what ?',
reason_data={'test': 'test'},
previous='OK',
current='ALARM')
class TestAlarmNotifier(test.BaseTestCase):
def setUp(self):
super(TestAlarmNotifier, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
self.service = service.AlarmNotifierService('somehost', 'sometopic')
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_init_host(self):
# If we try to create a real RPC connection, init_host() never
# returns. Mock it out so we can establish the service
# configuration.
with mock.patch('ceilometer.openstack.common.rpc.create_connection'):
self.service.start()
def test_notify_alarm(self):
data = {
'actions': ['test://'],
'alarm_id': 'foobar',
'previous': 'OK',
'current': 'ALARM',
'reason': 'Everything is on fire',
'reason_data': {'fire': 'everywhere'}
}
self.service.notify_alarm(context.get_admin_context(), data)
notifications = self.service.notifiers['test'].obj.notifications
self.assertEqual(1, len(notifications))
self.assertEqual((urlparse.urlsplit(data['actions'][0]),
data['alarm_id'],
data['previous'],
data['current'],
data['reason'],
data['reason_data']),
notifications[0])
def test_notify_alarm_no_action(self):
self.service.notify_alarm(context.get_admin_context(), {})
def test_notify_alarm_log_action(self):
self.service.notify_alarm(context.get_admin_context(),
{
'actions': ['log://'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
@staticmethod
def _fake_spawn_n(func, *args, **kwargs):
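        # Run the spawned function synchronously so the test can assert on
        # requests.post immediately instead of racing a green thread.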
func(*args, **kwargs)
@staticmethod
def _notification(action):
notification = {}
notification.update(NOTIFICATION)
notification['actions'] = [action]
return notification
HTTP_HEADERS = {'content-type': 'application/json'}
def test_notify_alarm_rest_action_ok(self):
action = 'http://host/action'
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS)
def test_notify_alarm_rest_action_with_ssl_client_cert(self):
action = 'https://host/action'
certificate = "/etc/ssl/cert/whatever.pem"
self.CONF.set_override("rest_notifier_certificate_file", certificate,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
cert=certificate, verify=True)
def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self):
action = 'https://host/action'
certificate = "/etc/ssl/cert/whatever.pem"
key = "/etc/ssl/cert/whatever.key"
self.CONF.set_override("rest_notifier_certificate_file", certificate,
group='alarm')
self.CONF.set_override("rest_notifier_certificate_key", key,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
cert=(certificate, key), verify=True)
def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self):
action = 'https://host/action'
self.CONF.set_override("rest_notifier_ssl_verify", False,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
verify=False)
def test_notify_alarm_rest_action_with_ssl_verify_disable(self):
action = 'https://host/action?ceilometer-alarm-ssl-verify=0'
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
verify=False)
def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self):
action = 'https://host/action?ceilometer-alarm-ssl-verify=1'
self.CONF.set_override("rest_notifier_ssl_verify", False,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
headers=self.HTTP_HEADERS,
verify=True)
@staticmethod
def _fake_urlsplit(*args, **kwargs):
raise Exception("Evil urlsplit!")
def test_notify_alarm_invalid_url(self):
with mock.patch('ceilometer.openstack.common.network_utils.urlsplit',
self._fake_urlsplit):
LOG = mock.MagicMock()
with mock.patch('ceilometer.alarm.service.LOG', LOG):
self.service.notify_alarm(
context.get_admin_context(),
{
'actions': ['no-such-action-i-am-sure'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
self.assertTrue(LOG.error.called)
def test_notify_alarm_invalid_action(self):
LOG = mock.MagicMock()
with mock.patch('ceilometer.alarm.service.LOG', LOG):
self.service.notify_alarm(
context.get_admin_context(),
{
'actions': ['no-such-action-i-am-sure://'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
self.assertTrue(LOG.error.called)
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
from ckuser import client, server
import os
import sys
def print_client_menu():
    print("User menu:")
    print("-"*25)
    print("0"+"-"*10+"Show user menu"+"-"*10)
    print("1"+"-"*10+"Show service menu"+"-"*10)
    print("2"+"-"*10+"User login"+"-"*10)
    print("3"+"-"*10+"Update user info"+"-"*10)
    print("4"+"-"*10+"Register user"+"-"*10)
    print("6"+"-"*10+"Exit system")
def print_server_menu():
    print("Service menu:")
    print("-"*25)
    print("0"+"-"*10+"Show user menu"+"-"*10)
    print("1"+"-"*10+"Show service menu"+"-"*10)
    print("2"+"-"*10+"Add user account"+"-"*10)
    print("3"+"-"*10+"Delete user account"+"-"*10)
    print("4"+"-"*10+"Update user account"+"-"*10)
    print("5"+"-"*10+"Find user account"+"-"*10)
    print("6"+"-"*10+"Exit system")
def server_oper():
print_server_menu()
while True:
try:
            i = int(input("Enter an option: "))
if i == 0:
os.system("clear")
break
elif i == 1:
os.system("clear")
print_server_menu()
elif i == 2:
server.user_add()
elif i == 3:
server.user_del()
elif i == 4:
server.user_update()
elif i == 5:
server.user_find()
elif i == 6:
os.system("clear")
                sys.exit(0)
except Exception as msg:
os.system("clear")
print_server_menu()
print("输入错误!")
client_oper()
def client_oper():
print_client_menu()
while True:
try:
            i = int(input("Enter an option: "))
if i == 0:
os.system("clear")
print_client_menu()
elif i == 1:
os.system("clear")
break
elif i == 2:
client.login()
elif i == 3:
client.update()
elif i == 4:
client.register()
elif i == 6:
os.system("clear")
                sys.exit(0)
else:
os.system("clear")
print_client_menu()
print("输入错误!")
except Exception:
os.system("clear")
print_client_menu()
print("输入错误!")
server_oper()
def main():
# server.user_update()
client_oper()
if __name__ == '__main__':
main()
|
import os
import re
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img, img_to_array
IMAGE_SHAPE = [(224, 224), (240, 240), (260, 260), (300, 300), (380, 380), (456, 456), (528, 528), (600, 600)]
def main(paths: list, model_name: str):
try:
model = tf.keras.models.load_model(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name))
except Exception:
        print('No such model exists')
exit()
    model_index = int(re.search(r'\d', model_name).group(0))
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'model', model_name, 'labels.txt'), mode='r', encoding='utf-8') as f1:
labels = [s.strip() for s in f1.readlines()]
with open('manga_sound_labels.csv', mode='w', encoding='utf-8') as f2:
for path in paths:
if os.path.isfile(path):
try:
img = np.expand_dims(img_to_array(load_img(path,target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)
except Exception:
continue
                predicted = labels[np.argmax(model.predict(img)[0])]
                f2.write(path + ',' + predicted + '\n')
else:
for filename in os.listdir(path):
try:
img = np.expand_dims(img_to_array(load_img(os.path.join(path, filename),target_size=IMAGE_SHAPE[model_index])) / 255, axis=0)
except Exception:
continue
                    predicted = labels[np.argmax(model.predict(img)[0])]
                    f2.write(os.path.join(path, filename) + ',' + predicted + '\n')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Predicts background sounds from manga panel images')
    parser.add_argument('path', nargs='*', help='File or directory names to analyze')
    parser.add_argument('--model', default=os.path.join('best','b0'), help='Name of the model used for classification')
args = parser.parse_args()
if 'manga_sound_labels.csv' in os.listdir(os.getcwd()):
        print('manga_sound_labels.csv already exists, exiting')
exit()
main(args.path, args.model)
|
#!/usr/bin/python3
import os
import sys
import argparse
from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
NUMBERED_FILENAME_SPLIT_CHARACTER = "_"
parser = argparse.ArgumentParser(description='')
parser.add_argument('filepath', help='')
parser.add_argument('--force', '-f', action="store_true", help='Override any existing files')
parser.add_argument('--increment', '-i', action="store_true", help='Increment the version number on the file so 00_X.txt will be copied as 01_X.txt')
args = parser.parse_args()
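# Example invocation (script and path names are illustrative):
#   CRUSADER_KINGS_3_CURRENT_MOD_NAME=my_mod \
#   CRUSADER_KINGS_3_MAIN_DIR=/path/to/game \
#   CRUSADER_KINGS_3_MOD_DIR=/path/to/mods \
#   ./copy_to_mod.py events/00_my_events.txt --increment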
CRUSADER_KINGS_3_CURRENT_MOD_NAME = "CRUSADER_KINGS_3_CURRENT_MOD_NAME"
CRUSADER_KINGS_3_MAIN_DIR = "CRUSADER_KINGS_3_MAIN_DIR"
CRUSADER_KINGS_3_MOD_DIR = "CRUSADER_KINGS_3_MOD_DIR"
mod_name = os.environ.get(CRUSADER_KINGS_3_CURRENT_MOD_NAME, '')
main_directory_str = os.environ.get(CRUSADER_KINGS_3_MAIN_DIR, '').replace(" ", "\\ ")
base_mod_directory_str = os.environ.get(CRUSADER_KINGS_3_MOD_DIR, '').replace(" ", "\\ ")
if not mod_name:
logging.error(f"The {CRUSADER_KINGS_3_CURRENT_MOD_NAME} environment variable must be set")
sys.exit(1)
if not main_directory_str:
logging.error(f"The {CRUSADER_KINGS_3_MAIN_DIR} environment variable must be set")
sys.exit(1)
if not base_mod_directory_str:
logging.error(f"The {CRUSADER_KINGS_3_MOD_DIR} environment variable must be set")
sys.exit(1)
main_path = Path(main_directory_str)
if not main_path.exists() or not main_path.is_dir():
logging.error(f"Please ensure that {main_directory_str} points to a valid directory")
sys.exit(1)
base_mod_path = Path(base_mod_directory_str)
if not base_mod_path.exists() or not base_mod_path.is_dir():
logging.error(f"Please ensure that {base_mod_directory_str} points to a valid directory")
sys.exit(1)
mod_directory_str = f"{base_mod_directory_str}/{mod_name}"
mod_path = Path(mod_directory_str)
if not mod_path.exists() or not mod_path.is_dir():
logging.error(f"Please ensure that {mod_directory_str} points to a valid directory")
sys.exit(1)
filepath_str = f"{main_directory_str}/{args.filepath}"
filepath_path = Path(filepath_str)
if not filepath_path.exists() or not filepath_path.is_file():
logging.error(f"Please ensure that {filepath_str} points to an existing file")
sys.exit(1)
destination_filepath = args.filepath
if args.increment:
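    # e.g. "events/00_my_events.txt" is copied as "events/01_my_events.txt",
    # preserving the zero padding of the numeric prefix.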
filepath = Path(args.filepath)
if NUMBERED_FILENAME_SPLIT_CHARACTER in filepath.name:
(n, tail) = filepath.name.split(NUMBERED_FILENAME_SPLIT_CHARACTER, 1)
n = str(int(n) + 1).zfill(len(n))
destination_filepath = str(filepath.parents[0]) + f"/{n}_{tail}"
destination_filepath_str = f"{mod_directory_str}/{destination_filepath}"
destination_filepath_path = Path(destination_filepath_str)
if destination_filepath_path.exists() and not args.force:
logging.error(f"File exists at {destination_filepath_str} already, please use the --force/-f parameter if you want to write over it")
sys.exit(1)
destination_filepath_path.parents[0].mkdir(parents=True, exist_ok=True)
destination_filepath_path.touch(exist_ok=True)
destination_filepath_path.write_text(filepath_path.read_text())
logging.info(f"Created at {destination_filepath_path}")
|
#######################################################################
# Copyright (C) #
# 2016-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# 2016 Kenta Shimada(hyperkentakun@gmail.com) #
# 2017 Nicky van Foreest(vanforeest@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
# actions: hit or stand
ACTION_HIT = 0
ACTION_STAND = 1  # "stick" in the book
ACTIONS = [ACTION_HIT, ACTION_STAND]
# policy for player
POLICY_PLAYER = np.zeros(22, dtype=int)  # np.int was removed in NumPy 1.24
for i in range(12, 20):
POLICY_PLAYER[i] = ACTION_HIT
POLICY_PLAYER[20] = ACTION_STAND
POLICY_PLAYER[21] = ACTION_STAND
# function form of target policy of player
def target_policy_player(usable_ace_player, player_sum, dealer_card):
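    # Hit on sums below 20 and stand on 20 and 21; this fixed policy ignores
    # the usable-ace flag and the dealer's card.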
return POLICY_PLAYER[player_sum]
# function form of behavior policy of player
def behavior_policy_player(usable_ace_player, player_sum, dealer_card):
if np.random.binomial(1, 0.5) == 1:
return ACTION_STAND
return ACTION_HIT
# policy for dealer
POLICY_DEALER = np.zeros(22)
for i in range(12, 17):
POLICY_DEALER[i] = ACTION_HIT
for i in range(17, 22):
POLICY_DEALER[i] = ACTION_STAND
# get a new card
def get_card():
card = np.random.randint(1, 14)
card = min(card, 10)
return card
# get the value of a card (11 for ace).
def card_value(card_id):
return 11 if card_id == 1 else card_id
# play a game
# @policy_player: specify policy for player
# @initial_state: [whether player has a usable Ace, sum of player's cards, one card of dealer]
# @initial_action: the initial action
def play(policy_player, initial_state=None, initial_action=None):
# player status
# sum of player
player_sum = 0
# trajectory of player
player_trajectory = []
# whether player uses Ace as 11
usable_ace_player = False
# dealer status
dealer_card1 = 0
dealer_card2 = 0
usable_ace_dealer = False
if initial_state is None:
# generate a random initial state
while player_sum < 12:
# if sum of player is less than 12, always hit
card = get_card()
player_sum += card_value(card)
# If the player's sum is larger than 21, he may hold one or two aces.
if player_sum > 21:
assert player_sum == 22
# last card must be ace
player_sum -= 10
else:
usable_ace_player |= (1 == card)
# initialize cards of dealer, suppose dealer will show the first card he gets
dealer_card1 = get_card()
dealer_card2 = get_card()
else:
# use specified initial state
usable_ace_player, player_sum, dealer_card1 = initial_state
dealer_card2 = get_card()
# initial state of the game
state = [usable_ace_player, player_sum, dealer_card1]
# initialize dealer's sum
dealer_sum = card_value(dealer_card1) + card_value(dealer_card2)
usable_ace_dealer = 1 in (dealer_card1, dealer_card2)
# if the dealer's sum is larger than 21, he must hold two aces.
if dealer_sum > 21:
assert dealer_sum == 22
# use one Ace as 1 rather than 11
dealer_sum -= 10
assert dealer_sum <= 21
assert player_sum <= 21
# game starts!
# player's turn
while True:
if initial_action is not None:
action = initial_action
initial_action = None
else:
# get action based on current sum
action = policy_player(usable_ace_player, player_sum, dealer_card1)
# track player's trajectory for importance sampling
player_trajectory.append([(usable_ace_player, player_sum, dealer_card1), action])
if action == ACTION_STAND:
break
# if hit, get new card
card = get_card()
# Keep track of the ace count. the usable_ace_player flag is insufficient alone as it cannot
# distinguish between having one ace or two.
ace_count = int(usable_ace_player)
if card == 1:
ace_count += 1
player_sum += card_value(card)
# If the player has a usable ace, use it as 1 to avoid busting and continue.
while player_sum > 21 and ace_count:
player_sum -= 10
ace_count -= 1
# player busts
if player_sum > 21:
return state, -1, player_trajectory
assert player_sum <= 21
usable_ace_player = (ace_count == 1)
# dealer's turn
while True:
# get action based on current sum
action = POLICY_DEALER[dealer_sum]
if action == ACTION_STAND:
break
# if hit, get a new card
new_card = get_card()
ace_count = int(usable_ace_dealer)
if new_card == 1:
ace_count += 1
dealer_sum += card_value(new_card)
# If the dealer has a usable ace, use it as 1 to avoid busting and continue.
while dealer_sum > 21 and ace_count:
dealer_sum -= 10
ace_count -= 1
# dealer busts
if dealer_sum > 21:
return state, 1, player_trajectory
usable_ace_dealer = (ace_count == 1)
# compare the sum between player and dealer
assert player_sum <= 21 and dealer_sum <= 21
if player_sum > dealer_sum:
return state, 1, player_trajectory
elif player_sum == dealer_sum:
return state, 0, player_trajectory
else:
return state, -1, player_trajectory
# Monte Carlo Sample with On-Policy
def monte_carlo_on_policy(episodes):
states_usable_ace = np.zeros((10, 10))
    # initialize counts to 1 to avoid division by 0
states_usable_ace_count = np.ones((10, 10))
states_no_usable_ace = np.zeros((10, 10))
    # initialize counts to 1 to avoid division by 0
states_no_usable_ace_count = np.ones((10, 10))
for i in tqdm(range(0, episodes)):
_, reward, player_trajectory = play(target_policy_player)
for (usable_ace, player_sum, dealer_card), _ in player_trajectory:
player_sum -= 12
dealer_card -= 1
if usable_ace:
states_usable_ace_count[player_sum, dealer_card] += 1
states_usable_ace[player_sum, dealer_card] += reward
else:
states_no_usable_ace_count[player_sum, dealer_card] += 1
states_no_usable_ace[player_sum, dealer_card] += reward
return states_usable_ace / states_usable_ace_count, states_no_usable_ace / states_no_usable_ace_count
# Monte Carlo with Exploring Starts
def monte_carlo_es(episodes):
# (playerSum, dealerCard, usableAce, action)
state_action_values = np.zeros((10, 10, 2, 2))
    # initialize counts to 1 to avoid division by 0
state_action_pair_count = np.ones((10, 10, 2, 2))
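    # Exploring starts: every (state, action) pair can begin an episode with
    # nonzero probability, which keeps all pairs visited even though the
    # policy being improved is greedy.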
# behavior policy is greedy
def behavior_policy(usable_ace, player_sum, dealer_card):
usable_ace = int(usable_ace)
player_sum -= 12
dealer_card -= 1
# get argmax of the average returns(s, a)
values_ = state_action_values[player_sum, dealer_card, usable_ace, :] / \
state_action_pair_count[player_sum, dealer_card, usable_ace, :]
return np.random.choice([action_ for action_, value_ in enumerate(values_) if value_ == np.max(values_)])
# play for several episodes
for episode in tqdm(range(episodes)):
# for each episode, use a randomly initialized state and action
initial_state = [bool(np.random.choice([0, 1])),
np.random.choice(range(12, 22)),
np.random.choice(range(1, 11))]
initial_action = np.random.choice(ACTIONS)
current_policy = behavior_policy if episode else target_policy_player
_, reward, trajectory = play(current_policy, initial_state, initial_action)
first_visit_check = set()
for (usable_ace, player_sum, dealer_card), action in trajectory:
usable_ace = int(usable_ace)
player_sum -= 12
dealer_card -= 1
state_action = (usable_ace, player_sum, dealer_card, action)
if state_action in first_visit_check:
continue
first_visit_check.add(state_action)
# update values of state-action pairs
state_action_values[player_sum, dealer_card, usable_ace, action] += reward
state_action_pair_count[player_sum, dealer_card, usable_ace, action] += 1
return state_action_values / state_action_pair_count
# Monte Carlo Sample with Off-Policy
def monte_carlo_off_policy(episodes):
initial_state = [True, 13, 2]
rhos = []
returns = []
for i in range(0, episodes):
_, reward, player_trajectory = play(behavior_policy_player, initial_state=initial_state)
# get the importance ratio
numerator = 1.0
denominator = 1.0
for (usable_ace, player_sum, dealer_card), action in player_trajectory:
if action == target_policy_player(usable_ace, player_sum, dealer_card):
denominator *= 0.5
else:
numerator = 0.0
break
rho = numerator / denominator
rhos.append(rho)
returns.append(reward)
rhos = np.asarray(rhos)
returns = np.asarray(returns)
weighted_returns = rhos * returns
weighted_returns = np.add.accumulate(weighted_returns)
rhos = np.add.accumulate(rhos)
ordinary_sampling = weighted_returns / np.arange(1, episodes + 1)
with np.errstate(divide='ignore',invalid='ignore'):
weighted_sampling = np.where(rhos != 0, weighted_returns / rhos, 0)
return ordinary_sampling, weighted_sampling
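# For episode i with return G_i and importance ratio
# rho_i = prod_t pi(a_t|s_t) / b(a_t|s_t), the two estimators after n episodes
# are
#   ordinary importance sampling:  V = sum_i(rho_i * G_i) / n
#   weighted importance sampling:  V = sum_i(rho_i * G_i) / sum_i(rho_i)
# which is exactly what the cumulative sums above compute.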
def figure_5_1():
states_usable_ace_1, states_no_usable_ace_1 = monte_carlo_on_policy(10000)
states_usable_ace_2, states_no_usable_ace_2 = monte_carlo_on_policy(500000)
states = [states_usable_ace_1,
states_usable_ace_2,
states_no_usable_ace_1,
states_no_usable_ace_2]
titles = ['Usable Ace, 10000 Episodes',
'Usable Ace, 500000 Episodes',
'No Usable Ace, 10000 Episodes',
'No Usable Ace, 500000 Episodes']
_, axes = plt.subplots(2, 2, figsize=(40, 30))
plt.subplots_adjust(wspace=0.1, hspace=0.2)
axes = axes.flatten()
for state, title, axis in zip(states, titles, axes):
fig = sns.heatmap(np.flipud(state), cmap="YlGnBu", ax=axis, xticklabels=range(1, 11),
yticklabels=list(reversed(range(12, 22))))
fig.set_ylabel('player sum', fontsize=30)
fig.set_xlabel('dealer showing', fontsize=30)
fig.set_title(title, fontsize=30)
plt.savefig('../images/figure_5_1.png')
plt.close()
def figure_5_2():
state_action_values = monte_carlo_es(500000)
state_value_no_usable_ace = np.max(state_action_values[:, :, 0, :], axis=-1)
state_value_usable_ace = np.max(state_action_values[:, :, 1, :], axis=-1)
# get the optimal policy
action_no_usable_ace = np.argmax(state_action_values[:, :, 0, :], axis=-1)
action_usable_ace = np.argmax(state_action_values[:, :, 1, :], axis=-1)
images = [action_usable_ace,
state_value_usable_ace,
action_no_usable_ace,
state_value_no_usable_ace]
titles = ['Optimal policy with usable Ace',
'Optimal value with usable Ace',
'Optimal policy without usable Ace',
'Optimal value without usable Ace']
_, axes = plt.subplots(2, 2, figsize=(40, 30))
plt.subplots_adjust(wspace=0.1, hspace=0.2)
axes = axes.flatten()
for image, title, axis in zip(images, titles, axes):
fig = sns.heatmap(np.flipud(image), cmap="YlGnBu", ax=axis, xticklabels=range(1, 11),
yticklabels=list(reversed(range(12, 22))))
fig.set_ylabel('player sum', fontsize=30)
fig.set_xlabel('dealer showing', fontsize=30)
fig.set_title(title, fontsize=30)
plt.savefig('../images/figure_5_2.png')
plt.close()
def figure_5_3():
true_value = -0.27726
episodes = 10000
runs = 100
error_ordinary = np.zeros(episodes)
error_weighted = np.zeros(episodes)
for i in tqdm(range(0, runs)):
ordinary_sampling_, weighted_sampling_ = monte_carlo_off_policy(episodes)
# get the squared error
error_ordinary += np.power(ordinary_sampling_ - true_value, 2)
error_weighted += np.power(weighted_sampling_ - true_value, 2)
error_ordinary /= runs
error_weighted /= runs
plt.plot(np.arange(1, episodes + 1), error_ordinary, color='green', label='Ordinary Importance Sampling')
plt.plot(np.arange(1, episodes + 1), error_weighted, color='red', label='Weighted Importance Sampling')
plt.ylim(-0.1, 5)
plt.xlabel('Episodes (log scale)')
plt.ylabel(f'Mean square error\n(average over {runs} runs)')
plt.xscale('log')
plt.legend()
plt.savefig('../images/figure_5_3.png')
plt.close()
if __name__ == '__main__':
figure_5_1()
figure_5_2()
figure_5_3()
|
import os
import cv2
source_path = './test_images/'
def processImage(filename, mImage):
if '2019' in filename:
# ----------------------------------
        # Remove noise by applying a Gaussian blur to the source image
mImage = cv2.GaussianBlur(mImage, (5, 5), cv2.BORDER_DEFAULT)
# pink rgb values - 255, 153, 255
# white rgb values - 255, 255, 255
# ghost white values - 248, 248, 255
# mImage = mImage[np.where((mImage == [255, 255, 255]).all(axis=2))] = [255, 153, 255]
# working (best performing, descending) - gtruth 55 - 200 (58), 220 (86), 180 (33), 150 (0)
mImage[mImage >= 128] = 200
mImage[mImage < 128] = 0
'''
hsvImg = cv2.cvtColor(mImage,cv2.COLOR_BGR2HSV)
value = 5 # changeable
vValue = hsvImg[..., 2]
hsvImg[..., 2] = np.where((255-vValue) < value, 255, vValue + value)
'''
# save the processed image with a new file name
new_name = source_path + os.path.splitext(filename)[0] + '_processed.jpg'
cv2.imwrite(new_name, mImage)
else:
pass
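# Three passes over the source directory: (1) write a '_processed' copy of
# every readable image, (2) delete the originals that now have a processed
# counterpart, (3) rename the processed copies back to the original names.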
for filename in os.listdir(source_path):
if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
# read the image
img = cv2.imread(os.path.join(source_path, filename))
if img is not None:
processImage(filename, img)
for filename in os.listdir(source_path):
if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
if '_processed' in filename:
to_remove = filename.replace('_processed', '')
to_remove_file = os.path.join(source_path, to_remove)
os.remove(to_remove_file)
for filename in os.listdir(source_path):
if filename.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
if '_processed' in filename:
new_name = filename.replace('_processed', '')
os.rename(os.path.join(source_path, filename), os.path.join(source_path, new_name))
|
"""
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .engine import Engine, ScopeStack
from .passthrough_engine import PassthroughEngine
from .jinja_engine import JinjaEngine
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
|
"""
design_choice
~~~~~~~~~~~~~~
IMPORTANT: This is a straightforward adaptation of sphinx's todo extension
done by search/replace.
Allow design_choices to be inserted into your documentation.
Inclusion of design_choices can be switched of by a configuration variable.
The design_choice_list directive collects all design_choices of your
project and lists them along with a backlink to the original location.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import warnings
from typing import Any, Dict, Iterable, List, Tuple, cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
import sphinx
from sphinx import addnodes
from sphinx.application import Sphinx
# from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.domains import Domain
from sphinx.environment import BuildEnvironment
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.util import logging, texescape
from sphinx.util.docutils import SphinxDirective, new_document
from sphinx.util.nodes import make_refnode
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.latex import LaTeXTranslator
logger = logging.getLogger(__name__)
class design_choice_node(nodes.Admonition, nodes.Element):
pass
class design_choice_list(nodes.General, nodes.Element):
pass
class DesignChoice(BaseAdmonition, SphinxDirective):
"""
A design_choice entry, displayed (if configured) in the form of an admonition.
"""
node_class = design_choice_node
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
"class": directives.class_option,
"name": directives.unchanged,
"title": directives.unchanged,
"prefix": directives.unchanged,
}
def run(self) -> List[Node]:
if not self.options.get("class"):
self.options["class"] = ["admonition-design_choice"]
(design_choice,) = super().run() # type: Tuple[Node]
if isinstance(design_choice, nodes.system_message):
return [design_choice]
elif isinstance(design_choice, design_choice_node):
prefix = ''
if "prefix" in self.options:
prefix = self.options["prefix"] + " "
# default to an empty title so a missing :title: option cannot raise KeyError
design_choice.insert(
0, nodes.title(text=prefix + _("Design Choice: ") + self.options.get("title", ""))
)
design_choice["docname"] = self.env.docname
self.add_name(design_choice)
self.set_source_info(design_choice)
self.state.document.note_explicit_target(design_choice)
return [design_choice]
else:
raise RuntimeError # never reached here
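# A minimal usage sketch (assuming the extension is enabled in conf.py; the
# option names come straight from DesignChoice.option_spec). In any .rst file:
#
#   .. design_choice::
#      :title: Use PostgreSQL for persistence
#      :prefix: DC-7
#
#      SQLite cannot handle the expected write concurrency.
#
#   .. design_choice_list::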
class DesignChoiceDomain(Domain):
name = "design_choice"
label = "design_choice"
@property
def design_choices(self) -> Dict[str, List[design_choice_node]]:
return self.data.setdefault("design_choices", {})
def clear_doc(self, docname: str) -> None:
self.design_choices.pop(docname, None)
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
for docname in docnames:
self.design_choices[docname] = otherdata["design_choices"][docname]
def process_doc(
self, env: BuildEnvironment, docname: str, document: nodes.document
) -> None:
design_choices = self.design_choices.setdefault(docname, [])
for design_choice in document.traverse(design_choice_node):
env.app.emit("design_choice-defined", design_choice)
design_choices.append(design_choice)
if env.config.design_choice_emit_warnings:
logger.warning(
__("TODO entry found: %s"),
design_choice[1].astext(),
location=design_choice,
)
def process_design_choices(app: Sphinx, doctree: nodes.document) -> None:
# warnings.warn(
# "process_design_choices() is deprecated.",
# RemovedInSphinx40Warning,
# stacklevel=2,
# )
# collect all design_choices in the environment
# this is not done in the directive itself because some transformations
# (e.g. substitutions) must already have been run
env = app.builder.env
if not hasattr(env, "design_choice_all_design_choices"):
env.design_choice_all_design_choices = [] # type: ignore
for node in doctree.traverse(design_choice_node):
app.events.emit("design_choice-defined", node)
newnode = node.deepcopy()
newnode["ids"] = []
env.design_choice_all_design_choices.append(
{ # type: ignore
"docname": env.docname,
"source": node.source or env.doc2path(env.docname),
"lineno": node.line,
"design_choice": newnode,
"target": node["ids"][0],
}
)
if env.config.design_choice_emit_warnings:
label = cast(nodes.Element, node[1])
logger.warning(__("design_choice entry found: %s"), label.astext(), location=node)
class DesignChoiceList(SphinxDirective):
"""
A list of all design_choice entries.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {} # type: Dict
def run(self) -> List[Node]:
# Simply insert an empty design_choice_list node; it is replaced later by
# DesignChoiceListProcessor (or the deprecated process_design_choice_nodes)
return [design_choice_list("")]
class DesignChoiceListProcessor:
def __init__(self, app: Sphinx, doctree: nodes.document, docname: str) -> None:
self.builder = app.builder
self.config = app.config
self.env = app.env
self.domain = cast(DesignChoiceDomain, app.env.get_domain("design_choice"))
self.document = new_document("")
self.process(doctree, docname)
def process(self, doctree: nodes.document, docname: str) -> None:
design_choices = sum(
self.domain.design_choices.values(), []
) # type: List[design_choice_node]
for node in doctree.traverse(design_choice_list):
if not self.config.design_choice_include_design_choices:
node.parent.remove(node)
continue
if node.get("ids"):
content = [nodes.target()] # type: List[Element]
else:
content = []
for design_choice in design_choices:
# Create a copy of the design_choice node
new_design_choice = design_choice.deepcopy()
new_design_choice["ids"].clear()
self.resolve_reference(new_design_choice, docname)
content.append(new_design_choice)
design_choice_ref = self.create_design_choice_reference(
design_choice, docname
)
content.append(design_choice_ref)
node.replace_self(content)
def create_design_choice_reference(
self, design_choice: design_choice_node, docname: str
) -> nodes.paragraph:
if self.config.design_choice_link_only:
description = _("<<original entry>>")
else:
description = _("(The <<original entry>> is located in %s, line %d.)") % (
design_choice.source,
design_choice.line,
)
prefix = description[: description.find("<<")]
suffix = description[description.find(">>") + 2 :]
para = nodes.paragraph(classes=["design_choice-source"])
para += nodes.Text(prefix, prefix)
# Create a reference
linktext = nodes.emphasis(_("original entry"), _("original entry"))
reference = nodes.reference("", "", linktext, internal=True)
try:
reference["refuri"] = self.builder.get_relative_uri(
docname, design_choice["docname"]
)
reference["refuri"] += "#" + design_choice["ids"][0]
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
para += reference
para += nodes.Text(suffix, suffix)
return para
def resolve_reference(
self, design_choice: design_choice_node, docname: str
) -> None:
"""Resolve references in the design_choice content."""
for node in design_choice.traverse(addnodes.pending_xref):
if "refdoc" in node:
node["refdoc"] = docname
# Note: To resolve references, it is needed to wrap it with document node
self.document += design_choice
self.env.resolve_references(self.document, docname, self.builder)
self.document.remove(design_choice)
def process_design_choice_nodes(
app: Sphinx, doctree: nodes.document, fromdocname: str
) -> None:
"""Replace all design_choice_list nodes with a list of the collected design_choices.
Augment each design_choice with a backlink to the original location.
"""
# warnings.warn(
# "process_design_choice_nodes() is deprecated.",
# RemovedInSphinx40Warning,
# stacklevel=2,
# )
domain = cast(DesignChoiceDomain, app.env.get_domain("design_choice"))
design_choices = sum(
domain.design_choices.values(), []
) # type: List[design_choice_node]
for node in doctree.traverse(design_choice_list):
if node.get("ids"):
content = [nodes.target()] # type: List[Element]
else:
content = []
if not app.config["design_choice_include_design_choices"]:
node.replace_self(content)
continue
for design_choice_info in design_choices:
para = nodes.paragraph(classes=["design_choice-source"])
if app.config["design_choice_link_only"]:
description = _("<<original entry>>")
else:
description = _(
"(The <<original entry>> is located in %s, line %d.)"
) % (design_choice_info.source, design_choice_info.line)
desc1 = description[: description.find("<<")]
desc2 = description[description.find(">>") + 2 :]
para += nodes.Text(desc1, desc1)
# Create a reference
innernode = nodes.emphasis(_("original entry"), _("original entry"))
try:
para += make_refnode(
app.builder,
fromdocname,
design_choice_info["docname"],
design_choice_info["ids"][0],
innernode,
)
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
para += nodes.Text(desc2, desc2)
design_choice_entry = design_choice_info.deepcopy()
design_choice_entry["ids"].clear()
# (Recursively) resolve references in the design_choice content
app.env.resolve_references(design_choice_entry, design_choice_info["docname"], app.builder) # type: ignore # NOQA
# Insert into the design_choice_list
content.append(design_choice_entry)
content.append(para)
node.replace_self(content)
def purge_design_choices(app: Sphinx, env: BuildEnvironment, docname: str) -> None:
# warnings.warn(
# "purge_design_choices() is deprecated.", RemovedInSphinx40Warning, stacklevel=2
# )
if not hasattr(env, "design_choice_all_design_choices"):
return
env.design_choice_all_design_choices = [
design_choice
for design_choice in env.design_choice_all_design_choices # type: ignore
if design_choice["docname"] != docname
]
def merge_info(
app: Sphinx, env: BuildEnvironment, docnames: Iterable[str], other: BuildEnvironment
) -> None:
# warnings.warn("merge_info() is deprecated.", RemovedInSphinx40Warning, stacklevel=2)
if not hasattr(other, "design_choice_all_design_choices"):
return
if not hasattr(env, "design_choice_all_design_choices"):
env.design_choice_all_design_choices = [] # type: ignore
env.design_choice_all_design_choices.extend(other.design_choice_all_design_choices) # type: ignore
def visit_design_choice_node(self: HTMLTranslator, node: design_choice_node) -> None:
if self.config.design_choice_include_design_choices:
self.visit_admonition(node)
else:
raise nodes.SkipNode
def depart_design_choice_node(self: HTMLTranslator, node: design_choice_node) -> None:
self.depart_admonition(node)
def latex_visit_design_choice_node(
self: LaTeXTranslator, node: design_choice_node
) -> None:
if self.config.design_choice_include_design_choices:
self.body.append("\n\\begin{sphinxadmonition}{note}{")
self.body.append(self.hypertarget_to(node))
title_node = cast(nodes.title, node[0])
title = texescape.escape(title_node.astext(), self.config.latex_engine)
self.body.append("%s:}" % title)
node.pop(0)
else:
raise nodes.SkipNode
def latex_depart_design_choice_node(
self: LaTeXTranslator, node: design_choice_node
) -> None:
self.body.append("\\end{sphinxadmonition}\n")
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_event("design_choice-defined")
app.add_config_value("design_choice_include_design_choices", False, "html")
app.add_config_value("design_choice_link_only", False, "html")
app.add_config_value("design_choice_emit_warnings", False, "html")
app.add_node(design_choice_list)
app.add_node(
design_choice_node,
html=(visit_design_choice_node, depart_design_choice_node),
latex=(latex_visit_design_choice_node, latex_depart_design_choice_node),
text=(visit_design_choice_node, depart_design_choice_node),
man=(visit_design_choice_node, depart_design_choice_node),
texinfo=(visit_design_choice_node, depart_design_choice_node),
)
app.add_directive("design_choice", DesignChoice)
app.add_directive("design_choice_list", DesignChoiceList)
app.add_domain(DesignChoiceDomain)
app.connect("doctree-resolved", DesignChoiceListProcessor)
return {
"version": sphinx.__display_version__,
"env_version": 2,
"parallel_read_safe": True,
}
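# A minimal conf.py sketch for enabling this extension (the module path is an
# assumption; adjust it to wherever this file lives in your project):
#
#   extensions = ["myproject.sphinxext.design_choice"]
#   design_choice_include_design_choices = True
#   design_choice_emit_warnings = True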
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import shlex
from knack.util import CLIError
import azext_alias
from azext_alias.argument import get_placeholders
from azext_alias._const import (
COLLISION_CHECK_LEVEL_DEPTH,
INVALID_ALIAS_COMMAND_ERROR,
EMPTY_ALIAS_ERROR,
INVALID_STARTING_CHAR_ERROR,
INCONSISTENT_ARG_ERROR,
COMMAND_LVL_ERROR
)
from azext_alias.alias import AliasManager
def process_alias_create_namespace(namespace):
"""
Validate input arguments when the user invokes 'az alias create'.
Args:
namespace: argparse namespace object.
"""
_validate_alias_name(namespace.alias_name)
_validate_alias_command(namespace.alias_command)
_validate_alias_command_level(namespace.alias_name, namespace.alias_command)
_validate_pos_args_syntax(namespace.alias_name, namespace.alias_command)
def _validate_alias_name(alias_name):
"""
Check if the alias name is valid.
Args:
alias_name: The name of the alias to validate.
"""
if not alias_name:
raise CLIError(EMPTY_ALIAS_ERROR)
if not re.match('^[a-zA-Z]', alias_name):
raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0]))
def _validate_alias_command(alias_command):
"""
Check if the alias command is valid.
Args:
alias_command: The command to validate.
"""
if not alias_command:
raise CLIError(EMPTY_ALIAS_ERROR)
# Boundary index is the index at which the first named or positional argument starts
split_command = shlex.split(alias_command)
boundary_index = len(split_command)
for i, subcommand in enumerate(split_command):
if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:
boundary_index = i
break
# Extract possible CLI commands and validate
command_to_validate = ' '.join(split_command[:boundary_index]).lower()
for command in azext_alias.cached_reserved_commands:
if re.match(r'([a-z\-]*\s)*{}($|\s)'.format(command_to_validate), command):
return
raise CLIError(INVALID_ALIAS_COMMAND_ERROR.format(command_to_validate if command_to_validate else alias_command))
def _validate_pos_args_syntax(alias_name, alias_command):
"""
Check if the positional argument syntax is valid in alias name and alias command.
Args:
alias_name: The name of the alias to validate.
alias_command: The command to validate.
"""
pos_args_from_alias = get_placeholders(alias_name)
# Split by '|' to extract positional argument name from Jinja filter (e.g. {{ arg_name | upper }})
# Split by '.' to extract positional argument name from function call (e.g. {{ arg_name.split()[0] }})
pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]
if set(pos_args_from_alias) != set(pos_args_from_command):
arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)
raise CLIError(INCONSISTENT_ARG_ERROR.format('' if len(arg_diff) == 1 else 's',
arg_diff,
'is' if len(arg_diff) == 1 else 'are'))
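# A sketch of what _validate_pos_args_syntax accepts and rejects (the alias
# names and commands below are made up for illustration):
#
#   alias name: "cp {{ src }} {{ dst }}"
#   command:    "storage blob copy --source {{ src }} --destination {{ dst }}"
#     -> OK: both sides reference exactly {src, dst}
#
#   alias name: "cp {{ src }}"
#   command:    "storage blob copy --source {{ src }} --destination {{ dst }}"
#     -> CLIError: {dst} appears on only one side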
def _validate_alias_command_level(alias, command):
"""
Make sure that if the alias is a reserved command, the command that the alias points to
in the command tree does not conflict in levels.
e.g. 'dns' -> 'network dns' is valid because dns is a level 2 command and network dns starts at level 1.
However, 'list' -> 'show' is not valid because list and show are both reserved commands at level 2.
Args:
alias: The name of the alias.
command: The command that the alias points to.
"""
alias_collision_table = AliasManager.build_collision_table([alias], azext_alias.cached_reserved_commands)
# Alias is not a reserved command, so it can point to any command
if not alias_collision_table:
return
command_collision_table = AliasManager.build_collision_table([command], azext_alias.cached_reserved_commands)
alias_collision_levels = alias_collision_table.get(alias.split()[0], [])
command_collision_levels = command_collision_table.get(command.split()[0], [])
# Check if there is a command level conflict
if set(alias_collision_levels) & set(command_collision_levels):
raise CLIError(COMMAND_LVL_ERROR.format(alias, command))
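# A minimal usage sketch (the Namespace object stands in for what argparse
# hands to the validator during 'az alias create'; it also assumes
# azext_alias.cached_reserved_commands has been populated by the CLI):
#
#   from argparse import Namespace
#   ns = Namespace(alias_name='grp', alias_command='group')
#   process_alias_create_namespace(ns)   # raises CLIError on invalid input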
|
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Changgon Kim, Mingeyong Yang, Taeeun Kim
# @Date: 2021-04-26 17:14
# @Last modified by: Changgon Kim
from __future__ import absolute_import, division, print_function
class LvmIebError(Exception):
"""A custom core LvmIeb exception"""
def __init__(self, message=None):
message = "There has been an error" if not message else message
super(LvmIebError, self).__init__(message)
class LvmIebNotImplemented(LvmIebError):
"""A custom exception for not yet implemented features."""
def __init__(self, message=None):
message = "This feature is not implemented yet." if not message else message
super(LvmIebNotImplemented, self).__init__(message)
class LvmIebAPIError(LvmIebError):
"""A custom exception for API errors"""
def __init__(self, message=None):
if not message:
message = "Error with Http Response from LvmIeb API"
else:
message = "Http response error from LvmIeb API. {0}".format(message)
super(LvmIebAPIError, self).__init__(message)
class LvmIebApiAuthError(LvmIebAPIError):
"""A custom exception for API authentication errors"""
pass
class LvmIebMissingDependency(LvmIebError):
"""A custom exception for missing dependencies."""
pass
class LvmIebWarning(Warning):
"""Base warning for LvmIeb."""
class LvmIebUserWarning(UserWarning, LvmIebWarning):
"""The primary warning class."""
pass
class LvmIebSkippedTestWarning(LvmIebUserWarning):
"""A warning for when a test is skipped."""
pass
class LvmIebDeprecationWarning(LvmIebUserWarning):
"""A warning for deprecated features."""
pass
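# A minimal usage sketch: callers can catch the broad base class while the
# library raises the most specific subclass (the message text is illustrative):
#
#   try:
#       raise LvmIebAPIError("connection timed out")
#   except LvmIebError as err:
#       print(err)  # -> "Http response error from LvmIeb API. connection timed out"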
|
import os
from collections import defaultdict
import numpy as np
import torch
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
from common import utils
class Manager():
def __init__(self, model, optimizer, scheduler, params, dataloaders, logger):
# params status
self.params = params
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.dataloaders = dataloaders
self.logger = logger
self.epoch = 0
self.step = 0
self.best_val_score = np.inf
self.cur_val_score = np.inf
self.best_test_score = np.inf
self.cur_test_score = np.inf
# train status
self.train_status = defaultdict(utils.AverageMeter)
# val status
self.val_status = defaultdict(utils.AverageMeter)
# test status
self.test_status = defaultdict(utils.AverageMeter)
# model status
self.loss_status = defaultdict(utils.AverageMeter)
# init local tensorboard and html
self.init_tb_and_html()
def init_tb_and_html(self):
# tensorboard loss
local_tb_dir = os.path.join(self.params.model_dir, "summary/loss")
os.makedirs(local_tb_dir, exist_ok=True)
self.local_loss_writter = SummaryWriter(log_dir=local_tb_dir)
# tensorboard metric
local_tb_dir = os.path.join(self.params.model_dir, "summary/metric")
os.makedirs(local_tb_dir, exist_ok=True)
self.local_metric_writter = SummaryWriter(log_dir=local_tb_dir)
# html
local_html_dir = os.path.join(self.params.model_dir, "summary/html")
os.makedirs(local_html_dir, exist_ok=True)
self.local_html_dir = local_html_dir
def update_step(self):
self.step += 1
def update_epoch(self):
self.epoch += 1
def update_loss_status(self, loss, batch_size):
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=batch_size)
def update_metric_status(self, metrics, split, batch_size):
if split == "val":
for k, v in metrics.items():
self.val_status[k].update(val=v.item(), num=batch_size)
self.cur_val_score = self.val_status[self.params.major_metric].avg
elif split == "test":
for k, v in metrics.items():
self.test_status[k].update(val=v.item(), num=batch_size)
self.cur_test_score = self.test_status[self.params.major_metric].avg
else:
raise ValueError("Wrong eval type: {}".format(split))
def summarize_metric_status(self, metrics, split):
if split == "val":
for k in metrics:
if k.endswith('MSE'):
self.val_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.val_status[k].avg))
else:
continue
elif split == "test":
for k in metrics:
if k.endswith('MSE'):
self.test_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.test_status[k].avg))
else:
continue
else:
raise ValueError("Wrong eval type: {}".format(split))
def reset_loss_status(self):
for k, v in self.loss_status.items():
self.loss_status[k].reset()
def reset_metric_status(self, split):
if split == "val":
for k, v in self.val_status.items():
self.val_status[k].reset()
elif split == "test":
for k, v in self.test_status.items():
self.test_status[k].reset()
else:
raise ValueError("Wrong split string: {}".format(split))
def print_train_info(self):
exp_name = self.params.model_dir.split('/')[-1]
print_str = "{} Epoch: {:4d}, lr={:.4f} ".format(exp_name, self.epoch, self.scheduler.get_last_lr()[0])
print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
return print_str
def print_metrics(self, split, title="Eval", color="red", only_best=False):
if split == "val":
metric_status = self.val_status
is_best = self.cur_val_score < self.best_val_score
elif split == "test":
metric_status = self.test_status
is_best = self.cur_test_score < self.best_test_score
else:
raise ValueError("Wrong split string: {}".format(split))
print_str = " | ".join("{}: {:4g}".format(k, v.avg) for k, v in metric_status.items())
if only_best:
if is_best:
self.logger.info(colored("Best Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))
else:
self.logger.info(colored("Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))
def write_loss_to_tb(self, split):
for k, v in self.loss_status.items():
if split == "train":
self.local_loss_writter.add_scalar("train_Loss/{}".format(k), v.val, self.step)
elif split == "val":
self.local_loss_writter.add_scalar("val_Loss/{}".format(k), v.val, self.step)
elif split == "test":
self.local_loss_writter.add_scalar("test_Loss/{}".format(k), v.val, self.step)
else:
raise ValueError("Wrong split string: {}".format(split))
def write_metric_to_tb(self, split):
if split == "val":
for k, v in self.val_status.items():
self.local_metric_writter.add_scalar("val_Metric/{}".format(k), v.avg, self.epoch)
elif split == "test":
for k, v in self.test_status.items():
self.local_metric_writter.add_scalar("test_Metric/{}".format(k), v.avg, self.epoch)
else:
raise ValueError("Wrong split string: {}".format(split))
def check_best_save_last_checkpoints(self, save_latest_freq=5, save_best_after=50):
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
if self.dataloaders["val"] is not None:
state["best_val_score"] = self.best_val_score
if self.dataloaders["test"] is not None:
state["best_test_score"] = self.best_test_score
# save latest checkpoint
if self.epoch % save_latest_freq == 0:
latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
torch.save(state, latest_ckpt_name)
self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
# save val latest metrics, and check if val is best checkpoints
if self.dataloaders["val"] is not None:
val_latest_metrics_name = os.path.join(self.params.model_dir, "val_metrics_latest.json")
utils.save_dict_to_json(self.val_status, val_latest_metrics_name)
is_best = self.cur_val_score < self.best_val_score
if is_best:
# save metrics
self.best_val_score = self.cur_val_score
best_metrics_name = os.path.join(self.params.model_dir, "val_metrics_best.json")
utils.save_dict_to_json(self.val_status, best_metrics_name)
self.logger.info("Current is val best, score={:.7f}".format(self.best_val_score))
# save checkpoint
if self.epoch > save_best_after:
best_ckpt_name = os.path.join(self.params.model_dir, "val_model_best.pth")
torch.save(state, best_ckpt_name)
self.logger.info("Saved val best checkpoint to: {}".format(best_ckpt_name))
# save test latest metrics, and check if test is best checkpoints
if self.dataloaders["test"] is not None:
test_latest_metrics_name = os.path.join(self.params.model_dir, "test_metrics_latest.json")
utils.save_dict_to_json(self.test_status, test_latest_metrics_name)
is_best = self.cur_test_score < self.best_test_score
if is_best:
# save metrics
self.best_test_score = self.cur_test_score
best_metrics_name = os.path.join(self.params.model_dir, "test_metrics_best.json")
utils.save_dict_to_json(self.test_status, best_metrics_name)
self.logger.info("Current is test best, score={:.7f}".format(self.best_test_score))
# save checkpoint
if self.epoch > save_best_after:
best_ckpt_name = os.path.join(self.params.model_dir, "test_model_best.pth")
torch.save(state, best_ckpt_name)
self.logger.info("Saved test best checkpoint to: {}".format(best_ckpt_name))
def load_checkpoints(self):
state = torch.load(self.params.restore_file)
ckpt_component = []
if "state_dict" in state and self.model is not None:
try:
self.model.load_state_dict(state["state_dict"])
except RuntimeError:
print("Using custom loading net")
net_dict = self.model.state_dict()
if "module" not in list(state["state_dict"].keys())[0]:
state_dict = {"module." + k: v for k, v in state["state_dict"].items() if "module." + k in net_dict.keys()}
else:
state_dict = {k: v for k, v in state["state_dict"].items() if k in net_dict.keys()}
net_dict.update(state_dict)
self.model.load_state_dict(net_dict, strict=False)
ckpt_component.append("net")
if not self.params.only_weights:
if "optimizer" in state and self.optimizer is not None:
try:
self.optimizer.load_state_dict(state["optimizer"])
except RuntimeError:
print("Using custom loading optimizer")
optimizer_dict = self.optimizer.state_dict()
state_dict = {k: v for k, v in state["optimizer"].items() if k in optimizer_dict.keys()}
optimizer_dict.update(state_dict)
self.optimizer.load_state_dict(optimizer_dict)
ckpt_component.append("opt")
if "scheduler" in state and self.train_status["scheduler"] is not None:
try:
self.scheduler.load_state_dict(state["scheduler"])
except RuntimeError:
print("Using custom loading scheduler")
scheduler_dict = self.scheduler.state_dict()
state_dict = {k: v for k, v in state["scheduler"].items() if k in scheduler_dict.keys()}
scheduler_dict.update(state_dict)
self.scheduler.load_state_dict(scheduler_dict)
ckpt_component.append("sch")
if "step" in state:
self.step = state["step"] + 1
ckpt_component.append("step")
if "epoch" in state:
self.epoch = state["epoch"] + 1
ckpt_component.append("epoch")
if "best_val_score" in state:
self.best_val_score = state["best_val_score"]
ckpt_component.append("best val score: {:.3g}".format(self.best_val_score))
if "best_test_score" in state:
self.best_test_score = state["best_test_score"]
ckpt_component.append("best test score: {:.3g}".format(self.best_test_score))
ckpt_component = ", ".join(i for i in ckpt_component)
self.logger.info("Loaded models from: {}".format(self.params.restore_file))
self.logger.info("Ckpt load: {}".format(ckpt_component))
|
import asyncio
import discord
import time
import parsedatetime
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import ReadableTime
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
mute = bot.get_cog("Mute")
bot.add_cog(Strike(bot, settings, mute))
# This is the Strike module. It keeps track of warnings and kicks/bans accordingly
# Strikes = [ time until drops off ]
# StrikeOut = 3 (3 strikes and you're out)
# StrikeLevel (a list similar to xproles)
# Standard strike roles:
# 0 = Not been punished already
# 1 = Muted for x amount of time
# 2 = Already been kicked (id in kick list)
# 3 = Already been banned (auto-mute)
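# Each strike is stored as a dict on the user's "Strikes" stat; the shape
# below mirrors what the strike command builds (values are illustrative):
#
#   {
#       'Time': 1617235200,      # unix time it rolls off, or -1 for never
#       'Message': 'spamming',   # optional reason
#       'GivenBy': 123456789     # id of the admin who issued it
#   }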
class Strike(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings, mute):
self.bot = bot
self.settings = settings
self.mute = mute
self.loop_list = []
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
async def onjoin(self, member, server):
# Check id against the kick and ban list and react accordingly
kickList = self.settings.getServerStat(server, "KickList")
if str(member.id) in kickList:
# The user has been kicked before - set their strikeLevel to 2
self.settings.setUserStat(member, server, "StrikeLevel", 2)
banList = self.settings.getServerStat(server, "BanList")
if str(member.id) in banList:
# The user has been banned before - set their strikeLevel to 3
# Also mute them
self.settings.setUserStat(member, server, "StrikeLevel", 3)
self.settings.setUserStat(member, server, "Muted", True)
self.settings.setUserStat(member, server, "Cooldown", None)
await self.mute._mute(member, server)
# Proof of concept stuff for reloading cog/extension
def _is_submodule(self, parent, child):
return parent == child or child.startswith(parent + ".")
@commands.Cog.listener()
async def on_unloaded_extension(self, ext):
# Called to shut things down
if not self._is_submodule(ext.__name__, self.__module__):
return
for task in self.loop_list:
task.cancel()
@commands.Cog.listener()
async def on_loaded_extension(self, ext):
# See if we were loaded
if not self._is_submodule(ext.__name__, self.__module__):
return
self.bot.loop.create_task(self.start_loading())
async def start_loading(self):
await self.bot.wait_until_ready()
await self.bot.loop.run_in_executor(None, self.check_strikes)
def check_strikes(self):
# Check all strikes - and start timers
print("Checking strikes...")
t = time.time()
for server in self.bot.guilds:
for member in server.members:
strikes = self.settings.getUserStat(member, server, "Strikes")
if strikes == None:
continue
if len(strikes):
# We have a list
for strike in strikes:
# Make sure it's a strike that *can* roll off
if not strike['Time'] == -1:
self.loop_list.append(self.bot.loop.create_task(
self.checkStrike(member, strike)))
print("Strikes checked - took {} seconds.".format(time.time() - t))
async def checkStrike(self, member, strike):
# Start our countdown
countDown = int(strike['Time'])-int(time.time())
if countDown > 0:
# We have a positive countdown - let's wait
await asyncio.sleep(countDown)
strikes = self.settings.getUserStat(member, member.guild, "Strikes")
# Verify strike is still valid
if not strike in strikes:
return
strikes.remove(strike)
self.settings.setUserStat(member, member.guild, "Strikes", strikes)
@commands.command(pass_context=True)
async def strike(self, ctx, member: discord.Member = None, days=None, *, message: str = None):
"""Give a user a strike (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}strike [member] [strike timeout (in days) - 0 = forever] [message (optional)]`'.format(
ctx.prefix)
await ctx.channel.send(msg)
return
# Check if we're striking ourselves
if member.id == ctx.message.author.id:
# We're giving ourselves a strike?
await ctx.channel.send('You can\'t give yourself a strike, silly.')
return
# Check if the bot is getting the strike
if member.id == self.bot.user.id:
await ctx.channel.send('I can\'t do that, *{}*.'.format(DisplayName.name(ctx.message.author)))
return
# Check if we're striking another admin/bot-admin
isAdmin = member.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in member.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
if isAdmin:
await ctx.channel.send('You can\'t give other admins/bot-admins strikes, bub.')
return
# Check if days is an int - otherwise assume it's part of the message
try:
days = int(days)
except Exception:
if not days == None:
if message == None:
message = days
else:
message = days + ' ' + message
days = 0
# If it's not at least a day, it's forever
if days < 1:
days = -1
currentTime = int(time.time())
# Build our Strike
strike = {}
if days == -1:
strike['Time'] = -1
else:
strike['Time'] = currentTime+(86400*days)
self.loop_list.append(self.bot.loop.create_task(
self.checkStrike(member, strike)))
strike['Message'] = message
strike['GivenBy'] = ctx.message.author.id
strikes = self.settings.getUserStat(
member, ctx.message.guild, "Strikes")
strikeout = int(self.settings.getServerStat(
ctx.message.guild, "StrikeOut"))
strikeLevel = int(self.settings.getUserStat(
member, ctx.message.guild, "StrikeLevel"))
strikes.append(strike)
self.settings.setUserStat(
member, ctx.message.guild, "Strikes", strikes)
strikeNum = len(strikes)
# Set up consequences
if strikeLevel == 0:
consequence = '**muted for a day**.'
elif strikeLevel == 1:
consequence = '**kicked**.'
else:
consequence = '**banned**.'
# Check if we've struck out
if strikeNum < strikeout:
# We haven't struck out yet
msg = '*{}* has just received *strike {}*. *{}* more and they will be {}'.format(
DisplayName.name(member), strikeNum, strikeout-strikeNum, consequence)
else:
# We struck out - let's evaluate
if strikeLevel == 0:
cooldownFinal = currentTime+86400
checkRead = ReadableTime.getReadableTimeBetween(
currentTime, cooldownFinal)
if message:
mutemessage = 'You have been muted in *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), message)
else:
mutemessage = 'You have been muted in *{}*.'.format(
Nullify.escape_all(ctx.guild.name))
# Check if already muted
alreadyMuted = self.settings.getUserStat(
member, ctx.message.guild, "Muted")
if alreadyMuted:
# Find out for how long
muteTime = self.settings.getUserStat(
member, ctx.message.guild, "Cooldown")
if not muteTime == None:
if muteTime < cooldownFinal:
self.settings.setUserStat(
member, ctx.message.guild, "Cooldown", cooldownFinal)
timeRemains = ReadableTime.getReadableTimeBetween(
currentTime, cooldownFinal)
if message:
mutemessage = 'Your muted time in *{}* has been extended to *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), timeRemains, message)
else:
mutemessage = 'Your muted time in *{}* has been extended to *{}*.'.format(
Nullify.escape_all(ctx.guild.name), timeRemains)
else:
self.settings.setUserStat(
member, ctx.message.guild, "Muted", True)
self.settings.setUserStat(
member, ctx.message.guild, "Cooldown", cooldownFinal)
await self.mute._mute(member, ctx.message.guild, cooldownFinal)
await member.send(mutemessage)
elif strikeLevel == 1:
kickList = self.settings.getServerStat(
ctx.message.guild, "KickList")
if not str(member.id) in kickList:
kickList.append(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
if message:
kickmessage = 'You have been kicked from *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), message)
else:
kickmessage = 'You have been kicked from *{}*.'.format(
Nullify.escape_all(ctx.guild.name))
await member.send(kickmessage)
await ctx.guild.kick(member)
else:
banList = self.settings.getServerStat(
ctx.message.guild, "BanList")
if not str(member.id) in banList:
banList.append(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "BanList", banList)
if message:
banmessage = 'You have been banned from *{}*.\nThe Reason:\n{}'.format(
Nullify.escape_all(ctx.guild.name), message)
else:
banmessage = 'You have been banned from *{}*.'.format(
Nullify.escape_all(ctx.guild.name))
await member.send(banmessage)
await ctx.guild.ban(member)
self.settings.incrementStat(
member, ctx.message.guild, "StrikeLevel", 1)
self.settings.setUserStat(member, ctx.message.guild, "Strikes", [])
msg = '*{}* has just received *strike {}*. They have been {}'.format(
DisplayName.name(member), strikeNum, consequence)
await ctx.channel.send(msg)
@strike.error
async def strike_error(self, ctx, error):
# do stuff
msg = 'strike Error: {}'.format(error)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def strikes(self, ctx, *, member=None):
"""Check a your own, or another user's total strikes (bot-admin needed to check other users)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
# Only allow admins to check others' strikes
if not isAdmin:
if member:
if not member.id == ctx.message.author.id:
await ctx.channel.send('You are not a bot-admin. You can only see your own strikes.')
member = ctx.message.author
# Create blank embed
stat_embed = discord.Embed(color=member.color)
strikes = self.settings.getUserStat(
member, ctx.message.guild, "Strikes")
strikeout = int(self.settings.getServerStat(
ctx.message.guild, "StrikeOut"))
strikeLevel = int(self.settings.getUserStat(
member, ctx.message.guild, "StrikeLevel"))
# Add strikes, and strike level
stat_embed.add_field(name="Strikes", value=len(strikes), inline=True)
stat_embed.add_field(name="Strike Level",
value=strikeLevel, inline=True)
# Get member's avatar url
avURL = member.avatar_url
if not len(avURL):
avURL = member.default_avatar_url
if member.nick:
# We have a nickname
msg = "__***{},*** **who currently goes by** ***{}:***__\n\n".format(
member.name, member.nick)
# Add to embed
stat_embed.set_author(name='{}, who currently goes by {}'.format(
member.name, member.nick), icon_url=avURL)
else:
msg = "__***{}:***__\n\n".format(member.name)
# Add to embed
stat_embed.set_author(name='{}'.format(
member.name), icon_url=avURL)
# Get messages - and cooldowns
currentTime = int(time.time())
if not len(strikes):
# no strikes
messages = "None."
cooldowns = "None."
givenBy = "None."
else:
messages = ''
cooldowns = ''
givenBy = ''
for i in range(0, len(strikes)):
if strikes[i]['Message']:
messages += '{}. {}\n'.format(i+1, strikes[i]['Message'])
else:
messages += '{}. No message\n'.format(i+1)
timeLeft = strikes[i]['Time']
if timeLeft == -1:
cooldowns += '{}. Never rolls off\n'.format(i+1)
else:
timeRemains = ReadableTime.getReadableTimeBetween(
currentTime, timeLeft)
cooldowns += '{}. {}\n'.format(i+1, timeRemains)
given = strikes[i]['GivenBy']
givenBy += '{}. {}\n'.format(i+1, DisplayName.name(
DisplayName.memberForID(given, ctx.message.guild)))
# Add messages and cooldowns
stat_embed.add_field(name="Messages", value=messages, inline=True)
stat_embed.add_field(name="Time Left", value=cooldowns, inline=True)
stat_embed.add_field(name="Given By", value=givenBy, inline=True)
# Strikes remaining
stat_embed.add_field(name="Strikes Remaining",
value=strikeout-len(strikes), inline=True)
await ctx.channel.send(embed=stat_embed)
@commands.command(pass_context=True)
async def removestrike(self, ctx, *, member=None):
"""Removes a strike given to a member (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removestrike [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
# We have what we need - get the list
strikes = self.settings.getUserStat(
member, ctx.message.guild, "Strikes")
# Return if no strikes to take
if not len(strikes):
await ctx.channel.send('*{}* has no strikes to remove.'.format(DisplayName.name(member)))
return
# We have some - naughty naughty!
strikes = sorted(strikes, key=lambda x: int(x['Time']))
for strike in strikes:
# Check if we've got one that's not -1
if not strike['Time'] == -1:
# First item that isn't forever - kill it
strikes.remove(strike)
self.settings.setUserStat(
member, ctx.message.guild, "Strikes", strikes)
await ctx.channel.send('*{}* has one less strike. They are down to *{}*.'.format(DisplayName.name(member), len(strikes)))
return
# If we're here - we just remove one
del strikes[0]
self.settings.setUserStat(
member, ctx.message.guild, "Strikes", strikes)
await ctx.channel.send('*{}* has one less strike. They are down to *{}*.'.format(DisplayName.name(member), len(strikes)))
return
@commands.command(pass_context=True)
async def setstrikelevel(self, ctx, *, member=None, strikelevel: int = None):
"""Sets the strike level of the passed user (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
author = ctx.message.author
server = ctx.message.guild
channel = ctx.message.channel
usage = 'Usage: `{}setstrikelevel [member] [strikelevel]`'.format(
ctx.prefix)
if member == None:
await ctx.channel.send(usage)
return
# Check for formatting issues
if strikelevel == None:
# Either strike level wasn't set - or it's the last section
if type(member) is str:
# It's a string - the hope continues
nameCheck = DisplayName.checkNameForInt(member, server)
if not nameCheck:
await ctx.channel.send(usage)
return
if not nameCheck["Member"]:
msg = 'I couldn\'t find *{}* on the server.'.format(
Nullify.escape_all(member))
await ctx.channel.send(msg)
return
member = nameCheck["Member"]
strikelevel = nameCheck["Int"]
if strikelevel == None:
# Still no strike level
await ctx.channel.send(usage)
return
self.settings.setUserStat(
member, ctx.message.guild, "StrikeLevel", strikelevel)
msg = '*{}\'s* strike level has been set to *{}!*'.format(
DisplayName.name(member), strikelevel)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def addkick(self, ctx, *, member=None):
"""Adds the passed user to the kick list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}addkick [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if not str(member.id) in kickList:
kickList.append(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
msg = '*{}* was added to the kick list.'.format(
DisplayName.name(member))
else:
msg = '*{}* is already in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def removekick(self, ctx, *, member=None):
"""Removes the passed user from the kick list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removekick [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if str(member.id) in kickList:
kickList.remove(str(member.id))
self.settings.setServerStat(
ctx.message.guild, "KickList", kickList)
msg = '*{}* was removed from the kick list.'.format(
DisplayName.name(member))
else:
msg = '*{}* was not found in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def addban(self, ctx, *, member=None):
"""Adds the passed user to the ban list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}addban [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if not str(member.id) in banList:
banList.append(str(member.id))
self.settings.setServerStat(ctx.message.guild, "BanList", banList)
msg = '*{}* was added to the ban list.'.format(
DisplayName.name(member))
else:
msg = '*{}* is already in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def removeban(self, ctx, *, member=None):
"""Removes the passed user from the ban list (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if member == None:
msg = 'Usage: `{}removeban [member]`'.format(ctx.prefix)
await ctx.channel.send(msg)
return
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
msg = ''
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if str(member.id) in banList:
banList.remove(str(member.id))
self.settings.setServerStat(ctx.message.guild, "BanList", banList)
msg = '*{}* was removed from the ban list.'.format(
DisplayName.name(member))
else:
msg = '*{}* was not found in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def iskicked(self, ctx, *, member=None):
"""Lists whether the user is in the kick list."""
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
kickList = self.settings.getServerStat(ctx.message.guild, "KickList")
if str(member.id) in kickList:
msg = '*{}* is in the kick list.'.format(DisplayName.name(member))
else:
msg = '*{}* is **not** in the kick list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def isbanned(self, ctx, *, member=None):
"""Lists whether the user is in the ban list."""
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(
Nullify.escape_all(memberName))
await ctx.channel.send(msg)
return
banList = self.settings.getServerStat(ctx.message.guild, "BanList")
if str(member.id) in banList:
msg = '*{}* is in the ban list.'.format(DisplayName.name(member))
else:
msg = '*{}* is **not** in the ban list.'.format(
DisplayName.name(member))
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def strikelimit(self, ctx):
"""Lists the number of strikes before advancing to the next consequence."""
strikeout = int(self.settings.getServerStat(
ctx.message.guild, "StrikeOut"))
msg = '*{}* strikes are required to strike out.'.format(strikeout)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def setstrikelimit(self, ctx, limit=None):
"""Sets the number of strikes before advancing to the next consequence (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(
ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(
ctx.message.guild, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if str(aRole['ID']) == str(role.id):
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await ctx.channel.send('You do not have sufficient privileges to access this command.')
return
if not limit:
await ctx.channel.send('Strike limit must be *at least* one.')
return
try:
limit = int(limit)
except Exception:
await ctx.channel.send('Strike limit must be an integer.')
return
self.settings.setServerStat(ctx.message.guild, "StrikeOut", limit)
msg = '*{}* strikes are now required to strike out.'.format(limit)
await ctx.channel.send(msg)
@setstrikelimit.error
async def setstrikelimit_error(self, ctx, error):
# do stuff
msg = 'setstrikelimit Error: {}'.format(error)
await ctx.channel.send(msg)
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import errno
import struct
import threading
import ctypes
import ctypes.util
from functools import reduce
from ctypes import c_int, c_char_p, c_uint32
from watchdog.utils import has_attribute
from watchdog.utils import UnsupportedLibc
from watchdog.utils.unicode_paths import decode
def _load_libc():
libc_path = None
try:
libc_path = ctypes.util.find_library('c')
except (OSError, IOError, RuntimeError):
# Note: find_library will on some platforms raise these undocumented
# errors, e.g. on Android an IOError "No usable temporary directory
# found" may be raised.
pass
if libc_path is not None:
return ctypes.CDLL(libc_path)
# Fallbacks
try:
return ctypes.CDLL('libc.so')
except (OSError, IOError):
pass
try:
return ctypes.CDLL('libc.so.6')
except (OSError, IOError):
pass
# uClibc
return ctypes.CDLL('libc.so.0')
libc = _load_libc()
if not has_attribute(libc, 'inotify_init') or \
not has_attribute(libc, 'inotify_add_watch') or \
not has_attribute(libc, 'inotify_rm_watch'):
raise UnsupportedLibc("Unsupported libc version found: %s" % libc._name)
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
("inotify_add_watch", libc))
inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(
("inotify_rm_watch", libc))
inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(
("inotify_init", libc))
class InotifyConstants(object):
# User-space events
IN_ACCESS = 0x00000001 # File was accessed.
IN_MODIFY = 0x00000002 # File was modified.
IN_ATTRIB = 0x00000004 # Meta-data changed.
IN_CLOSE_WRITE = 0x00000008 # Writable file was closed.
IN_CLOSE_NOWRITE = 0x00000010 # Unwritable file closed.
IN_OPEN = 0x00000020 # File was opened.
IN_MOVED_FROM = 0x00000040 # File was moved from X.
IN_MOVED_TO = 0x00000080 # File was moved to Y.
IN_CREATE = 0x00000100 # Subfile was created.
IN_DELETE = 0x00000200 # Subfile was deleted.
IN_DELETE_SELF = 0x00000400 # Self was deleted.
IN_MOVE_SELF = 0x00000800 # Self was moved.
# Helper user-space events.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # Close.
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO # Moves.
# Events sent by the kernel to a watch.
IN_UNMOUNT = 0x00002000 # Backing file system was unmounted.
IN_Q_OVERFLOW = 0x00004000 # Event queued overflowed.
IN_IGNORED = 0x00008000 # File was ignored.
# Special flags.
IN_ONLYDIR = 0x01000000 # Only watch the path if it's a directory.
IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link.
IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects
IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch.
IN_ISDIR = 0x40000000 # Event occurred against directory.
IN_ONESHOT = 0x80000000 # Only send event once.
# All user-space events.
IN_ALL_EVENTS = reduce(
lambda x, y: x | y, [
IN_ACCESS,
IN_MODIFY,
IN_ATTRIB,
IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE,
IN_OPEN,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_DELETE,
IN_CREATE,
IN_DELETE_SELF,
IN_MOVE_SELF,
])
# Flags for ``inotify_init1``
IN_CLOEXEC = 0x02000000
IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = reduce(
lambda x, y: x | y, [
InotifyConstants.IN_MODIFY,
InotifyConstants.IN_ATTRIB,
InotifyConstants.IN_MOVED_FROM,
InotifyConstants.IN_MOVED_TO,
InotifyConstants.IN_CREATE,
InotifyConstants.IN_DELETE,
InotifyConstants.IN_DELETE_SELF,
InotifyConstants.IN_DONT_FOLLOW,
])
class inotify_event_struct(ctypes.Structure):
"""
Structure representation of the inotify_event structure
(used in buffer size calculations)::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
"""
_fields_ = [('wd', c_int),
('mask', c_uint32),
('cookie', c_uint32),
('len', c_uint32),
('name', c_char_p)]
EVENT_SIZE = ctypes.sizeof(inotify_event_struct)
DEFAULT_NUM_EVENTS = 2048
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
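# Buffer sizing sketch: each kernel event is a 16-byte fixed header (wd,
# mask, cookie, len) plus a variable-length name, so EVENT_SIZE + 16 is a
# rough per-event allowance; os.read() simply returns fewer events per call
# when names run longer.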
class Inotify(object):
"""
Linux inotify(7) API wrapper class.
:param path:
The directory path for which we want an inotify object.
:type path:
:class:`bytes`
:param recursive:
``True`` if subdirectories should be monitored; ``False`` otherwise.
"""
def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS):
# The file descriptor associated with the inotify instance.
inotify_fd = inotify_init()
if inotify_fd == -1:
Inotify._raise_error()
self._inotify_fd = inotify_fd
self._lock = threading.Lock()
# Stores the watch descriptor for a given path.
self._wd_for_path = dict()
self._path_for_wd = dict()
self._path = path
self._event_mask = event_mask
self._is_recursive = recursive
if os.path.isdir(path):
self._add_dir_watch(path, recursive, event_mask)
else:
self._add_watch(path, event_mask)
self._moved_from_events = dict()
@property
def event_mask(self):
"""The event mask for this inotify instance."""
return self._event_mask
@property
def path(self):
"""The path associated with the inotify instance."""
return self._path
@property
def is_recursive(self):
"""Whether we are watching directories recursively."""
return self._is_recursive
@property
def fd(self):
"""The file descriptor associated with the inotify instance."""
return self._inotify_fd
def clear_move_records(self):
"""Clear cached records of MOVED_FROM events"""
self._moved_from_events = dict()
def source_for_move(self, destination_event):
"""
The source path corresponding to the given MOVED_TO event.
If the source path is outside the monitored directories, None
is returned instead.
"""
if destination_event.cookie in self._moved_from_events:
return self._moved_from_events[destination_event.cookie].src_path
else:
return None
def remember_move_from_event(self, event):
"""
Save this event as the source event for future MOVED_TO events to
reference.
"""
self._moved_from_events[event.cookie] = event
def add_watch(self, path):
"""
Adds a watch for the given path.
:param path:
Path to begin monitoring.
"""
with self._lock:
self._add_watch(path, self._event_mask)
def remove_watch(self, path):
"""
Removes a watch for the given path.
:param path:
Path string for which the watch will be removed.
"""
with self._lock:
wd = self._wd_for_path.pop(path)
del self._path_for_wd[wd]
if inotify_rm_watch(self._inotify_fd, wd) == -1:
Inotify._raise_error()
def close(self):
"""
Closes the inotify instance and removes all associated watches.
"""
with self._lock:
if self._path in self._wd_for_path:
wd = self._wd_for_path[self._path]
inotify_rm_watch(self._inotify_fd, wd)
os.close(self._inotify_fd)
def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
"""
Reads events from inotify and yields them.
"""
# HACK: We need to traverse the directory path
# recursively and simulate events for newly
# created subdirectories/files. This will handle
# mkdir -p foobar/blah/bar; touch foobar/afile
def _recursive_simulate(src_path):
events = []
for root, dirnames, filenames in os.walk(src_path):
for dirname in dirnames:
try:
full_path = os.path.join(root, dirname)
wd_dir = self._add_watch(full_path, self._event_mask)
e = InotifyEvent(
wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path)
events.append(e)
except OSError:
pass
for filename in filenames:
full_path = os.path.join(root, filename)
wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
e = InotifyEvent(
wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path)
events.append(e)
return events
event_buffer = None
while True:
try:
event_buffer = os.read(self._inotify_fd, event_buffer_size)
except OSError as e:
if e.errno == errno.EINTR:
continue
break
with self._lock:
event_list = []
for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
if wd == -1:
continue
wd_path = self._path_for_wd[wd]
src_path = os.path.join(wd_path, name) if name else wd_path # avoid trailing slash
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_moved_from:
self.remember_move_from_event(inotify_event)
elif inotify_event.is_moved_to:
move_src_path = self.source_for_move(inotify_event)
if move_src_path in self._wd_for_path:
moved_wd = self._wd_for_path[move_src_path]
del self._wd_for_path[move_src_path]
self._wd_for_path[inotify_event.src_path] = moved_wd
self._path_for_wd[moved_wd] = inotify_event.src_path
if self.is_recursive:
for _path, _wd in self._wd_for_path.copy().items():
if _path.startswith(move_src_path + os.path.sep.encode()):
moved_wd = self._wd_for_path.pop(_path)
_move_to_path = _path.replace(move_src_path, inotify_event.src_path)
self._wd_for_path[_move_to_path] = moved_wd
self._path_for_wd[moved_wd] = _move_to_path
src_path = os.path.join(wd_path, name)
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_ignored:
# Clean up book-keeping for deleted watches.
path = self._path_for_wd.pop(wd)
if self._wd_for_path[path] == wd:
del self._wd_for_path[path]
continue
event_list.append(inotify_event)
if (self.is_recursive and inotify_event.is_directory
and inotify_event.is_create):
# TODO: When a directory from another part of the
# filesystem is moved into a watched directory, this
# will not generate events for the directory tree.
# We need to coalesce IN_MOVED_TO events and those
# IN_MOVED_TO events which don't pair up with
# IN_MOVED_FROM events should be marked IN_CREATE
# instead relative to this directory.
try:
self._add_watch(src_path, self._event_mask)
except OSError:
continue
event_list.extend(_recursive_simulate(src_path))
return event_list
# Non-synchronized methods.
def _add_dir_watch(self, path, recursive, mask):
"""
Adds a watch (optionally recursively) for the given directory path
to monitor events specified by the mask.
:param path:
Path to monitor
:param recursive:
``True`` to monitor recursively.
:param mask:
Event bit mask.
"""
if not os.path.isdir(path):
raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
self._add_watch(path, mask)
if recursive:
for root, dirnames, _ in os.walk(path):
for dirname in dirnames:
full_path = os.path.join(root, dirname)
if os.path.islink(full_path):
continue
self._add_watch(full_path, mask)
def _add_watch(self, path, mask):
"""
Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:param mask:
Event bit mask.
"""
wd = inotify_add_watch(self._inotify_fd, path, mask)
if wd == -1:
Inotify._raise_error()
self._wd_for_path[path] = wd
self._path_for_wd[wd] = path
return wd
@staticmethod
def _raise_error():
"""
Raises errors for inotify failures.
"""
err = ctypes.get_errno()
if err == errno.ENOSPC:
raise OSError(errno.ENOSPC, "inotify watch limit reached")
elif err == errno.EMFILE:
raise OSError(errno.EMFILE, "inotify instance limit reached")
else:
raise OSError(err, os.strerror(err))
@staticmethod
def _parse_event_buffer(event_buffer):
"""
Parses an event buffer of ``inotify_event`` structs returned by
inotify::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
The ``cookie`` member of this struct is used to pair two related
events, for example, it pairs an IN_MOVED_FROM event with an
IN_MOVED_TO event.
"""
i = 0
while i + 16 <= len(event_buffer):
wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i)
name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0')
i += 16 + length
yield wd, mask, cookie, name
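# Illustrative sketch (an addition, not part of the original module): pack one
# synthetic ``inotify_event`` record and confirm that ``_parse_event_buffer``
# recovers its fields. The kernel NUL-pads names, and ``len`` counts the pads.
def _demo_parse_event_buffer():
    name = b'afile'
    padded = name + b'\0' * (16 - len(name))  # NUL-padded name field
    record = struct.pack('iIII', 1, InotifyConstants.IN_CREATE, 0, len(padded)) + padded
    # Yields a single tuple: (wd=1, mask=IN_CREATE, cookie=0, name=b'afile').
    return list(Inotify._parse_event_buffer(record))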
class InotifyEvent(object):
"""
Inotify event struct wrapper.
:param wd:
Watch descriptor
:param mask:
Event mask
:param cookie:
Event cookie
:param name:
Base name of the event source path.
:param src_path:
Full event source path.
"""
def __init__(self, wd, mask, cookie, name, src_path):
self._wd = wd
self._mask = mask
self._cookie = cookie
self._name = name
self._src_path = src_path
@property
def src_path(self):
return self._src_path
@property
def wd(self):
return self._wd
@property
def mask(self):
return self._mask
@property
def cookie(self):
return self._cookie
@property
def name(self):
return self._name
@property
def is_modify(self):
return self._mask & InotifyConstants.IN_MODIFY > 0
@property
def is_close_write(self):
return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0
@property
def is_close_nowrite(self):
return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0
@property
def is_access(self):
return self._mask & InotifyConstants.IN_ACCESS > 0
@property
def is_delete(self):
return self._mask & InotifyConstants.IN_DELETE > 0
@property
def is_delete_self(self):
return self._mask & InotifyConstants.IN_DELETE_SELF > 0
@property
def is_create(self):
return self._mask & InotifyConstants.IN_CREATE > 0
@property
def is_moved_from(self):
return self._mask & InotifyConstants.IN_MOVED_FROM > 0
@property
def is_moved_to(self):
return self._mask & InotifyConstants.IN_MOVED_TO > 0
@property
def is_move(self):
return self._mask & InotifyConstants.IN_MOVE > 0
@property
def is_move_self(self):
return self._mask & InotifyConstants.IN_MOVE_SELF > 0
@property
def is_attrib(self):
return self._mask & InotifyConstants.IN_ATTRIB > 0
@property
def is_ignored(self):
return self._mask & InotifyConstants.IN_IGNORED > 0
@property
def is_directory(self):
# It looks like the kernel does not provide this information for
# IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
# See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897
return (self.is_delete_self or self.is_move_self
or self._mask & InotifyConstants.IN_ISDIR > 0)
@property
def key(self):
return self._src_path, self._wd, self._mask, self._cookie, self._name
def __eq__(self, inotify_event):
return self.key == inotify_event.key
    def __ne__(self, inotify_event):
        return self.key != inotify_event.key
def __hash__(self):
return hash(self.key)
@staticmethod
def _get_mask_string(mask):
masks = []
for c in dir(InotifyConstants):
if c.startswith('IN_') and c not in ['IN_ALL_EVENTS', 'IN_CLOSE', 'IN_MOVE']:
c_val = getattr(InotifyConstants, c)
if mask & c_val:
masks.append(c)
mask_string = '|'.join(masks)
return mask_string
def __repr__(self):
mask_string = self._get_mask_string(self.mask)
s = '<%s: src_path=%r, wd=%d, mask=%s, cookie=%d, name=%s>'
return s % (type(self).__name__, self.src_path, self.wd, mask_string,
self.cookie, decode(self.name))
|
#!/usr/bin/env python
# coding=utf-8
"""
__created__ = '06/01/2017'
__author__ = 'deling.ma'
"""
from aio_rest.routes import RouteCollector, Route
from example.views import publish, IndexView
routes = RouteCollector(prefix='/app', routes=[
Route('/', IndexView),
Route('/publish', publish, method='GET'),
])
|
import tensorflow as tf
# placeholder variables (1-D tensors of arbitrary length)
X=tf.placeholder(tf.float32,shape=[None])
Y=tf.placeholder(tf.float32,shape=[None])
W=tf.Variable(tf.random_normal([1]),name='weight')
b=tf.Variable(tf.random_normal([1]),name='bias')
hypothesis=X*W+b
# mean squared error cost
cost=tf.reduce_mean(tf.square(hypothesis-Y))
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)
#minimize cost
train=optimizer.minimize(cost)
sess=tf.Session()
#initialize var
sess.run(tf.global_variables_initializer())
#learning
for step in range(2001):
cost_val,W_val,b_val,_=sess.run([cost,W,b,train],
feed_dict={X:[1,2,3,4,5],Y:[2.1,3.1,4.1,5.1,6.1]})
if step%20==0:
print(step,cost_val,W_val,b_val)
# evaluation
print(sess.run(hypothesis,feed_dict={X:[5]}))
print(sess.run(hypothesis,feed_dict={X:[2.5]}))
print(sess.run(hypothesis,feed_dict={X:[1.5,3.5]}))
|
import chainer
import chainer.functions as F
import chainer.links as L
"""
Based on chainer official example
https://github.com/pfnet/chainer/tree/master/examples/ptb
Modified by shi3z March 28,2016
"""
class RNNLM(chainer.Chain):
"""Recurrent neural net languabe model for penn tree bank corpus.
This is an example of deep LSTM network for infinite length input.
"""
def __init__(self, n_input_units=1000,n_vocab=100, n_units=100, train=True):
super(RNNLM, self).__init__(
inputVector= L.Linear(n_input_units, n_units),
embed=L.EmbedID(n_vocab, n_units),
l1=L.LSTM(n_units, n_units),
l2=L.LSTM(n_units, n_units),
l3=L.Linear(n_units, n_vocab),
)
self.train = train
    def reset_state(self):
        # Only the LSTM links carry hidden state; l3 is a plain Linear layer,
        # so it has no reset_state() to call.
        self.l1.reset_state()
        self.l2.reset_state()
def __call__(self, x,mode=0):
if mode == 1:
h0 = self.inputVector(x)
else:
h0 = self.embed(x)
h1 = self.l1(F.dropout(h0, train=self.train))
h2 = self.l2(F.dropout(h1, train=self.train))
y = self.l3(F.dropout(h2, train=self.train))
return y
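# Minimal usage sketch (an addition, not part of the original example; it
# assumes the pre-v2 chainer API with dropout(train=...) used above).
if __name__ == '__main__':
    import numpy as np
    model = RNNLM(n_vocab=100, n_units=100, train=False)
    model.reset_state()
    x = chainer.Variable(np.array([1, 2, 3], dtype=np.int32))
    logits = model(x)  # mode=0 path: embed token ids, two LSTM layers, project to vocab
    print(logits.data.shape)  # -> (3, 100)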
|
import mitsuba
import pytest
import os
import enoki as ek
def test01_construct(variant_scalar_rgb):
from mitsuba.core.xml import load_string
# With default reconstruction filter
film = load_string("""<film version="2.0.0" type="hdrfilm"></film>""")
assert film is not None
assert film.reconstruction_filter() is not None
# With a provided reconstruction filter
film = load_string("""<film version="2.0.0" type="hdrfilm">
<rfilter type="gaussian">
<float name="stddev" value="18.5"/>
</rfilter>
</film>""")
assert film is not None
assert film.reconstruction_filter().radius() == (4 * 18.5)
# Certain parameter values are not allowed
with pytest.raises(RuntimeError):
load_string("""<film version="2.0.0" type="hdrfilm">
<string name="component_format" value="uint8"/>
</film>""")
with pytest.raises(RuntimeError):
load_string("""<film version="2.0.0" type="hdrfilm">
<string name="pixel_format" value="brga"/>
</film>""")
def test02_crops(variant_scalar_rgb):
from mitsuba.core.xml import load_string
film = load_string("""<film version="2.0.0" type="hdrfilm">
<integer name="width" value="32"/>
<integer name="height" value="21"/>
<integer name="crop_width" value="11"/>
<integer name="crop_height" value="5"/>
<integer name="crop_offset_x" value="2"/>
<integer name="crop_offset_y" value="3"/>
<boolean name="high_quality_edges" value="true"/>
<string name="pixel_format" value="rgba"/>
</film>""")
assert film is not None
assert ek.all(film.size() == [32, 21])
assert ek.all(film.crop_size() == [11, 5])
assert ek.all(film.crop_offset() == [2, 3])
assert film.has_high_quality_edges()
    # The film does not adjust its crop size automatically, so an error should
    # be raised if the resulting crop window goes out of bounds.
incomplete = """<film version="2.0.0" type="hdrfilm">
<integer name="width" value="32"/>
<integer name="height" value="21"/>
<integer name="crop_offset_x" value="30"/>
<integer name="crop_offset_y" value="20"/>"""
with pytest.raises(RuntimeError):
film = load_string(incomplete + "</film>")
film = load_string(incomplete + """
<integer name="crop_width" value="2"/>
<integer name="crop_height" value="1"/>
</film>""")
assert film is not None
assert ek.all(film.size() == [32, 21])
assert ek.all(film.crop_size() == [2, 1])
assert ek.all(film.crop_offset() == [30, 20])
@pytest.mark.parametrize('file_format', ['exr', 'rgbe', 'pfm'])
def test03_develop(variant_scalar_rgb, file_format, tmpdir):
from mitsuba.core.xml import load_string
from mitsuba.core import Bitmap, Struct, ReconstructionFilter, float_dtype
from mitsuba.render import ImageBlock
import numpy as np
"""Create a test image. Develop it to a few file format, each time reading
it back and checking that contents are unchanged."""
np.random.seed(12345 + ord(file_format[0]))
# Note: depending on the file format, the alpha channel may be automatically removed.
film = load_string("""<film version="2.0.0" type="hdrfilm">
<integer name="width" value="41"/>
<integer name="height" value="37"/>
<string name="file_format" value="{}"/>
<string name="pixel_format" value="rgba"/>
<string name="component_format" value="float32"/>
<rfilter type="box"/>
</film>""".format(file_format))
# Regardless of the output file format, values are stored as XYZAW (5 channels).
contents = np.random.uniform(size=(film.size()[1], film.size()[0], 5))
    # RGBE will only reconstruct images well when all channels have similar
    # scales (the exponent is shared between channels).
    if file_format == "rgbe":
contents = 1 + 0.1 * contents
# Use unit weights.
contents[:, :, 4] = 1.0
block = ImageBlock(film.size(), 5, film.reconstruction_filter())
block.clear()
for x in range(film.size()[1]):
for y in range(film.size()[0]):
block.put([y+0.5, x+0.5], contents[x, y, :])
film.prepare(['X', 'Y', 'Z', 'A', 'W'])
film.put(block)
with pytest.raises(RuntimeError):
# Should raise when the destination file hasn't been specified.
film.develop()
filename = str(tmpdir.join('test_image.' + file_format))
film.set_destination_file(filename)
film.develop()
# Read back and check contents
other = Bitmap(filename).convert(Bitmap.PixelFormat.XYZAW, Struct.Type.Float32, srgb_gamma=False)
img = np.array(other, copy=False)
if False:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(1, 3, 1)
plt.imshow(contents[:, :, :3])
plt.subplot(1, 3, 2)
plt.imshow(img[:, :, :3])
plt.subplot(1, 3, 3)
plt.imshow(ek.sum(ek.abs(img[:, :, :3] - contents[:, :, :3]), axis=2), cmap='coolwarm')
plt.colorbar()
plt.show()
if file_format == "exr":
assert ek.allclose(img, contents, atol=1e-5)
else:
if file_format == "rgbe":
assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-2), \
'\n{}\nvs\n{}\n'.format(img[:4, :4, :3], contents[:4, :4, :3])
else:
assert ek.allclose(img[:, :, :3], contents[:, :, :3], atol=1e-5)
# Alpha channel was ignored, alpha and weights should default to 1.0.
assert ek.allclose(img[:, :, 3:5], 1.0, atol=1e-6)
|
import pytest
class TestParseParameter:
@pytest.mark.parametrize(
"values",
[
("PARAMETER test = 4", 4.0),
("PARAMETER=4", 4.0),
("PARAMETER WARNING = 4", 4.0),
("PARAMETER = _=4", 4.0),
("WARNING = PARAMETER = 4", 4.0),
("PARAMETER = .4", 0.4),
],
)
def test_parse_float(self, values, query):
input_, output = values
assert query._parse_parameter_float_response(input_) == output
@pytest.mark.parametrize(
"values",
[
("PARAMETER test = 4", 4),
("PARAMETER=4", 4),
("PARAMETER WARNING = 4", 4),
("PARAMETER = _=4", 4),
("WARNING = PARAMETER = 4", 4),
("PARAMETER = .4", 0),
],
)
def test_parse_int(self, values, query):
input_, output = values
assert query._parse_parameter_integer_response(input_) == output
def test_parse_float_type_warning(self, query):
input_ = "WARNING PARAMETER = 4"
with pytest.warns(UserWarning):
query._parse_parameter_float_response(input_)
def test_parse_int_type_warning(self, query):
input_ = "WARNING PARAMETER = 4"
with pytest.warns(UserWarning):
query._parse_parameter_integer_response(input_)
@pytest.mark.parametrize(
"value", ["parameter test = 4", "PARAMETER 4", "WARNING = 4", ""]
)
def test_parse_float_type_error(self, value, query):
input_ = value
with pytest.raises(TypeError):
query._parse_parameter_float_response(input_)
@pytest.mark.parametrize(
"value", ["parameter test = 4", "PARAMETER 4", "WARNING = 4", ""]
)
def test_parse_int_type_error(self, value, query):
input_ = value
with pytest.raises(TypeError):
query._parse_parameter_integer_response(input_)
class TestRunQuery:
@pytest.mark.parametrize('command', [('KX(1)', float), ('KP(1,1,1)', int)])
def test_run_query_returned_type(self, line_geometry, command):
q, kps, l0 = line_geometry
cmd, type_ = command
        integer = type_ is int
v = q._run_query(cmd, integer=integer)
assert isinstance(v, type_)
def test_interactive_mode_error(self, mapdl, line_geometry):
q, kps, l0 = line_geometry
with mapdl.non_interactive:
with pytest.raises(RuntimeError):
v = q.kx(1)
@pytest.mark.skip_grpc # only works in gRPC mode
def test_nopr_mode(self, mapdl, line_geometry):
try:
# enter no printout mode
mapdl._run('/NOPR', mute=True)
assert mapdl.prep7() is None
# verify that queries still work
q, kps, l0 = line_geometry
assert q.kx(2) == 1.0
finally:
# always return printing
mapdl._run('/GOPR', mute=True)
|
import requests_cache
from requests_cache import SQLiteCache
requests_cache.install_cache(
"grabtrack_sqlite_cache", SQLiteCache("spotify_api_cache", timeout=30)
)
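# Illustrative follow-up (an addition, not part of the original snippet): once
# install_cache has run, plain requests calls are transparently served from the
# SQLite backend on repeats; requests-cache marks replayed responses.
import requests

response = requests.get("https://api.spotify.com/v1/browse/categories")  # hypothetical URL
print(getattr(response, "from_cache", False))  # True once the entry is cached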
|
import json
from re import match
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.shortcuts import redirect, render
from gui.mon.forms import BaseAlertFilterForm
from gui.utils import collect_view_data
from gui.decorators import ajax_required, profile_required, admin_required
from api.decorators import setting_required
from api.utils.views import call_api_view
from api.mon.alerting.views import mon_alert_list
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def mon_server_redirect(request):
"""
Monitoring management.
"""
if match("^http", request.dc.settings.MON_ZABBIX_SERVER_EXTERNAL_URL):
return redirect(request.dc.settings.MON_ZABBIX_SERVER_EXTERNAL_URL)
else:
return redirect(request.dc.settings.MON_ZABBIX_SERVER)
@login_required
@admin_required
@ajax_required
@require_POST
def alert_list_table(request):
context = collect_view_data(request, 'mon_alert_list')
try:
api_data = json.loads(request.POST.get('alert_filter', None))
except (ValueError, TypeError):
context['error'] = 'Unexpected error: could not parse alert filter.'
else:
context['alert_filter'] = api_data
res = call_api_view(request, 'GET', mon_alert_list, data=api_data)
if res.status_code == 200:
context['alerts'] = res.data['result']
elif res.status_code == 201:
context['error'] = 'Unexpected error: got into an API loop.'
else:
context['error'] = res.data.get('result', {}).get('error', res.data)
return render(request, 'gui/mon/alert_table.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def alert_list(request):
context = collect_view_data(request, 'mon_alert_list')
data = request.GET.copy()
data.pop('_', None)
if not data and request.user.is_staff and request.dc.is_default():
data['show_nodes'] = True
context['filters'] = form = BaseAlertFilterForm(request, data)
context['init'] = True
if form.is_valid() and form.api_data is not None: # new visit, or form submission
context['alert_filter'] = form.api_data
context['alert_filter_ok'] = True
else:
context['alert_filter_ok'] = False # Do not run javascript API TASKs!
return render(request, 'gui/mon/alert_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def hostgroup_list(request):
context = collect_view_data(request, 'mon_hostgroup_list')
return render(request, 'gui/mon/hostgroup_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def template_list(request):
context = collect_view_data(request, 'mon_template_list')
return render(request, 'gui/mon/template_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def action_list(request):
context = collect_view_data(request, 'mon_action_list')
return render(request, 'gui/mon/action_list.html', context)
@login_required
@admin_required
@profile_required
@setting_required('MON_ZABBIX_ENABLED')
def webcheck_list(request):
context = collect_view_data(request, 'mon_webcheck_list')
return render(request, 'gui/mon/webcheck_list.html', context)
|
# First game...
print("Mi poesia:")
print("Las rosas son Rojas")
print("Las violetas son Azules")
print("Y yo te amo a ti")
# Mad Libs
# enter random words: adjectives, verbs, nouns.
print("Ahora te toca a vos")
print("")
color = input("Ingrese un color: ")
sustantivo_plural = input("Ingrese un sustantivo en plural: ")
celebridad = input("Ingrese el nombre de una celebridad: ")
print("Las rosas son " + color)
print(sustantivo_plural + " son Azules")
print("Y yo te amo a ti " + celebridad)
|
#!/usr/bin/env python
import sys
if __name__ == '__main__':
total = 0.0
n = 0
for line in sys.stdin:
total += float(line)
n += 1
    print(total / n)
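# Example invocation (assuming this script is saved as avg.py):
#   printf '1\n2\n3\n' | python avg.py   ->   2.0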
|
import logging
from . import common_functions as c_f
import os
import torch
from collections import defaultdict
import sqlite3
# You can write your own hooks for logging.
# But if you'd like something that just works, then use this HookContainer.
# You'll need to install record-keeper and tensorboard.
# pip install record-keeper tensorboard
class HookContainer:
def __init__(self, record_keeper,
record_group_name_prefix=None,
primary_metric="mean_average_precision_at_r",
validation_split_name="val"):
self.record_keeper = record_keeper
self.record_group_name_prefix = record_group_name_prefix
self.saveable_trainer_objects = ["models", "optimizers", "lr_schedulers", "loss_funcs", "mining_funcs"]
self.primary_metric = primary_metric
self.validation_split_name = validation_split_name
############################################
############################################
################## HOOKS #################
############################################
############################################
### Define the end_of_iteration hook. This will be executed at the end of every iteration. ###
def end_of_iteration_hook(self, trainer):
record_these = [[trainer.loss_tracker.losses, {"input_group_name_for_non_objects": "loss_histories"}],
[trainer.loss_tracker.loss_weights, {"input_group_name_for_non_objects": "loss_weights"}],
[trainer.loss_funcs, {"recursive_types": [torch.nn.Module]}],
[trainer.mining_funcs, {}],
[trainer.models, {}],
[trainer.optimizers, {"custom_attr_func": self.optimizer_custom_attr_func}]]
for record, kwargs in record_these:
self.record_keeper.update_records(record, trainer.get_global_iteration(), **kwargs)
# This hook will be passed into the trainer and will be executed at the end of every epoch.
def end_of_epoch_hook(self, tester, dataset_dict, model_folder, test_interval=1, patience=None, test_collate_fn=None):
        if self.primary_metric not in tester.accuracy_calculator.get_curr_metrics():
raise ValueError("HookContainer `primary_metric` must be one of: {}".format(tester.accuracy_calculator.get_curr_metrics()))
if not os.path.exists(model_folder): os.makedirs(model_folder)
def actual_hook(trainer):
continue_training = True
if trainer.epoch % test_interval == 0:
best_epoch = self.save_models_and_eval(trainer, dataset_dict, model_folder, test_interval, tester, test_collate_fn)
continue_training = self.patience_remaining(trainer.epoch, best_epoch, patience)
return continue_training
return actual_hook
def end_of_testing_hook(self, tester):
for split_name, accuracies in tester.all_accuracies.items():
epoch = accuracies["epoch"]
self.record_keeper.update_records(accuracies, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))
_, _, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, split_name, epoch)
best = {"best_epoch":best_epoch, "best_accuracy": best_accuracy}
self.record_keeper.update_records(best, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))
for split_name, u in tester.dim_reduced_embeddings.items():
for k, (dim_reduced, labels) in u.items():
tag = '%s/%s'%(self.record_group_name(tester, split_name), k)
self.record_keeper.add_embedding_plot(dim_reduced, labels, tag, epoch)
############################################
############################################
######### MODEL LOADING AND SAVING #########
############################################
############################################
def load_latest_saved_models(self, trainer, model_folder, device=None, best=False):
if device is None: device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
resume_epoch, model_suffix = c_f.latest_version(model_folder, "trunk_*.pth", best=best)
if resume_epoch > 0:
for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
c_f.load_dict_of_models(obj_dict, model_suffix, model_folder, device, log_if_successful=True)
return resume_epoch + 1
def save_models(self, trainer, model_folder, curr_suffix, prev_suffix=None):
for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
c_f.save_dict_of_models(obj_dict, curr_suffix, model_folder)
if prev_suffix is not None:
c_f.delete_dict_of_models(obj_dict, prev_suffix, model_folder)
def save_models_and_eval(self, trainer, dataset_dict, model_folder, test_interval, tester, collate_fn):
epoch = trainer.epoch
tester.test(dataset_dict, epoch, trainer.models["trunk"], trainer.models["embedder"], list(dataset_dict.keys()), collate_fn)
prev_best_epoch, _ = self.get_best_epoch_and_accuracy(tester, self.validation_split_name)
is_new_best, curr_accuracy, best_epoch, best_accuracy = self.is_new_best_accuracy(tester, self.validation_split_name, epoch)
self.record_keeper.save_records()
trainer.step_lr_plateau_schedulers(curr_accuracy)
self.save_models(trainer, model_folder, epoch, epoch-test_interval) # save latest model
if is_new_best:
logging.info("New best accuracy! {}".format(curr_accuracy))
curr_suffix = "best%d"%best_epoch
prev_suffix = "best%d"%prev_best_epoch if prev_best_epoch is not None else None
self.save_models(trainer, model_folder, curr_suffix, prev_suffix) # save best model
return best_epoch
def is_new_best_accuracy(self, tester, split_name, epoch):
curr_accuracy = self.get_curr_primary_metric(tester, split_name)
best_epoch, best_accuracy = self.get_best_epoch_and_accuracy(tester, split_name)
is_new_best = False
if (curr_accuracy > best_accuracy) or (best_epoch is None):
best_epoch, best_accuracy = epoch, curr_accuracy
is_new_best = True
return is_new_best, curr_accuracy, best_epoch, best_accuracy
############################################
############################################
##### BEST EPOCH AND ACCURACY TRACKING #####
############################################
############################################
def get_loss_history(self, loss_names=()):
columns = "*" if len(loss_names) == 0 else ", ".join(loss_names)
table_name = "loss_histories"
if not self.record_keeper.table_exists(table_name):
return {}
output = self.record_keeper.query("SELECT {} FROM {}".format(columns, table_name), return_dict=True)
output.pop("id", None)
return output
def get_accuracy_history(self, tester, split_name, return_all_metrics=False, metrics=()):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return {}
def get_accuracies(keys):
keys = "*" if return_all_metrics else "epoch, %s"%keys
query = "SELECT {} FROM {}".format(keys, table_name)
return self.record_keeper.query(query, return_dict=True)
keys = metrics if len(metrics) > 0 else [self.primary_metric]
output = self.try_keys(keys, tester, get_accuracies)
output.pop("id", None)
return output
def get_curr_primary_metric(self, tester, split_name):
def get_curr(key):
return tester.all_accuracies[split_name][key]
return self.try_primary_metric(tester, get_curr)
def try_keys(self, input_keys, tester, input_func):
for average in [True, False]:
keys = ", ".join([tester.accuracies_keyname(k, average=average, label_hierarchy_level=tester.label_hierarchy_level) for k in input_keys])
try:
return input_func(keys)
except (KeyError, sqlite3.OperationalError):
pass
raise KeyError
def try_primary_metric(self, tester, input_func):
return self.try_keys([self.primary_metric], tester, input_func)
# returns accuracies of a specified epoch
def get_accuracies_of_epoch(self, tester, split_name, epoch, select_all=True):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return []
def get_accuracies(key):
columns = "*" if select_all else "epoch, %s"%key
query = "SELECT %s FROM %s WHERE epoch=?"%(columns, table_name)
return self.record_keeper.query(query, (epoch, ))
return self.try_primary_metric(tester, get_accuracies)
    # returns accuracies of the best epoch and the metric name used to determine best accuracy
def get_accuracies_of_best_epoch(self, tester, split_name, select_all=True, ignore_epoch=(-1,)):
table_name = self.record_group_name(tester, split_name)
if not self.record_keeper.table_exists(table_name):
return [], None
def get_accuracies(key):
columns = "*" if select_all else "epoch, %s"%key
params = ", ".join(["?"]*len(ignore_epoch))
query = """SELECT {0} FROM {1} WHERE {2}=
(SELECT max({2}) FROM {1} WHERE epoch NOT IN ({3}))
AND epoch NOT IN ({3})""".format(columns, table_name, key, params)
output = self.record_keeper.query(query, ignore_epoch+ignore_epoch)
return output, key
return self.try_primary_metric(tester, get_accuracies)
def get_best_epoch_and_accuracy(self, tester, split_name, ignore_epoch=(-1,)):
accuracies, key = self.get_accuracies_of_best_epoch(tester, split_name, select_all=False, ignore_epoch=ignore_epoch)
if len(accuracies) > 0:
return accuracies[0]["epoch"], accuracies[0][key]
return None, 0
def patience_remaining(self, epoch, best_epoch, patience):
if patience is not None and best_epoch is not None:
if epoch - best_epoch > patience:
logging.info("Validation accuracy has plateaued. Exiting.")
return False
return True
def run_tester_separately(self, tester, dataset_dict, epoch, trunk, embedder, splits_to_eval=None, collate_fn=None, skip_eval_if_already_done=True):
if skip_eval_if_already_done:
splits_to_eval = self.get_splits_to_eval(tester, dataset_dict, epoch, splits_to_eval)
if len(splits_to_eval) == 0:
logging.info("Already evaluated")
return False
tester.test(dataset_dict, epoch, trunk, embedder, splits_to_eval, collate_fn)
return True
def get_splits_to_eval(self, tester, dataset_dict, epoch, input_splits_to_eval):
input_splits_to_eval = list(dataset_dict.keys()) if input_splits_to_eval is None else input_splits_to_eval
splits_to_eval = []
for split in input_splits_to_eval:
if len(self.get_accuracies_of_epoch(tester, split, epoch)) == 0:
splits_to_eval.append(split)
return splits_to_eval
def base_record_group_name(self, tester):
base_record_group_name = "%s_"%self.record_group_name_prefix if self.record_group_name_prefix else ''
base_record_group_name += tester.description_suffixes("accuracies")
return base_record_group_name
def record_group_name(self, tester, split_name):
base_record_group_name = self.base_record_group_name(tester)
return "%s_%s"%(base_record_group_name, split_name.upper())
def optimizer_custom_attr_func(self, optimizer):
return {"lr": optimizer.param_groups[0]["lr"]}
class EmptyContainer:
def end_of_epoch_hook(self, *args):
return None
end_of_iteration_hook = None
end_of_testing_hook = None
def get_record_keeper(csv_folder, tensorboard_folder, global_db_path=None, experiment_name=None, is_new_experiment=True, save_figures=False, save_lists=False):
try:
import record_keeper as record_keeper_package
from torch.utils.tensorboard import SummaryWriter
record_writer = record_keeper_package.RecordWriter(folder = csv_folder,
global_db_path = global_db_path,
experiment_name = experiment_name,
is_new_experiment = is_new_experiment,
save_lists = save_lists)
tensorboard_writer = SummaryWriter(log_dir=tensorboard_folder)
record_keeper = record_keeper_package.RecordKeeper(tensorboard_writer = tensorboard_writer,
record_writer = record_writer,
attributes_to_search_for = c_f.list_of_recordable_attributes_list_names(),
save_figures=save_figures)
return record_keeper, record_writer, tensorboard_writer
except ModuleNotFoundError as e:
        logging.warning(e)
        logging.warning("There won't be any logging or model saving.")
        logging.warning("To fix this, pip install record-keeper tensorboard")
return None, None, None
def get_hook_container(record_keeper, **kwargs):
if record_keeper:
return HookContainer(record_keeper, **kwargs)
else:
logging.warn("No record_keeper, so no preset hooks are being returned.")
return EmptyContainer()
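# Hypothetical wiring sketch (an addition; the folder names and metric are
# assumptions, and the trainer/tester objects come from pytorch-metric-learning):
def _example_hook_wiring():
    record_keeper, _, _ = get_record_keeper("example_logs", "example_tensorboard")
    hooks = get_hook_container(record_keeper, primary_metric="precision_at_1")
    # Pass hooks.end_of_iteration_hook and hooks.end_of_epoch_hook(...) to a
    # trainer, and hooks.end_of_testing_hook to a tester.
    return hooks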
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import os
import random
import numpy as np
import matplotlib.pyplot as plt
from mxnet.gluon.data import ArrayDataset
import mxnet
from .data import MetaTaskDataContainer, TaskDataContainer
from .config import DEFAULT_CONFIG_SYNTHETIC
class MetaTaskSynthetic(MetaTaskDataContainer):
def __init__(self, config=None, weights=None, bias=None, seed=1, context=None):
"""
:param config: If None, DEFAULT_CONFIG_SYNTHETIC is loaded.
        :param weights: Tasks' weight matrix. Row k corresponds to the weight parameters of task k. If None, the
                        weights are sampled from N(0, 1).
        :param bias: Tasks' bias vector. Entry k corresponds to the bias parameter of task k. If None, the biases
                     are sampled from N(0, 1).
:param seed: Seed for random generator.
"""
if config is None:
config = DEFAULT_CONFIG_SYNTHETIC
self.config = config
self.weights = weights
self.bias = bias
if context is None:
context = mxnet.cpu()
self.context = context
self.seed = seed
random.seed(self.seed)
num_tasks_train = config["num_tasks_train"]
num_tasks_test = config["num_tasks_test"]
num_tasks_val = config["num_tasks_val"]
num_tasks = num_tasks_train + num_tasks_test + num_tasks_val
self.num_tasks = num_tasks
self._generate_parameters()
self._validate_parameters()
num_examples = config["num_examples_per_task"]
std_x = config["std_x"]
hold_out = config["hold_out"]
noise = config["std_noise"]
# Generate the training/test/val dataset.
# Each dataset is a list of TaskSynthetic objects (one per task)
data_train = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
context=context)
for t in np.arange(0, num_tasks_train)]
data_test = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
context=context)
for t in np.arange(num_tasks_train, num_tasks_train + num_tasks_test)]
data_val = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,
context=context)
for t in np.arange(num_tasks_train + num_tasks_test, num_tasks)]
super().__init__(data_train, data_test, data_val, context=context)
def plot_sample(self, root="./sample_synth"):
"""Plot N images from each alphabet and store the images in root."""
if self.weights.shape[1] != 2:
raise ValueError("Only 2D datasets can be plot.")
if not os.path.exists(root):
os.makedirs(root)
fig_train = self._plot([dd._train_dataset for dd in self.train_tasks],
"Training Samples for Training Tasks")
fig_train.savefig(os.path.join(root, "sample_train_train_tasks.png"))
del fig_train
fig_test = self._plot([dd._train_dataset for dd in self.test_tasks],
"Training Samples for Test Tasks")
fig_test.savefig(os.path.join(root, "sample_train_test_tasks.png"))
del fig_test
fig_val = self._plot([dd._train_dataset for dd in self.val_tasks],
"Training Samples for Validation Tasks")
fig_val.savefig(os.path.join(root, "sample_train_val_tasks.png"))
del fig_val
if self.config["hold_out"] > 0:
fig_train = self._plot([dd._val_dataset for dd in self.train_tasks],
"Validation Samples for Training Tasks")
fig_train.savefig(os.path.join(root, "sample_val_train_tasks.png"))
del fig_train
fig_test = self._plot([dd._val_dataset for dd in self.test_tasks],
"Validation Samples for Test Tasks")
fig_test.savefig(os.path.join(root, "sample_val_test_tasks.png"))
del fig_test
fig_val = self._plot([dd._val_dataset for dd in self.val_tasks],
"Validation Samples for Validation Tasks")
fig_val.savefig(os.path.join(root, "sample_val_val_tasks.png"))
del fig_val
def _plot(self, data, title):
"""Helper function for plotting."""
num_tasks = len(data)
fig, ax = plt.subplots(1, num_tasks, figsize=(num_tasks*5, 5))
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)
for mm in range(num_tasks):
X, y = data[mm][:]
X = X.asnumpy()
y = y.asnumpy()
ax[mm].scatter(X[:, 0], X[:, 1], c=y.flatten())
fig.suptitle(title, size=18)
return fig
def _validate_parameters(self):
if self.weights.shape[0] != self.num_tasks:
raise ValueError("Number of rows in w must be equal to the total number of tasks")
if len(self.bias) != self.num_tasks:
raise ValueError("Length of b must be equal to the total number of tasks")
def _generate_parameters(self):
if self.weights is None:
dim = self.config["dim"]
self.weights = self.config["global_bias"] + mxnet.nd.random_normal(shape=(self.num_tasks, dim),
ctx=self.context)
if self.bias is None:
if self.config["task_bias"]:
self.bias = mxnet.nd.random_normal(shape=self.num_tasks, ctx=self.context)
else:
                self.bias = mxnet.nd.zeros(self.num_tasks, ctx=self.context)
class TaskSynthetic(TaskDataContainer):
"""
Synthetic Task Container: Linear Regression.
"""
def __init__(self, w, b, num_examples, std_x, noise, hold_out=None, seed=None, context=None):
"""
:param w: Task's weights vector.
:param b: Task's bias.
:param num_examples: Total number of examples per task.
        :param std_x: The covariates are sampled from a zero-mean normal distribution with
                      standard deviation equal to std_x.
        :param noise: Standard deviation of the additive Gaussian observation noise.
        :param hold_out: Number of examples to hold out for validation.
        :param seed: Seed for the random generator.
"""
self.w = w
self.b = b
self.num_examples = num_examples
self.seed = seed
if context is None:
context = mxnet.cpu()
self.context = context
if seed:
random.seed(seed)
if hold_out and hold_out < num_examples:
Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples - hold_out, len(w)),
ctx=context), noise)
train_dataset = ArrayDataset(Xtr, Ytr)
Xval, Yval = self._real_fn(std_x * mxnet.nd.random_normal(shape=(hold_out, len(w)), ctx=context), noise)
val_dataset = ArrayDataset(Xval, Yval)
else:
Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples, len(w)), ctx=context), noise)
train_dataset = ArrayDataset(Xtr, Ytr)
val_dataset = None
super().__init__(train_dataset, val_dataset, context=context)
def _real_fn(self, X, noise):
y = mxnet.nd.dot(X, mxnet.nd.expand_dims(self.w, axis=1)) + self.b
if noise > 0.0:
y += mxnet.nd.expand_dims(noise * mxnet.nd.random_normal(shape=(X.shape[0],)), axis=1)
return X, y
if __name__ == '__main__':
s1 = MetaTaskSynthetic()
s1.plot_sample()
batch_size = 20
train_tasks = s1.train_tasks
assert len(s1.train_tasks) == 3
for task in train_tasks:
tr_iterator = task.get_train_iterator(batch_size)
for data in tr_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
val_iterator = task.get_val_iterator(batch_size)
for data in val_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
dim = 2
num_tasks = 15
w = mxnet.nd.random_normal(shape=(num_tasks, dim))
b = mxnet.nd.random_normal(shape=num_tasks)
s2 = MetaTaskSynthetic(weights=w, bias=b)
s2.plot_sample(root="./sample_synth_w_b_given")
batch_size = 20
train_tasks = s2.train_tasks
assert len(train_tasks) == 3
for task in train_tasks:
tr_iterator = task.get_train_iterator(batch_size)
for data in tr_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
val_iterator = task.get_val_iterator(batch_size)
for data in val_iterator:
assert (data[0].shape == (batch_size, 2))
assert (data[1].shape == (batch_size, 1))
assert (data[1].asnumpy().dtype == np.float32)
break
|
'''
A: suffix solution
1. subproblems: define dp(i, j) = is_match(s[i:], p[j:]), suffix
2. guess,
2.1 the current char in p is a '*'
- use '*', repeat the char before it
- do not use '*', skip to next char after '*'
    2.2 the current chars in s and p match, s[i] == p[j] or p[j] == '.'
3. relate subproblems:
    dp(i, j) = is_match(s[i:], p[j:])
    dp(i, j) =
    a. if j + 1 is in bounds and p[j + 1] == '*', then
        dp(i, j + 2) or ((s[i] == p[j] or p[j] == '.') and dp(i + 1, j))
    b. if s[i] == p[j] or p[j] == '.', then dp(i + 1, j + 1)
    c. else false
B: prefix solution
1. subproblems: define dp(i, j) = is_match(s[:i], p[:j]), prefix
2. guess,
    2.1 the current chars in s and p match, s[i] == p[j] or p[j] == '.'
2.2 the current char in p is a '*'
- use '*', repeat the char before it
- do not use '*', skip to next char after '*'
3. relate subproblems:
    dp(i, j) = is_match(s[:i], p[:j])
dp(i, j) =
a. if s[i] == p[j] or p[j] == '.', then dp(i - 1, j - 1)
b. if p[j] == '*', then
        dp(i, j - 2) or ((s[i] == p[j - 1] or p[j - 1] == '.') and dp(i - 1, j))
c. else false
reference:
1. https://www.youtube.com/watch?v=HAA8mgxlov8 (use * or no use)
2. https://www.youtube.com/watch?v=l3hda49XcDE (dp solution)
'''
class Solution:
def isMatch(self, s: str, p: str) -> bool:
        # Sometimes there is still a match even when s is out of bounds but p
        # is still in bounds (s: 'a', p: 'a*b*'). But if p is out of bounds,
        # we must return False.
        # return self.dfs_suffix(s, p, 0, 0, {})
        # return self.dfs_prefix(s, p, len(s) - 1, len(p) - 1)
        # return self.dp_bottom_up_prefix(s, p)
return self.dp_bottom_up_suffix(s, p)
    # top down, dfs + memoization, suffix
def dfs_suffix(self, s, p, i, j, memo):
# base case
        # if both i and j are out of bounds, then we found our solution
if (i, j) in memo:
return memo[(i, j)]
if i >= len(s) and j >= len(p):
return True
        # if i is in bounds but j is out of bounds, return False.
if j >= len(p):
return False
        # Note the parenthesization: only check for a match while i is still within bounds.
        match = i < len(s) and (s[i] == p[j] or p[j] == '.')
        # if the next character in p is a star (guard j + 1 so it stays within bounds)
if j + 1 < len(p) and p[j + 1] == '*':
            # either repeat the current character in p and move to the next character in s,
            # or stop repeating and skip past the '*' in p
            memo[(i, j)] = (match and self.dfs_suffix(s, p, i + 1, j, memo)) or self.dfs_suffix(s, p, i, j + 2, memo)
return memo[(i, j)]
# if it is not a star but a match found in the current index of s and p
if match:
            memo[(i, j)] = self.dfs_suffix(s, p, i + 1, j + 1, memo)
return memo[(i, j)]
        # if there is no match and the next character is not a star
memo[(i, j)] = False
return False
# bottom up solution, suffix.
def dp_bottom_up_suffix(self, s, p):
s_len = len(s)
p_len = len(p)
dp = [[False for col in range(p_len + 1)] for row in range(s_len + 1)]
dp[s_len][p_len] = True
# deal with the case like a*b*c* for the last row
for j in range(p_len - 2, -1, -1):
if p[j + 1] == '*':
dp[s_len][j] = dp[s_len][j + 2]
for i in range(s_len - 1, -1, -1):
for j in range(p_len - 1, -1, -1):
# for suffix, checking '*' goes first.
if j <= p_len - 2 and p[j + 1] == '*':
if s[i] == p[j] or p[j] == '.':
dp[i][j] = dp[i + 1][j]
dp[i][j] = (dp[i][j] or dp[i][j + 2])
continue
if s[i] == p[j] or p[j] == '.':
dp[i][j] = dp[i + 1][j + 1]
for i in dp:
print(i)
print()
return dp[0][0]
# top down solution, start at (n, n)
def dfs_prefix(self, s, p, i, j):
# base case
if i < 0 and j < 0:
return True
        # if i is in bounds but j is out of bounds, return False.
if j < 0:
return False
# if the current char is a star
if j >= 0 and p[j] == '*':
            # check if there is a match between the current char in s and the char before '*' in p
            match = (i >= 0) and (s[i] == p[j - 1] or p[j - 1] == '.')
            # if the current chars match, recurse on dp(i - 1, j); otherwise check dp(i, j - 2)
return (match and self.dfs_prefix(s, p, i - 1, j)) or self.dfs_prefix(s, p, i, j - 2)
# if there is a match of the current char in s and p
if i >= 0 and (s[i] == p[j] or p[j] == '.'):
return self.dfs_prefix(s, p, i - 1, j - 1)
return False
# bottom up algorithm, start from dp(0,0) -> dp(n, n)
    def dp_bottom_up_prefix(self, s, p):
s_len, p_len = len(s), len(p)
dp = [[False for col in range(p_len + 1)] for row in range(s_len + 1)]
dp[0][0] = True
# handle the pattern like a*, a*b* or a*b*c* for the 0th row
for j in range(1, p_len + 1):
if p[j - 1] == '*':
dp[0][j] = dp[0][j - 2]
for i in range(1, s_len + 1):
for j in range(1, p_len + 1):
if s[i - 1] == p[j - 1] or p[j - 1] == '.':
dp[i][j] = dp[i - 1][j - 1]
continue
if p[j - 1] == '*':
if s[i - 1] == p[j - 2] or p[j - 2] == '.':
dp[i][j] = dp[i - 1][j]
dp[i][j] = (dp[i][j] or dp[i][j - 2])
for i in dp:
print(i)
print()
return dp[s_len][p_len]
s = 'aab'
p = 'c*a*b'
# s = 'aaa'
# p = 'aaaa'
# s = "a"
# p = ".*..a*"
s = 'aa'
p = 'a*'
sol = Solution()
print(sol.isMatch(s, p))
x = 'abc'
print(x[1:1])
|
import argparse
import imp
import importlib
import random
from opentamp.src.policy_hooks.vae.vae_main import MultiProcessMain
def load_config(args, reload_module=None):
config_file = args.config
if config_file != '':
if reload_module is not None:
config_module = reload_module
imp.reload(config_module)
else:
config_module = importlib.import_module('policy_hooks.'+config_file)
config = config_module.config
else:
config_module = None
config = {}
config['use_local'] = not args.remote
    config['num_conds'] = args.nconds if args.nconds > 0 else config.get('num_conds', 1)
if 'common' in config:
config['common']['num_conds'] = config['num_conds']
    config['num_objs'] = args.nobjs if args.nobjs > 0 else config.get('num_objs', 1)
config['weight_dir'] = config['base_weight_dir'] + str(config['num_objs']) if 'base_weight_dir' in config else args.weight_dir
config['log_timing'] = args.timing
config['hl_timeout'] = 0
config['rollout_server'] = args.rollout_server or args.all_servers
config['vae_server'] = args.vae_server or args.all_servers
config['viewer'] = args.viewer
config['server_id'] = args.server_id if args.server_id != '' else str(random.randint(0,2**32))
config['n_rollout_servers'] = args.n_rollout_servers
config['no_child_process'] = args.no_child_process
config['rollout_len'] = args.rollout_len
config['train_vae'] = args.train_vae
config['unconditional'] = args.unconditional
config['train_reward'] = args.train_reward
config['load_step'] = args.load_step
config['train_params'] = {
'use_recurrent_dynamics': args.use_recurrent_dynamics,
'use_overshooting': args.use_overshooting,
'data_limit': args.train_samples if args.train_samples > 0 else None,
'beta': args.beta,
'overshoot_beta': args.overshoot_beta,
'dist_constraint': args.dist_constraint,
}
return config, config_module
def load_env(args, reload_module=None):
env_path = args.environment_path
if reload_module is not None:
module = reload_module
imp.reload(module)
else:
module = importlib.import_module(env_path)
env = args.environment
return getattr(module, env)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='')
parser.add_argument('-wd', '--weight_dir', type=str, default='')
parser.add_argument('-nf', '--nofull', action='store_true', default=False)
parser.add_argument('-n', '--nconds', type=int, default=0)
parser.add_argument('-o', '--nobjs', type=int, default=0)
# parser.add_argument('-ptt', '--pretrain_timeout', type=int, default=300)
parser.add_argument('-hlt', '--hl_timeout', type=int, default=0)
parser.add_argument('-k', '--killall', action='store_true', default=True)
parser.add_argument('-r', '--remote', action='store_true', default=False)
parser.add_argument('-t', '--timing', action='store_true', default=False)
parser.add_argument('-vae', '--vae_server', action='store_true', default=False)
parser.add_argument('-sim', '--rollout_server', action='store_true', default=False)
parser.add_argument('-all', '--all_servers', action='store_true', default=False)
parser.add_argument('-v', '--viewer', action='store_true', default=False)
parser.add_argument('-id', '--server_id', type=str, default='')
parser.add_argument('-env_path', '--environment_path', type=str, default='')
parser.add_argument('-env', '--environment', type=str, default='')
parser.add_argument('-tamp', '--use_tamp', type=str, default='')
parser.add_argument('-nrs', '--n_rollout_servers', type=int, default=1)
parser.add_argument('-ncp', '--no_child_process', action='store_true', default=False)
parser.add_argument('-rl', '--rollout_len', type=int, default=0)
parser.add_argument('-tv', '--train_vae', action='store_true', default=False)
parser.add_argument('-uncond', '--unconditional', action='store_true', default=False)
parser.add_argument('-tr', '--train_reward', action='store_true', default=False)
parser.add_argument('-loadstep', '--load_step', type=int, default=-1)
parser.add_argument('-beta', '--beta', type=int, default=1)
parser.add_argument('-beta_d', '--overshoot_beta', type=int, default=1)
parser.add_argument('-nts', '--train_samples', type=int, default=-1)
parser.add_argument('-rnn', '--use_recurrent_dynamics', action='store_true', default=False)
parser.add_argument('-over', '--use_overshooting', action='store_true', default=False)
parser.add_argument('-dist', '--dist_constraint', action='store_true', default=False)
args = parser.parse_args()
config, config_module = load_config(args)
if args.config != '':
main = MultiProcessMain(config)
else:
env_cls = load_env(args)
main = MultiProcessMain.no_config_load(env_cls, args.environment, config)
main.start(kill_all=args.killall)
if __name__ == '__main__':
main()
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from tag_category_create import CategoryCreate
from vsphere_base_action_test_case import VsphereBaseActionTestCase
__all__ = [
'CategoryCreateTestCase'
]
class CategoryCreateTestCase(VsphereBaseActionTestCase):
__test__ = True
action_cls = CategoryCreate
@mock.patch("vmwarelib.actions.BaseAction.connect_rest")
def test_run(self, mock_connect):
action = self.get_action_instance(self.new_config)
# mock
expected_result = "result"
action.tagging = mock.Mock()
action.tagging.category_create.return_value = expected_result
# define test variables
category_name = "name"
category_description = "test description"
category_cardinality = "SINGLE"
category_types = []
vsphere = "default"
test_kwargs = {
"category_name": category_name,
"category_description": category_description,
"category_cardinality": category_cardinality,
"category_types": category_types,
"vsphere": vsphere
}
# invoke action with valid parameters
result = action.run(**test_kwargs)
self.assertEqual(result, expected_result)
action.tagging.category_create.assert_called_with(category_name,
category_description,
category_cardinality,
category_types)
mock_connect.assert_called_with(vsphere)
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.data.python.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.util import deprecation
_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.int64, dtypes.string)
def make_csv_dataset(
file_pattern,
batch_size,
column_keys,
column_defaults,
label_key=None,
field_delim=",",
use_quote_delim=True,
skip=0,
filter_fn=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=1,
):
"""Reads CSV files into a dataset.
Reads CSV files into a dataset, where each element is a (features, labels)
tuple that corresponds to a batch of CSV rows. The features dictionary
maps feature column names to `Tensor`s containing the corresponding
feature data, and labels is a `Tensor` containing the batch's label data.
Args:
file_pattern: List of files or patterns of file paths containing CSV
records. See @{tf.gfile.Glob} for pattern rules.
batch_size: An int representing the number of consecutive elements of this
dataset to combine in a single batch.
column_keys: A list of strings that corresponds to the CSV columns, in
order. One per column of the input record.
    column_defaults: A list of default values for the CSV fields. One item per
      column of the input record. Each item in the list is either one of the
      following dtypes: float32, float64, int32, int64, or string, or a
      `Tensor` with one of the aforementioned types. For each column, provide
      a scalar default value if the column is optional, or, if the column is
      required, an empty `Tensor` or a dtype.
    label_key: An optional string corresponding to the label column. If provided,
the data for this column is returned as a separate `Tensor` from the
features dictionary, so that the dataset complies with the format expected
by a `tf.Estimator.train` or `tf.Estimator.evaluate` input function.
field_delim: An optional `string`. Defaults to `","`. Char delimiter to
separate fields in a record.
use_quote_delim: An optional bool. Defaults to `True`. If false, treats
double quotation marks as regular characters inside of the string fields.
skip: An integer that corresponds to the number of lines to skip at the
head of each CSV file. Defaults to 0.
filter_fn: A callable function that takes in a CSV string and returns a
boolean that corresponds to whether the record should be included. If
None, does not filter records.
num_epochs: An int specifying the number of times this dataset is repeated.
If None, cycles through the dataset forever.
shuffle: A bool that indicates whether the input should be shuffled.
shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
ensures better shuffling, but would increase memory usage and startup
time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: An int specifying the number of feature batches to
prefetch for performance improvement. Recommended value is the number of
batches consumed per training step.
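  Example usage (a minimal sketch; the file pattern and column names are
  illustrative):
  ```python
  dataset = make_csv_dataset(
      "/data/train-*.csv",
      batch_size=32,
      column_keys=["feature_a", "feature_b", "label"],
      column_defaults=[tf.float32, tf.float32, tf.int64],
      label_key="label",
      num_epochs=1)
  features, labels = dataset.make_one_shot_iterator().get_next()
  ```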
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
to a batch of `batch_size` CSV rows. The features dictionary maps feature
column names to `Tensor`s containing the corresponding column data, and
labels is a `Tensor` containing the column data for the label column
specified by `label_key`.
"""
filenames = _get_file_names(file_pattern, False)
column_defaults = [
constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
for x in column_defaults
]
dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
if label_key is not None:
assert label_key in column_keys
def filename_to_dataset(filename):
ds = core_readers.TextLineDataset(filename)
if skip > 0:
ds = ds.skip(skip)
if filter_fn is not None:
ds = ds.filter(filter_fn)
return ds
def decode_csv(line):
"""Decodes csv line into features.
Args:
line: String tensor corresponding to one csv record.
Returns:
A dictionary of feature names to values for that particular record. If
label_key is provided, extracts the label feature to be returned as the
second element of the tuple.
"""
columns = parsing_ops.decode_csv(
line,
column_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim)
features = dict(zip(column_keys, columns))
if label_key is not None:
label = features.pop(label_key)
return features, label
return features
# TODO(rachelim): interleave records from files for better shuffling
dataset = dataset.flat_map(filename_to_dataset)
# TODO(rachelim): use fused shuffle_and_repeat for perf
if shuffle:
dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
if num_epochs != 1:
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(decode_csv)
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
def make_batched_features_dataset(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=1,
reader_num_threads=1,
parser_num_threads=2,
sloppy_ordering=False):
"""Returns a `Dataset` of feature dictionaries from `Example` protos.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=["code", "art", "sports"],
      dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int representing the number of consecutive elements of this
dataset to combine in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
reader_args: Additional arguments to pass to the reader class.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. Defaults to `None`.
shuffle: A boolean, indicates whether the input should be shuffled. Defaults
to `True`.
shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
ensures better shuffling but would increase memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: Number of feature batches to prefetch in order to
improve performance. Recommended value is the number of batches consumed
per training step (default is 1).
reader_num_threads: Number of threads used to read `Example` records. If >1,
the results will be interleaved.
parser_num_threads: Number of threads to use for parsing `Example` tensors
into a dictionary of `Feature` tensors.
sloppy_ordering: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
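  Example usage (a minimal sketch; the file pattern is illustrative):
  ```python
  dataset = make_batched_features_dataset(
      "/data/examples-*.tfrecord",
      batch_size=32,
      features={
          "age": tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
          "kws": tf.VarLenFeature(dtype=tf.string),
      })
  next_batch = dataset.make_one_shot_iterator().get_next()
  ```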
Returns:
A dataset of `dict` elements. Each `dict` maps feature keys to
`Tensor` or `SparseTensor` objects.
"""
# Create dataset of all matching filenames
if shuffle:
dataset = dataset_ops.Dataset.list_files(file_pattern, shuffle=True)
else:
# TODO(b/73959787): Use Dataset.list_files() once ordering is deterministic.
filenames = _get_file_names(file_pattern, shuffle)
dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
# Read `Example` records from files as tensor objects.
if reader_args is None:
reader_args = []
# Read files sequentially (if reader_num_threads=1) or in parallel
dataset = dataset.apply(
interleave_ops.parallel_interleave(
lambda filename: reader(filename, *reader_args),
cycle_length=reader_num_threads,
sloppy=sloppy_ordering))
# Extract values if the `Example` tensors are stored as key-value tuples.
if dataset.output_types == (dtypes.string, dtypes.string):
dataset = dataset.map(lambda _, v: v)
# Apply dataset repeat and shuffle transformations.
repeat_dataset = (num_epochs != 1)
if repeat_dataset and shuffle:
    # Use the fused shuffle_and_repeat operation for better performance
dataset = dataset.apply(
shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
shuffle_seed))
elif repeat_dataset:
dataset = dataset.repeat(num_epochs)
elif shuffle:
dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
dataset = dataset.batch(batch_size)
# Parse `Example` tensors to a dictionary of `Feature` tensors.
dataset = dataset.map(
lambda x: parsing_ops.parse_example(x, features),
num_parallel_calls=parser_num_threads)
# TODO(rachelim): Add an optional label_key argument for extracting the label
# from the features dictionary, to comply with the type expected by the
# input_fn to a `tf.Estimator.train` or `tf.Estimator.evaluate` function.
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
@deprecation.deprecated(None,
"Use `tf.contrib.data.make_batched_features_dataset`")
def read_batch_features(file_pattern,
batch_size,
features,
reader=core_readers.TFRecordDataset,
reader_args=None,
randomize_input=True,
num_epochs=None,
capacity=10000):
"""Reads batches of Examples.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=["code", "art", "sports"],
      dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int representing the number of consecutive elements of this
dataset to combine in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
reader_args: Additional arguments to pass to the reader class.
randomize_input: Whether the input should be randomized.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever.
capacity: Buffer size of the ShuffleDataset. A large capacity ensures better
shuffling but would increase memory usage and startup time.
Returns:
A dict from keys in features to `Tensor` or `SparseTensor` objects.
"""
dataset = make_batched_features_dataset(
file_pattern,
batch_size,
features,
reader=reader,
reader_args=reader_args,
shuffle=randomize_input,
num_epochs=num_epochs,
shuffle_buffer_size=capacity)
iterator = dataset.make_one_shot_iterator()
outputs = iterator.get_next()
return outputs
def _get_file_names(file_pattern, shuffle):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of glob patterns.
shuffle: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
if not file_pattern:
raise ValueError("File pattern is empty.")
file_names = []
for entry in file_pattern:
file_names.extend(gfile.Glob(entry))
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError("No files match %s." % file_pattern)
# Sort files so it will be deterministic for unit tests.
if not shuffle:
file_names = sorted(file_names)
return file_names
class SqlDataset(dataset_ops.Dataset):
"""A `Dataset` consisting of the results from a SQL query."""
def __init__(self, driver_name, data_source_name, query, output_types):
"""Creates a `SqlDataset`.
`SqlDataset` allows a user to read data from the result set of a SQL query.
For example:
```python
dataset = tf.contrib.data.SqlDataset("sqlite", "/foo/bar.sqlite3",
"SELECT name, age FROM people",
(tf.string, tf.int32))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
# Prints the rows of the result set of the above query.
while True:
try:
print(sess.run(next_element))
except tf.errors.OutOfRangeError:
break
```
Args:
driver_name: A 0-D `tf.string` tensor containing the database type.
Currently, the only supported value is 'sqlite'.
data_source_name: A 0-D `tf.string` tensor containing a connection string
to connect to the database.
query: A 0-D `tf.string` tensor containing the SQL query to execute.
output_types: A tuple of `tf.DType` objects representing the types of the
columns returned by `query`.
"""
super(SqlDataset, self).__init__()
self._driver_name = ops.convert_to_tensor(
driver_name, dtype=dtypes.string, name="driver_name")
self._data_source_name = ops.convert_to_tensor(
data_source_name, dtype=dtypes.string, name="data_source_name")
self._query = ops.convert_to_tensor(
query, dtype=dtypes.string, name="query")
self._output_types = output_types
def _as_variant_tensor(self):
return gen_dataset_ops.sql_dataset(self._driver_name,
self._data_source_name, self._query,
nest.flatten(self.output_types),
nest.flatten(self.output_shapes))
@property
def output_classes(self):
return nest.map_structure(lambda _: ops.Tensor, self._output_types)
@property
def output_shapes(self):
return nest.map_structure(lambda _: tensor_shape.TensorShape([]),
self._output_types)
@property
def output_types(self):
return self._output_types
|
# -*- coding: utf-8 -*-
"""WSGI server."""
import argparse
import sys
from flask import Flask, jsonify
from flask_cors import CORS
from werkzeug.exceptions import BadRequest, NotFound, MethodNotAllowed, \
Forbidden, InternalServerError
from projects.api.compare_results import bp as compare_results_blueprint
from projects.api.experiments import bp as experiments_blueprint
from projects.api.json_encoder import CustomJSONEncoder
from projects.api.operators import bp as operators_blueprint
from projects.api.parameters import bp as parameters_blueprint
from projects.api.projects import bp as projects_blueprint
from projects.api.tasks import bp as tasks_blueprint
from projects.api.templates import bp as templates_blueprint
from projects.database import db_session, init_db
from projects.samples import init_tasks
app = Flask(__name__)
app.json_encoder = CustomJSONEncoder
app.register_blueprint(projects_blueprint, url_prefix="/projects")
app.register_blueprint(compare_results_blueprint, url_prefix="/projects/<project_id>/comparisons")
app.register_blueprint(experiments_blueprint, url_prefix="/projects/<project_id>/experiments")
app.register_blueprint(tasks_blueprint, url_prefix="/tasks")
app.register_blueprint(parameters_blueprint, url_prefix="/tasks/<task_id>/parameters")
app.register_blueprint(operators_blueprint,
url_prefix="/projects/<project_id>/experiments/<experiment_id>/operators")
app.register_blueprint(templates_blueprint, url_prefix="/templates")
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
@app.route("/", methods=["GET"])
def ping():
"""Handles GET requests to /."""
return "pong"
@app.errorhandler(BadRequest)
@app.errorhandler(NotFound)
@app.errorhandler(MethodNotAllowed)
@app.errorhandler(Forbidden)
@app.errorhandler(InternalServerError)
def handle_errors(e):
"""Handles exceptions raised by the API."""
return jsonify({"message": e.description}), e.code
def parse_args(args):
"""Takes argv and parses API options."""
parser = argparse.ArgumentParser(
description="Projects API"
)
parser.add_argument(
"--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
)
parser.add_argument("--enable-cors", action="count")
parser.add_argument(
"--debug", action="count", help="Enable debug"
)
parser.add_argument(
"--init-db", action="count", help="Create database and tables before the HTTP server starts"
)
parser.add_argument(
"--samples-config", help="Path to sample tasks config file."
)
return parser.parse_args(args)
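# Example invocation (illustrative; the module filename may differ):
#   python server.py --port 8080 --enable-cors --init-db --samples-config ./samples.yaml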
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
# Enable CORS if required
if args.enable_cors:
CORS(app)
# Initializes DB if required
if args.init_db:
init_db()
# Install sample tasks if required
if args.samples_config:
init_tasks(args.samples_config)
app.run(host="0.0.0.0", port=args.port, debug=args.debug)
|
from PIL import Image
import numpy as np
import os.path as osp
import glob
import os
import argparse
import yaml
parser = argparse.ArgumentParser(description='create a dataset')
parser.add_argument('--dataset', default='df2k', type=str, help='selecting different datasets')
parser.add_argument('--artifacts', default='', type=str, help='selecting different artifacts type')
parser.add_argument('--cleanup_factor', default=2, type=int, help='downscaling factor for image cleanup')
parser.add_argument('--upscale_factor', default=4, type=int, choices=[4], help='super resolution upscale factor')
opt = parser.parse_args()
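# Example invocation (illustrative; the script name is hypothetical, and valid
# --dataset/--artifacts values depend on ./preprocess/paths.yml):
#   python collect_noise.py --dataset df2k --cleanup_factor 2 --upscale_factor 4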
# define input and target directories
with open('./preprocess/paths.yml', 'r') as stream:
    PATHS = yaml.safe_load(stream)
def noise_patch(rgb_img, sp, max_var, min_mean):
img = rgb_img.convert('L')
rgb_img = np.array(rgb_img)
img = np.array(img)
    # NB: numpy shape is (rows, cols), i.e. (height, width)
    h, w = img.shape
    collect_patchs = []
    for i in range(0, h - sp, sp):
        for j in range(0, w - sp, sp):
patch = img[i:i + sp, j:j + sp]
var_global = np.var(patch)
mean_global = np.mean(patch)
if var_global < max_var and mean_global > min_mean:
rgb_patch = rgb_img[i:i + sp, j:j + sp, :]
collect_patchs.append(rgb_patch)
return collect_patchs
if __name__ == '__main__':
if opt.dataset == 'df2k':
img_dir = PATHS[opt.dataset][opt.artifacts]['source']
noise_dir = PATHS['datasets']['df2k'] + '/Corrupted_noise'
sp = 256
max_var = 20
min_mean = 0
else:
img_dir = PATHS[opt.dataset][opt.artifacts]['hr']['train']
noise_dir = PATHS['datasets']['dped'] + '/DPEDiphone_noise_sp32v20m50'
sp = 256
max_var = 20
min_mean = 50
assert not os.path.exists(noise_dir)
os.mkdir(noise_dir)
img_paths = sorted(glob.glob(osp.join(img_dir, '*.png')))
cnt = 0
for path in img_paths:
img_name = osp.splitext(osp.basename(path))[0]
print('**********', img_name, '**********')
img = Image.open(path).convert('RGB')
patchs = noise_patch(img, sp, max_var, min_mean)
for idx, patch in enumerate(patchs):
save_path = osp.join(noise_dir, '{}_{:03}.png'.format(img_name, idx))
cnt += 1
print('collect:', cnt, save_path)
Image.fromarray(patch).save(save_path)
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This subpackage contains QNode, quantum function, device, and tape transforms.
.. currentmodule:: pennylane
Transforms
----------
Transforms that act on QNodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept QNodes, and return new transformed functions
that compute the desired quantity.
.. autosummary::
:toctree: api
~transforms.classical_jacobian
~batch_params
~batch_input
~metric_tensor
~adjoint_metric_tensor
~specs
~transforms.mitigate_with_zne
~transforms.split_non_commuting
Transforms that act on quantum functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept quantum functions (Python functions
containing quantum operations) that are used to construct QNodes.
.. autosummary::
:toctree: api
~adjoint
~ctrl
~transforms.cond
~defer_measurements
~apply_controlled_Q
~quantum_monte_carlo
~transforms.insert
Transforms for circuit compilation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This set of transforms accepts quantum functions and performs basic circuit compilation tasks.
.. autosummary::
:toctree: api
~compile
~transforms.cancel_inverses
~transforms.commute_controlled
~transforms.merge_rotations
~transforms.single_qubit_fusion
~transforms.unitary_to_rot
~transforms.merge_amplitude_embedding
~transforms.remove_barrier
~transforms.undo_swaps
~transforms.pattern_matching_optimization
~transforms.transpile
There are also utility functions and decompositions available that assist with
both transforms and decompositions within the larger PennyLane codebase.
.. autosummary::
:toctree: api
~transforms.zyz_decomposition
~transforms.two_qubit_decomposition
~transforms.set_decomposition
~transforms.simplify
~transforms.pattern_matching
There are also utility functions that take a circuit and return a DAG.
.. autosummary::
:toctree: api
~transforms.commutation_dag
~transforms.CommutationDAG
~transforms.CommutationDAGNode
Transform for circuit cutting
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :func:`~.cut_circuit` transform accepts a QNode and returns a new function that cuts the original circuit,
allowing larger circuits to be split into smaller circuits that are compatible with devices that
have a restricted number of qubits.
.. autosummary::
:toctree: api
~cut_circuit
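For example, the following sketch cuts a three-wire circuit so that it can run
on a two-wire device (a minimal illustration; the gates and cut placement are
hypothetical):
.. code-block:: python

    dev = qml.device("default.qubit", wires=2)
    @qml.cut_circuit
    @qml.qnode(dev)
    def circuit(x):
        qml.RX(x, wires=0)
        qml.RY(0.9, wires=1)
        qml.RX(0.3, wires=2)
        qml.CZ(wires=[0, 1])
        qml.WireCut(wires=1)
        qml.CZ(wires=[1, 2])
        return qml.expval(qml.PauliZ(2))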
The :func:`~.cut_circuit_mc` transform is designed to be used for cutting circuits which contain :func:`~.sample`
measurements and is implemented using a Monte Carlo method. Similarly to the :func:`~.cut_circuit`
transform, this transform accepts a QNode and returns a new function that cuts the original circuit.
This transform can also accept an optional classical processing function to calculate an
expectation value.
.. autosummary::
:toctree: api
~cut_circuit_mc
There are also low-level functions that can be used to build up the circuit cutting functionality:
.. autosummary::
:toctree: api
~transforms.qcut.tape_to_graph
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tape
~transforms.qcut.expand_fragment_tapes_mc
~transforms.qcut.qcut_processing_fn
~transforms.qcut.qcut_processing_fn_sample
~transforms.qcut.qcut_processing_fn_mc
~transforms.qcut.CutStrategy
~transforms.qcut.kahypar_cut
~transforms.qcut.place_wire_cuts
~transforms.qcut.find_and_place_cuts
Transforms that act on tapes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept quantum tapes, and return one or
more tapes as well as a classical processing function.
.. autosummary::
:toctree: api
~transforms.measurement_grouping
~transforms.hamiltonian_expand
Decorators and utility functions
--------------------------------
The following decorators and convenience functions are provided
to help build custom QNode, quantum function, and tape transforms:
.. autosummary::
:toctree: api
~single_tape_transform
~batch_transform
~qfunc_transform
~op_transform
~transforms.make_tape
~transforms.map_batch_transform
~transforms.create_expand_fn
~transforms.create_decomp_expand_fn
~transforms.expand_invalid_trainable
~transforms.expand_multipar
~transforms.expand_trainable_multipar
~transforms.expand_nonunitary_gen
"""
# Import the decorators first to prevent circular imports when used in other transforms
from .batch_transform import batch_transform, map_batch_transform
from .qfunc_transforms import make_tape, single_tape_transform, qfunc_transform
from .op_transforms import op_transform
from .adjoint import adjoint
from .batch_params import batch_params
from .batch_input import batch_input
from .classical_jacobian import classical_jacobian
from .condition import cond, Conditional
from .compile import compile
from .control import ControlledOperation, ctrl
from .decompositions import zyz_decomposition, two_qubit_decomposition
from .defer_measurements import defer_measurements
from .hamiltonian_expand import hamiltonian_expand
from .split_non_commuting import split_non_commuting
from .measurement_grouping import measurement_grouping
from .metric_tensor import metric_tensor
from .adjoint_metric_tensor import adjoint_metric_tensor
from .insert_ops import insert
from .mitigate import mitigate_with_zne
from .optimization import (
cancel_inverses,
commute_controlled,
merge_rotations,
single_qubit_fusion,
merge_amplitude_embedding,
remove_barrier,
undo_swaps,
pattern_matching,
pattern_matching_optimization,
)
from .specs import specs
from .qmc import apply_controlled_Q, quantum_monte_carlo
from .unitary_to_rot import unitary_to_rot
from .commutation_dag import (
commutation_dag,
is_commuting,
CommutationDAG,
CommutationDAGNode,
simplify,
)
from .tape_expand import (
expand_invalid_trainable,
expand_multipar,
expand_nonunitary_gen,
expand_trainable_multipar,
create_expand_fn,
create_decomp_expand_fn,
set_decomposition,
)
from .transpile import transpile
from . import qcut
from .qcut import cut_circuit, cut_circuit_mc
|
"""
This module demonstrates and practices:
-- using ARGUMENTs in function CALLs,
-- having PARAMETERs in function DEFINITIONs, and
-- RETURNING a value from a function,
possibly CAPTURING the RETURNED VALUE in a VARIABLE.
-- UNIT TESTING.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import m3t_tester
def main():
""" Calls the TEST functions in this module. """
run_test_sum_of_digits()
run_test_digits_in_cube()
run_test_digits_in_power()
run_test_fancy_sums_of_digits()
# ------------------------------------------------------------------
# TODO: 9. DO THIS LAST!
# -- Uncomment the line of code below to run the main function
# in m3t_tester.py (do not make changes to it).
# It runs OUR tests on your code.
# -- Check to see whether all test cases indicate they
# "COMPLETED SUCCESSFULLY!"
# -- If your code fails any of OUR tests but passes YOUR tests,
# then you are likely not TESTING the methods correctly.
# ** Ask a TA or your professor for help in that case. **
# ------------------------------------------------------------------
# m3t_tester.main()
def run_test_sum_of_digits():
""" Tests the sum_of_digits function. """
# ------------------------------------------------------------------
# TODO: 2. Implement this TEST function, as follows:
#
# Step 1: This TEST function tests the sum_of_digits function.
# So read the doc-string of the sum_of_digits function
# defined below. Be sure that you understand from the
# doc-string what the sum_of_digits function SHOULD return.
#
# Step 2: Pick a test case: a number that you could send as
# an actual argument to the sum_of_digits function.
# - For example, you could pick the test case 826.
#
# Step 3: Figure out the CORRECT (EXPECTED) answer for your
# test case. In the example of 826 the correct answer
# for the sum of its digits is 8 + 2 + 6, which is 16.
#
# Step 4: Write code that prints both the EXPECTED answer
# and the ACTUAL answer returned when you call the function.
# See the example below.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the sum_of_digits function:')
print('--------------------------------------------------')
# Test 1:
expected = 16
answer = sum_of_digits(826)
print('Test 1 expected:', expected)
print(' actual: ', answer)
# ------------------------------------------------------------------
# TO DO: 2 (continued).
# Below this comment, add 3 more test cases of your own choosing.
# ------------------------------------------------------------------
def sum_of_digits(number):
"""
What comes in: An integer.
What goes out: The sum of the digits in the given integer.
Side effects: None.
Example:
If the integer is 83135,
this function returns (8 + 3 + 1 + 3 + 5), which is 20.
"""
# ------------------------------------------------------------------
# Students:
# Do NOT touch this function - it has no TO DO in it.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the other problems.
#
# Ask for help if you are unsure what it means to CALL a function.
# The ONLY part of this function that you need to understand is
# the doc-string above. Treat this function as a black box.
# ------------------------------------------------------------------
if number < 0:
number = -number
digit_sum = 0
while True:
if number == 0:
break
digit_sum = digit_sum + (number % 10)
number = number // 10
return digit_sum
def run_test_digits_in_cube():
""" Tests the digits_in_cube function. """
# ------------------------------------------------------------------
# TODO: 3. Implement this function.
# It TESTS the digits_in_cube function defined below.
# Include at least ** 3 ** tests.
#
# To implement this TEST function, use the same 4 steps as above:
#
# Step 1: Read the doc-string of digits_in_cube below.
# Understand what that function SHOULD return.
#
# Step 2: Pick a test case: a number(s) that you could send as
# actual argument(s) to the digits_in_cube function.
#
# Step 3: Figure out the CORRECT (EXPECTED) answer for your test case.
#
# Step 4: Write code that prints both the EXPECTED answer
# and the ACTUAL answer returned when you call the function.
# Follow the same form as in previous examples.
#
# Include at least ** 3 ** tests.
# ------------------------------------------------------------------
print()
print('-----------------------------------------------------')
print('Testing the digits_in_cube function:')
print('-----------------------------------------------------')
def digits_in_cube(n):
"""
What comes in: A positive integer.
What goes out: The sum of the digits in the CUBE of the integer.
Side effects: None.
Example:
If the integer (n) is 5 (so n cubed is 125),
this function returns (1 + 2 + 5), which is 8.
"""
# ------------------------------------------------------------------
# TODO: 4. Implement and test this function.
# Note that you should write its TEST function first (above).
# That is called TEST-DRIVEN DEVELOPMENT (TDD).
#
####################################################################
# IMPORTANT: CALL, as many times as needed,
# the sum_of_digits function that is DEFINED ABOVE.
####################################################################
# ------------------------------------------------------------------
def run_test_digits_in_power():
""" Tests the digits_in_power function. """
# ------------------------------------------------------------------
# TODO: 5. Implement this function.
# It TESTS the digits_in_power function defined below.
# Include at least ** 3 ** tests.
#
# Use the same 4-step process as in implementing previous TEST functions.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the digits_in_power function:')
print('--------------------------------------------------')
def digits_in_power(n, k):
"""
What comes in: Two positive integers, n and k.
What goes out:
The sum of the digits in x, where x is n raised to the kth power.
Side effects: None.
Example:
If the arguments are 12 and 3, respectively,
this function returns 18
since 12 to the 3rd power is 1728 (whose digits sum to 18).
"""
# ------------------------------------------------------------------
# TODO: 6. Implement and test this function.
#
####################################################################
# IMPORTANT: CALL, as many times as needed,
# the sum_of_digits function that is DEFINED ABOVE.
####################################################################
# ------------------------------------------------------------------
def run_test_fancy_sums_of_digits():
""" Tests the fancy_sums_of_digits function. """
# ------------------------------------------------------------------
# TODO: 7. Implement this function.
# It TESTS the fancy_sums_of_digits function defined below.
# Include at least ** 3 ** tests.
#
# Use the same 4-step process as in implementing the previous
# TEST functions.
# ------------------------------------------------------------------
print()
print('--------------------------------------------------')
print('Testing the fancy_sums_of_digits function:')
print('--------------------------------------------------')
# ------------------------------------------------------------------
# HINT: For your 1st test, consider n=10. Figure out BY HAND
# the correct (expected) answer for that test case. (It's easy.)
# The doc-string below gives test cases you can use for
# your 2nd and 3rd tests but READ THOSE TEST CASES CAREFULLY
# in the doc-string to be sure that you understand the specification.
# ------------------------------------------------------------------
def fancy_sums_of_digits(n):
"""
What comes in: A positive integer n.
What goes out:
-- Let X denote the sum of the digits in (n ** 1000).
-- Let Y denote the sum of the digits in (n ** 999).
This function RETURNs the sum of the digits in (X ** Y).
Side effects: None.
Examples:
-- If n is 2, then:
-- the sum of the digits in n ** 1000 is 1366 (trust me!).
-- the sum of the digits in n ** 999 is 1367 (trust me!).
-- so X ** Y is VERY LARGE in this case
(don't try to print it!)
-- the sum of the digits in (X ** Y) is 19084 (trust me!)
-- so this function returns 19084.
-- If n is 35, then:
-- the sum of the digits in n ** 1000 is 7021 (trust me!).
-- the sum of the digits in n ** 999 is 7145 (trust me!).
-- so X ** Y is VERY LARGE in this case
(don't try to print it!)
-- the sum of the digits in (X ** Y) is 124309 (trust me!)
-- so this function returns 124309.
"""
# ------------------------------------------------------------------
# TODO: 8. Implement and test this function.
#
####################################################################
# IMPORTANT: CALL, as many times as needed,
# the sum_of_digits function that is DEFINED ABOVE.
####################################################################
# ------------------------------------------------------------------
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# This unusual form is necessary for the special testing we provided.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
from django.shortcuts import render, HttpResponseRedirect
from django.urls import reverse
from urllib.parse import urlencode, unquote
import requests
from bs4 import BeautifulSoup
from django.utils.crypto import get_random_string
from django.contrib import messages
from urllib.parse import urlparse, urljoin
from django.contrib.auth import get_user_model
from django.contrib.auth import login as login_auth
def redirect_logged_in_users(function):
def _function(request,*args, **kwargs):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse("feed:index"))
return function(request, *args, **kwargs)
return _function
@redirect_logged_in_users
def index(request):
cleanup(request)
return render(request, 'indieauth/index.html', {})
def login(request):
try:
if request.method == 'POST':
site = request.POST.get("site", None)
url_data = urlparse(site)
if site and url_data.netloc != '' and (url_data.scheme == 'http' or url_data.scheme == 'https'):
if url_data.path == '':
site = site + '/'
print(site)
r = requests.get(site)
soup = BeautifulSoup(r.text, 'html.parser')
                unique_id = get_random_string(length=32)
                authorization_endpoint = None
for link in soup.find_all('link'):
if link.get('rel')[0] == "authorization_endpoint":
authorization_endpoint = link.get('href')
# if relative URL, this will attach it to the end of the redirected url
authorization_endpoint = urljoin(r.url, authorization_endpoint)
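                # The endpoint may also be advertised via an HTTP Link header,
                # e.g. (illustrative): Link: <https://example.com/auth>; rel="authorization_endpoint"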
if r.headers.get('Link', None):
links = r.headers['Link']
print(links)
for link in links.split(","):
possible_url = link.split(";")[0].strip()
possible_url = possible_url[1:len(possible_url)-1]
possible_rel = link.split(";")[1].strip()
if possible_rel == "rel=authorization_endpoint":
authorization_endpoint = urljoin(r.url, possible_url)
# after redirects, the final URL will be contained in the response
site = r.url
print(r.history)
searchHistory = True
i = -1
                # ensure that, if there are temporary redirects, the "me" url is always the last permanent redirect
while searchHistory and (i*-1) <= len(r.history):
history_piece = r.history[i]
if history_piece.status_code == 301:
site = history_piece.url
i -= 1
                # If ALL of them are temporary redirects, then use the initial value
if all(i.status_code == 302 for i in r.history):
site = request.POST.get("site", None)
if authorization_endpoint:
request.session['authorization_endpoint']=authorization_endpoint
request.session['client_id'] = site
request.session['state'] = unique_id
payload = {'me': site,
'redirect_uri': request.build_absolute_uri(reverse('indieauth:redirect')),
'client_id': f'{request.scheme}://{ request.get_host() }/indieauth/application_info',
'state': unique_id,
'response_type': 'id'}
redirect_site = authorization_endpoint + "?" + urlencode(payload)
return HttpResponseRedirect(redirect_site)
else:
cleanup(request)
messages.error(request, 'No authorization_endpoint found.')
return HttpResponseRedirect(reverse('indieauth:index'))
except Exception as e:
print(e)
messages.error(request, 'Error in retrieving url.')
return HttpResponseRedirect(reverse('indieauth:index'))
messages.error(request, 'No site submitted or the URL submitted was not valid.')
return HttpResponseRedirect(reverse('indieauth:index'))
def redirect(request):
    if (request.GET.get('state', None) == request.session.get('state', None)
            and request.session.get('state', None) is not None):
client_id = request.session['client_id']
authorization_endpoint = request.session['authorization_endpoint']
redirect_uri = request.build_absolute_uri(reverse('indieauth:redirect'))
code = request.GET.get('code')
r = requests.post(authorization_endpoint, data = {'code':code, 'client_id':client_id, 'redirect_uri': redirect_uri})
        if r.headers['content-type'] == "application/x-www-form-urlencoded":
            # strip the leading "me=" from the form-encoded response body
            user_site = unquote(r.text)[3:]
        elif r.headers['content-type'] == "application/json":
            user_site = r.json()['me']
        else:
            user_site = None
user_site_matches_domain = urlparse(client_id).netloc == urlparse(user_site).netloc
print(urlparse(client_id).netloc, urlparse(user_site).netloc)
if r.status_code == 200 and user_site and user_site_matches_domain:
messages.success(request, 'Your URL is: ' + user_site)
user_model = get_user_model()
user = user_model.objects.filter(site=user_site)
if user:
login_auth(request, user[0])
else:
user = user_model.objects.create_user(username=user_site, site=user_site)
user.set_unusable_password()
login_auth(request, user)
cleanup(request)
return HttpResponseRedirect(reverse('feed:index'))
else:
messages.error(request, 'Error in URL. Please try again.')
cleanup(request)
return HttpResponseRedirect(reverse('indieauth:index'))
else:
messages.error(request, 'Major error. Likely timeout. Please try again.')
cleanup(request)
return HttpResponseRedirect(reverse('indieauth:index'))
def cleanup(request):
try:
del request.session['authorization_endpoint']
del request.session['state']
del request.session['client_id']
except KeyError:
pass
def application_info(request):
return render(request, "indieauth/application_info.html")
|
"""Contains the Switch parent class."""
import asyncio
from functools import partial
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.machine import MachineController
from mpf.core.system_wide_device import SystemWideDevice
from mpf.core.utility_functions import Util
from mpf.core.platform import SwitchConfig
from mpf.devices.device_mixins import DevicePositionMixin
MYPY = False
if MYPY: # pragma: no cover
from mpf.platforms.interfaces.switch_platform_interface import SwitchPlatformInterface
from mpf.core.platform import SwitchPlatform
@DeviceMonitor("state", "recycle_jitter_count")
class Switch(SystemWideDevice, DevicePositionMixin):
"""A switch in a pinball machine."""
config_section = 'switches'
collection = 'switches'
class_label = 'switch'
__slots__ = ["hw_switch", "platform", "state", "hw_state", "invert", "recycle_secs", "recycle_clear_time",
"recycle_jitter_count", "_events_to_post", "last_change"]
def __init__(self, machine: MachineController, name: str) -> None:
"""Initialise switch."""
self.hw_switch = None # type: SwitchPlatformInterface
self.platform = None # type: SwitchPlatform
super().__init__(machine, name)
self.state = 0
""" The logical state of a switch. 1 = active, 0 = inactive. This takes
into consideration the NC or NO settings for the switch."""
self.hw_state = 0
""" The physical hardware state of the switch. 1 = active,
0 = inactive. This is what the actual hardware is reporting and does
not consider whether a switch is NC or NO."""
self.invert = 0
self.recycle_secs = 0
self.recycle_clear_time = None
self.recycle_jitter_count = 0
self._events_to_post = {0: [], 1: []}
self.last_change = -100000
# register switch so other devices can add handlers to it
self.machine.switch_controller.register_switch(self)
@classmethod
def device_class_init(cls, machine: MachineController):
"""Register handler for duplicate switch number checks."""
machine.events.add_handler("init_phase_4",
cls._check_duplicate_switch_numbers,
machine=machine)
@staticmethod
def _check_duplicate_switch_numbers(machine, **kwargs):
del kwargs
check_set = set()
for switch in machine.switches:
key = (switch.platform, switch.hw_switch.number)
if key in check_set:
raise AssertionError(
"Duplicate switch number {} for switch {}".format(
switch.hw_switch.number, switch))
check_set.add(key)
def validate_and_parse_config(self, config, is_mode_config, debug_prefix: str = None):
"""Validate switch config."""
config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)
platform = self.machine.get_platform_sections(
'switches', getattr(config, "platform", None))
config['platform_settings'] = platform.validate_switch_section(
self, config.get('platform_settings', None))
self._configure_device_logging(config)
return config
def _create_activation_event(self, event_str: str, state: int):
if "|" in event_str:
event, ev_time = event_str.split("|")
ms = Util.string_to_ms(ev_time)
self.machine.switch_controller.add_switch_handler(
switch_name=self.name,
state=state,
callback=partial(self.machine.events.post, event=event),
ms=ms
)
else:
self._events_to_post[state].append(event_str)
def _recycle_passed(self, state):
self.recycle_clear_time = None
# only post event if the switch toggled
if self.state != state:
self._post_events(self.state)
def _post_events_with_recycle(self, state):
# if recycle is ongoing do nothing
if not self.recycle_clear_time:
# calculate clear time
self.recycle_clear_time = self.machine.clock.get_time() + self.recycle_secs
self.machine.clock.loop.call_at(self.recycle_clear_time, partial(self._recycle_passed, state))
# post event
self._post_events(state)
def _post_events(self, state):
for event in self._events_to_post[state]:
if self.machine.events.does_event_exist(event):
self.machine.events.post(event)
@asyncio.coroutine
def _initialize(self):
yield from super()._initialize()
self.platform = self.machine.get_platform_sections(
'switches', self.config['platform'])
if self.config['type'].upper() == 'NC':
self.invert = 1
self.recycle_secs = self.config['ignore_window_ms'] / 1000.0
config = SwitchConfig(invert=self.invert,
debounce=self.config['debounce'])
try:
self.hw_switch = self.platform.configure_switch(
self.config['number'], config, self.config['platform_settings'])
except AssertionError as e:
raise AssertionError("Failed to configure switch {} in platform. See error above".format(self.name)) from e
if self.recycle_secs:
self.add_handler(state=1, callback=self._post_events_with_recycle, callback_kwargs={"state": 1})
self.add_handler(state=0, callback=self._post_events_with_recycle, callback_kwargs={"state": 0})
else:
self.add_handler(state=1, callback=self._post_events, callback_kwargs={"state": 1})
self.add_handler(state=0, callback=self._post_events, callback_kwargs={"state": 0})
if self.machine.config['mpf']['auto_create_switch_events']:
self._create_activation_event(
self.machine.config['mpf']['switch_event_active'].replace(
'%', self.name), 1)
self._create_activation_event(
self.machine.config['mpf']['switch_event_inactive'].replace(
'%', self.name), 0)
for tag in self.tags:
self._create_activation_event(
self.machine.config['mpf']['switch_tag_event'].replace(
'%', tag), 1)
self._create_activation_event(
self.machine.config['mpf']['switch_tag_event'].replace(
'%', tag) + "_active", 1)
self._create_activation_event(
self.machine.config['mpf']['switch_tag_event'].replace(
'%', tag) + "_inactive", 0)
for event in Util.string_to_lowercase_list(
self.config['events_when_activated']):
self._create_activation_event(event, 1)
for event in Util.string_to_lowercase_list(
self.config['events_when_deactivated']):
self._create_activation_event(event, 0)
# pylint: disable-msg=too-many-arguments
def add_handler(self, callback, state=1, ms=0, return_info=False,
callback_kwargs=None):
"""Add switch handler (callback) for this switch which is called when this switch state changes.
Note that this method just calls the
:doc:`Switch Controller's <self.machine.switch_controller>`
``add_switch_handler()`` method behind the scenes.
Args:
callback: A callable method that will be called when the switch
state changes.
            state: The state the switch must change into to trigger the
                callback. Values are 0 or 1, with 0 meaning the switch
                changed to inactive, and 1 meaning the switch changed to
                active.
ms: How many milliseconds the switch needs to be in the new state
before the callback is called. Default is 0 which means that
the callback will be called immediately. You can use this
setting as a form of software debounce, as the switch needs to
be in the state consistently before the callback is called.
return_info: If True, the switch controller will pass the
parameters of the switch handler as arguments to the callback,
including switch_name, state, and ms.
callback_kwargs: Additional kwargs that will be passed with the
callback.
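        Example (illustrative; the switch instance and callback are hypothetical):
            def my_callback(**kwargs):
                print("switch became active")
            switch.add_handler(my_callback, state=1, ms=100)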
"""
return self.machine.switch_controller.add_switch_handler(
self.name, callback, state, ms, return_info, callback_kwargs)
def remove_handler(self, callback, state=1, ms=0):
"""Remove switch handler for this switch."""
return self.machine.switch_controller.remove_switch_handler(
self.name, callback, state, ms)
|
"""
Created on May 17, 2013
@author: tanel
"""
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
GObject.threads_init()
Gst.init(None)
import logging
import os
logger = logging.getLogger(__name__)
class DecoderPipeline2(object):
def __init__(self, conf={}):
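        # conf is a nested dict; an illustrative (hypothetical) example, where
        # "decoder" keys are kaldinnet2onlinedecoder properties and "model" is
        # a made-up property value:
        #   {"out-dir": "tmp",
        #    "decoder": {"use-threaded-decoder": True, "model": "models/final.mdl"}}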
logger.info("Creating decoder using conf: %s" % conf)
self.create_pipeline(conf)
        self.outdir = conf.get("out-dir", None)
        if self.outdir:
            if not os.path.exists(self.outdir):
                os.makedirs(self.outdir)
            elif not os.path.isdir(self.outdir):
                raise Exception("Output directory %s already exists as a file" % self.outdir)
self.result_handler = None
self.full_result_handler = None
self.eos_handler = None
self.error_handler = None
self.request_id = "<undefined>"
def create_pipeline(self, conf):
self.appsrc = Gst.ElementFactory.make("appsrc", "appsrc")
self.decodebin = Gst.ElementFactory.make("decodebin", "decodebin")
self.audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
self.audioresample = Gst.ElementFactory.make("audioresample", "audioresample")
self.tee = Gst.ElementFactory.make("tee", "tee")
self.queue1 = Gst.ElementFactory.make("queue", "queue1")
self.filesink = Gst.ElementFactory.make("filesink", "filesink")
self.queue2 = Gst.ElementFactory.make("queue", "queue2")
self.asr = Gst.ElementFactory.make("kaldinnet2onlinedecoder", "asr")
self.fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
# This needs to be set first
if "use-threaded-decoder" in conf["decoder"]:
self.asr.set_property("use-threaded-decoder", conf["decoder"]["use-threaded-decoder"])
for (key, val) in conf.get("decoder", {}).iteritems():
if key != "use-threaded-decoder":
logger.info("Setting decoder property: %s = %s" % (key, val))
self.asr.set_property(key, val)
self.appsrc.set_property("is-live", True)
self.filesink.set_property("location", "/dev/null")
logger.info('Created GStreamer elements')
self.pipeline = Gst.Pipeline()
for element in [self.appsrc, self.decodebin, self.audioconvert, self.audioresample, self.tee,
self.queue1, self.filesink,
self.queue2, self.asr, self.fakesink]:
logger.debug("Adding %s to the pipeline" % element)
self.pipeline.add(element)
logger.info('Linking GStreamer elements')
self.appsrc.link(self.decodebin)
#self.appsrc.link(self.audioconvert)
self.decodebin.connect('pad-added', self._connect_decoder)
self.audioconvert.link(self.audioresample)
self.audioresample.link(self.tee)
self.tee.link(self.queue1)
self.queue1.link(self.filesink)
self.tee.link(self.queue2)
self.queue2.link(self.asr)
self.asr.link(self.fakesink)
# Create bus and connect several handlers
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.enable_sync_message_emission()
self.bus.connect('message::eos', self._on_eos)
self.bus.connect('message::error', self._on_error)
#self.bus.connect('message::cutter', self._on_cutter)
self.asr.connect('partial-result', self._on_partial_result)
self.asr.connect('final-result', self._on_final_result)
self.asr.connect('full-final-result', self._on_full_final_result)
logger.info("Setting pipeline to READY")
self.pipeline.set_state(Gst.State.READY)
logger.info("Set pipeline to READY")
def _connect_decoder(self, element, pad):
logger.info("%s: Connecting audio decoder" % self.request_id)
pad.link(self.audioconvert.get_static_pad("sink"))
logger.info("%s: Connected audio decoder" % self.request_id)
def _on_partial_result(self, asr, hyp):
logger.info("%s: Got partial result: %s" % (self.request_id, hyp.decode('utf8')))
if self.result_handler:
self.result_handler(hyp, False)
def _on_final_result(self, asr, hyp):
logger.info("%s: Got final result: %s" % (self.request_id, hyp.decode('utf8')))
if self.result_handler:
self.result_handler(hyp, True)
def _on_full_final_result(self, asr, result_json):
logger.info("%s: Got full final result: %s" % (self.request_id, result_json.decode('utf8')))
if self.full_result_handler:
self.full_result_handler(result_json)
def _on_error(self, bus, msg):
self.error = msg.parse_error()
logger.error(self.error)
self.finish_request()
if self.error_handler:
self.error_handler(self.error[0].message)
def _on_eos(self, bus, msg):
logger.info('%s: Pipeline received eos signal' % self.request_id)
#self.decodebin.unlink(self.audioconvert)
self.finish_request()
if self.eos_handler:
self.eos_handler[0](self.eos_handler[1])
def get_adaptation_state(self):
return self.asr.get_property("adaptation-state")
def set_adaptation_state(self, adaptation_state):
"""Sets the adaptation state to a certian value, previously retrieved using get_adaptation_state()
Should be called after init_request(..)
"""
return self.asr.set_property("adaptation-state", adaptation_state)
def finish_request(self):
logger.info("%s: Resetting decoder state" % self.request_id)
if self.outdir:
self.filesink.set_state(Gst.State.NULL)
self.filesink.set_property('location', "/dev/null")
self.filesink.set_state(Gst.State.PLAYING)
self.pipeline.set_state(Gst.State.NULL)
self.request_id = "<undefined>"
def init_request(self, id, caps_str):
self.request_id = id
logger.info("%s: Initializing request" % (self.request_id))
if caps_str and len(caps_str) > 0:
logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
caps = Gst.caps_from_string(caps_str)
self.appsrc.set_property("caps", caps)
else:
#caps = Gst.caps_from_string("")
self.appsrc.set_property("caps", None)
#self.pipeline.set_state(Gst.State.READY)
pass
#self.appsrc.set_state(Gst.State.PAUSED)
if self.outdir:
self.pipeline.set_state(Gst.State.PAUSED)
self.filesink.set_state(Gst.State.NULL)
self.filesink.set_property('location', "%s/%s.raw" % (self.outdir, id))
self.filesink.set_state(Gst.State.PLAYING)
#self.filesink.set_state(Gst.State.PLAYING)
#self.decodebin.set_state(Gst.State.PLAYING)
self.pipeline.set_state(Gst.State.PLAYING)
self.filesink.set_state(Gst.State.PLAYING)
        # push empty buffer (to avoid hang on client disconnect)
#buf = Gst.Buffer.new_allocate(None, 0, None)
#self.appsrc.emit("push-buffer", buf)
# reset adaptation state
self.set_adaptation_state("")
def process_data(self, data):
logger.debug('%s: Pushing buffer of size %d to pipeline' % (self.request_id, len(data)))
buf = Gst.Buffer.new_allocate(None, len(data), None)
buf.fill(0, data)
self.appsrc.emit("push-buffer", buf)
logger.debug('%s: Pushing buffer done' % self.request_id)
def end_request(self):
logger.info("%s: Pushing EOS to pipeline" % self.request_id)
self.appsrc.emit("end-of-stream")
def set_result_handler(self, handler):
self.result_handler = handler
def set_full_result_handler(self, handler):
self.full_result_handler = handler
def set_eos_handler(self, handler, user_data=None):
self.eos_handler = (handler, user_data)
def set_error_handler(self, handler):
self.error_handler = handler
def cancel(self):
logger.info("%s: Sending EOS to pipeline in order to cancel processing" % self.request_id)
self.appsrc.emit("end-of-stream")
#self.asr.set_property("silent", True)
#self.pipeline.set_state(Gst.State.NULL)
#if (self.pipeline.get_state() == Gst.State.PLAYING):
#logger.debug("Sending EOS to pipeline")
#self.pipeline.send_event(Gst.Event.new_eos())
#self.pipeline.set_state(Gst.State.READY)
logger.info("%s: Cancelled pipeline" % self.request_id)
|
print('---------- Welcome to exercise 61 ------')
print('\033[32m Redo challenge 51: read the first term and the common difference of an arithmetic progression, then show the first 10 terms of the progression using a while loop\033[m')
primeiro = int(input('First term: '))
razao = int(input('Common difference: '))
termo = primeiro
c = 1
while c <= 10:
print('{} -> '.format(termo), end='')
termo += razao
c += 1
print('Done')
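# Example run (illustrative): a first term of 2 and common difference of 3 prints:
# 2 -> 5 -> 8 -> 11 -> 14 -> 17 -> 20 -> 23 -> 26 -> 29 -> Done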
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'spotify-client'
copyright = '2020, MoodyTunes'
author = 'MoodyTunes'
# The full version, including alpha/beta/rc tags
with open("../VERSION", "r") as version_file:
version = version_file.read().strip()
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
pygments_style = 'sphinx'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
from netmiko import ConnectHandler
import yaml
from pprint import pprint
def send_show_command(device, show_command):
with ConnectHandler(**device) as ssh:
ssh.enable()
result = ssh.send_command(show_command)
return result
def send_config_commands(device, config_commands):
with ConnectHandler(**device) as ssh:
ssh.enable()
result = ssh.send_config_set(config_commands)
return result
def send_commands(device, config=None, show=None):
    if show:
        return send_show_command(device, show)
    elif config:
        return send_config_commands(device, config)
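# devices.yaml is expected to contain a list of netmiko connection dicts,
# e.g. (illustrative values):
# - device_type: cisco_ios
#   host: 192.168.100.1
#   username: cisco
#   password: cisco
#   secret: cisco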
if __name__ == "__main__":
commands = ["logging 10.255.255.1", "logging buffered 20010", "no logging console"]
show_command = "sh ip int br"
with open("devices.yaml") as f:
dev_list = yaml.safe_load(f)
    # devices.yaml is assumed to hold a list of device dicts (see the example above)
    for dev in dev_list:
        print(send_commands(dev, config=commands))
        print(send_commands(dev, show=show_command))
|